diff --git a/.gitattributes b/.gitattributes
index 51381ee84f5e0b684dbc962eb1418042442e71ef..c2452934f0e785de6ed407a7a250602da124888e 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -1241,3 +1241,4 @@ vlmpy310/lib/python3.10/site-packages/pandas/_libs/interval.cpython-310-x86_64-l
 vlmpy310/lib/python3.10/site-packages/pandas/_libs/tslibs/timezones.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 vlmpy310/lib/python3.10/site-packages/pandas/_libs/window/aggregations.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 vlmpy310/lib/python3.10/site-packages/pandas/_libs/window/indexers.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
+vlmpy310/lib/python3.10/site-packages/pandas/_libs/tslibs/conversion.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_backward_compositeexplicitautograd_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_backward_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..f361bc7fe5e8fb2f8a39d6ff331b9ec1391276c0
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_backward_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & _adaptive_avg_pool3d_backward_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & self);
+TORCH_API at::Tensor & _adaptive_avg_pool3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_foreach_non_finite_check_and_unscale_native.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_foreach_non_finite_check_and_unscale_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..5a41f66aa11e40f764ee7771f8193866bcc02ced
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_amp_foreach_non_finite_check_and_unscale_native.h
@@ -0,0 +1,23 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API ::std::tuple<::std::vector<at::Tensor>,at::Tensor> _amp_foreach_non_finite_check_and_unscale(at::TensorList self, const at::Tensor & found_inf, const at::Tensor & inv_scale);
+TORCH_API void _amp_foreach_non_finite_check_and_unscale_out(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale, at::TensorList out);
+TORCH_API void _amp_foreach_non_finite_check_and_unscale_cuda_(at::TensorList self, at::Tensor & found_inf, const at::Tensor & inv_scale);
+} // namespace native
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_assert_async_native.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_assert_async_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..b657735ee5f585de0c00a2291775fda4568a8ef6
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_assert_async_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API void _assert_async_cpu(const at::Tensor & self);
+TORCH_API void _assert_async_cuda(const at::Tensor & self);
+TORCH_API void _assert_async_msg_cpu(const at::Tensor & self, c10::string_view assert_msg);
+TORCH_API void _assert_async_msg_cuda(const at::Tensor & self, c10::string_view assert_msg);
+} // namespace native
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Double_compositeimplicitautograd_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Double_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..d6313ce2d653d8b9b4f0841cf22f5459862082fe
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_cast_Double_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor _cast_Double(const at::Tensor & self, bool non_blocking=false);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_coalesced_meta_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_coalesced_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..eddef49ceb921a59f26806a645dbc09e12237dc3
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_coalesced_meta_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor & _coalesced_(at::Tensor & self, bool coalesced);
+
+} // namespace meta
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_fft_r2c_cpu_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_fft_r2c_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..82b3c8241311615dae92e2dc9cf61f3253b82134
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_fft_r2c_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Tensor _fft_r2c(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided);
+TORCH_API at::Tensor & _fft_r2c_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided);
+TORCH_API at::Tensor & _fft_r2c_outf(const at::Tensor & self, at::IntArrayRef dim, int64_t normalization, bool onesided, at::Tensor & out);
+
+} // namespace cpu
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_min_native.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_min_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..02e3e40f993af792de8684d256f391160ec134f4
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_clamp_min_native.h
@@ -0,0 +1,35 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API void _foreach_clamp_min_Scalar_out(at::TensorList self, const at::Scalar & scalar, at::TensorList out);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_min_scalar_kernel_slow(at::TensorList self, const at::Scalar & scalar);
+TORCH_API void foreach_tensor_clamp_min_scalar_kernel_slow_(at::TensorList self, const at::Scalar & scalar);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_min_scalar_kernel_cuda(at::TensorList self, const at::Scalar & scalar);
+TORCH_API void foreach_tensor_clamp_min_scalar_kernel_cuda_(at::TensorList self, const at::Scalar & scalar);
+TORCH_API void _foreach_clamp_min_List_out(at::TensorList self, at::TensorList other, at::TensorList out);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_min_list_kernel_slow(at::TensorList self, at::TensorList other);
+TORCH_API void foreach_tensor_clamp_min_list_kernel_slow_(at::TensorList self, at::TensorList other);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_min_list_kernel_cuda(at::TensorList self, at::TensorList other);
+TORCH_API void foreach_tensor_clamp_min_list_kernel_cuda_(at::TensorList self, at::TensorList other);
+TORCH_API void _foreach_clamp_min_ScalarList_out(at::TensorList self, at::ArrayRef<at::Scalar> scalars, at::TensorList out);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_min_scalarlist_kernel_slow(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+TORCH_API void foreach_tensor_clamp_min_scalarlist_kernel_slow_(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+TORCH_API ::std::vector<at::Tensor> foreach_tensor_clamp_min_scalarlist_kernel_cuda(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+TORCH_API void foreach_tensor_clamp_min_scalarlist_kernel_cuda_(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+} // namespace native
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_lerp_cpu_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_lerp_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2b2f6197eee49d9f28c3b81431eef18699efbdaf
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_lerp_cpu_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_lerp(at::TensorList self, at::TensorList tensors1, at::TensorList weights);
+TORCH_API void _foreach_lerp_(at::TensorList self, at::TensorList tensors1, at::TensorList weights);
+TORCH_API ::std::vector<at::Tensor> _foreach_lerp(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight);
+TORCH_API void _foreach_lerp_(at::TensorList self, at::TensorList tensors1, const at::Scalar & weight);
+
+} // namespace cpu
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_minimum_cpu_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_minimum_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..0a6a3295196f34b3b878efeb6ff99447a5f4e662
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_minimum_cpu_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::vector<at::Tensor> _foreach_minimum(at::TensorList self, const at::Scalar & scalar);
+TORCH_API void _foreach_minimum_(at::TensorList self, const at::Scalar & scalar);
+TORCH_API ::std::vector<at::Tensor> _foreach_minimum(at::TensorList self, at::TensorList other);
+TORCH_API void _foreach_minimum_(at::TensorList self, at::TensorList other);
+TORCH_API ::std::vector<at::Tensor> _foreach_minimum(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+TORCH_API void _foreach_minimum_(at::TensorList self, at::ArrayRef<at::Scalar> scalars);
+
+} // namespace cpu
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_zero_cuda_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_zero_cuda_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..242aa56705a2fd2da0051f9a5d2c06e2f2c0f425
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_zero_cuda_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cuda {
+
+TORCH_API void _foreach_zero_(at::TensorList self);
+
+} // namespace cuda
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_grid_sampler_2d_cpu_fallback_compositeexplicitautograd_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_grid_sampler_2d_cpu_fallback_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..c0aa6efd611de34e123d9c35c584939772a1c39a
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_grid_sampler_2d_cpu_fallback_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor _grid_sampler_2d_cpu_fallback(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
+TORCH_API at::Tensor & _grid_sampler_2d_cpu_fallback_out(at::Tensor & out, const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners);
+TORCH_API at::Tensor & _grid_sampler_2d_cpu_fallback_outf(const at::Tensor & input, const at::Tensor & grid, int64_t interpolation_mode, int64_t padding_mode, bool align_corners, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_svd.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_svd.h
new file mode 100644
index 0000000000000000000000000000000000000000..a31348d1eab9b3b468cd86adb51f8905d6afbc2d
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_svd.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_linalg_svd_ops.h>
+
+namespace at {
+
+
+// aten::_linalg_svd(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None) -> (Tensor U, Tensor S, Tensor Vh)
+inline ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_svd(const at::Tensor & A, bool full_matrices=false, bool compute_uv=true, c10::optional<c10::string_view> driver=c10::nullopt) {
+    return at::_ops::_linalg_svd::call(A, full_matrices, compute_uv, driver);
+}
+
+// aten::_linalg_svd.U(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_svd_out(at::Tensor & U, at::Tensor & S, at::Tensor & Vh, const at::Tensor & A, bool full_matrices=false, bool compute_uv=true, c10::optional<c10::string_view> driver=c10::nullopt) {
+    return at::_ops::_linalg_svd_U::call(A, full_matrices, compute_uv, driver, U, S, Vh);
+}
+// aten::_linalg_svd.U(Tensor A, bool full_matrices=False, bool compute_uv=True, *, str? driver=None, Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh) -> (Tensor(a!) U, Tensor(b!) S, Tensor(c!) Vh)
+inline ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_svd_outf(const at::Tensor & A, bool full_matrices, bool compute_uv, c10::optional<c10::string_view> driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh) {
+    return at::_ops::_linalg_svd_U::call(A, full_matrices, compute_uv, driver, U, S, Vh);
+}
+
+}
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_svd_cpu_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_svd_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2829f9c3f0b5ec74fce1c41fa4c924d0a1318709
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_svd_cpu_dispatch.h
@@ -0,0 +1,25 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> _linalg_svd(const at::Tensor & A, bool full_matrices=false, bool compute_uv=true, c10::optional<c10::string_view> driver=c10::nullopt);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_svd_out(at::Tensor & U, at::Tensor & S, at::Tensor & Vh, const at::Tensor & A, bool full_matrices=false, bool compute_uv=true, c10::optional<c10::string_view> driver=c10::nullopt);
+TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> _linalg_svd_outf(const at::Tensor & A, bool full_matrices, bool compute_uv, c10::optional<c10::string_view> driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh);
+
+} // namespace cpu
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_local_scalar_dense_cpu_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_local_scalar_dense_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..42ad8d517f041f609a5ff671b69024e8cd29dc94
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_local_scalar_dense_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API at::Scalar _local_scalar_dense(const at::Tensor & self);
+
+} // namespace cpu
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_sum_backward_native.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_sum_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..716da905e0c1d7f29f9416237ab040cf2979f911
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_nested_sum_backward_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor _nested_sum_backward_cpu(const at::Tensor & grad, const at::Tensor & self, at::OptionalIntArrayRef dim, bool keepdim=false);
+} // namespace native
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax_compositeexplicitautogradnonfunctional_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6dd2795ecb91fc8a99375fbb77cd256229c478a5
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_softmax_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor _softmax(const at::Tensor & self, int64_t dim, bool half_to_float);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_lstm_cell_ops.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_lstm_cell_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..c845d340e71e4052716b1253c6986c0543ac986d
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_thnn_fused_lstm_cell_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API _thnn_fused_lstm_cell {
+  using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_thnn_fused_lstm_cell")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_thnn_fused_lstm_cell(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None) -> (Tensor, Tensor, Tensor)")
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias);
+  static ::std::tuple<at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias);
+};
+
+struct TORCH_API _thnn_fused_lstm_cell_out {
+  using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, const at::Tensor &, const c10::optional<at::Tensor> &, const c10::optional<at::Tensor> &, at::Tensor &, at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_thnn_fused_lstm_cell")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_thnn_fused_lstm_cell.out(Tensor input_gates, Tensor hidden_gates, Tensor cx, Tensor? input_bias=None, Tensor? hidden_bias=None, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2) -> (Tensor(a!), Tensor(b!), Tensor(c!))")
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> call(const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+  static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input_gates, const at::Tensor & hidden_gates, const at::Tensor & cx, const c10::optional<at::Tensor> & input_bias, const c10::optional<at::Tensor> & hidden_bias, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
+};
+
+}} // namespace at::_ops
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_bsc_tensor_args.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_bsc_tensor_args.h
new file mode 100644
index 0000000000000000000000000000000000000000..78d3453ca6e6d1700fdd1b77fd2fca9102794674
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_bsc_tensor_args.h
@@ -0,0 +1,30 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/_validate_sparse_bsc_tensor_args_ops.h>
+
+namespace at {
+
+
+// aten::_validate_sparse_bsc_tensor_args(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size) -> ()
+inline void _validate_sparse_bsc_tensor_args(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size) {
+    return at::_ops::_validate_sparse_bsc_tensor_args::call(ccol_indices, row_indices, values, size);
+}
+
+}
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_compressed_tensor_args_compositeimplicitautograd_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_compressed_tensor_args_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..75c35a9462fcba4d794410aeaa33419b5d3b344b
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_validate_sparse_compressed_tensor_args_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API void _validate_sparse_compressed_tensor_args(const at::Tensor & compressed_indices, const at::Tensor & plain_indices, const at::Tensor & values, at::IntArrayRef size, at::Layout layout);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/angle.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/angle.h
new file mode 100644
index 0000000000000000000000000000000000000000..cdabb6835b60e569db680903e41df05286861cbf
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/angle.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/angle_ops.h>
+
+namespace at {
+
+
+// aten::angle(Tensor self) -> Tensor
+inline at::Tensor angle(const at::Tensor & self) {
+    return at::_ops::angle::call(self);
+}
+
+// aten::angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & angle_out(at::Tensor & out, const at::Tensor & self) {
+    return at::_ops::angle_out::call(self, out);
+}
+// aten::angle.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)
+inline at::Tensor & angle_outf(const at::Tensor & self, at::Tensor & out) {
+    return at::_ops::angle_out::call(self, out);
+}
+
+}
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/any_native.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/any_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..3ffe69a485d05056652b532161880773f3b3537f
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/any_native.h
@@ -0,0 +1,29 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+#include <ATen/ops/any_meta.h>
+
+namespace at {
+namespace native {
+struct TORCH_API structured_any_out : public at::meta::structured_any_dim {
+void impl(const at::Tensor & self, int64_t dim, bool keepdim, const at::Tensor & out);
+};
+TORCH_API at::Tensor any(const at::Tensor & self, at::Dimname dim, bool keepdim=false);
+TORCH_API at::Tensor & any_out(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & out);
+struct TORCH_API structured_any_all_out : public at::meta::structured_any {
+void impl(const at::Tensor & self, const at::Tensor & out);
+};
+TORCH_API at::Tensor any_sparse(const at::Tensor & self);
+} // namespace native
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d_backward_meta.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d_backward_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..5909575414c6f83fcab926abda20dfb153ec318a
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d_backward_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_avg_pool2d_backward : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override);
+};
+
+} // namespace native
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/bernoulli_meta_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/bernoulli_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..96424b9faaacf5e6d12e4fc914e06625fb27de90
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/bernoulli_meta_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor & bernoulli_(at::Tensor & self, const at::Tensor & p, c10::optional<at::Generator> generator=c10::nullopt);
+TORCH_API at::Tensor & bernoulli_(at::Tensor & self, double p=0.5, c10::optional<at::Generator> generator=c10::nullopt);
+
+} // namespace meta
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/blackman_window_native.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/blackman_window_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..2a0b78572a2b22bbbfa9663fb1dad3af7132f83c
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/blackman_window_native.h
@@ -0,0 +1,24 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor blackman_window(int64_t window_length, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={});
+TORCH_API at::Tensor & blackman_window_out(int64_t window_length, at::Tensor & out);
+TORCH_API at::Tensor blackman_window(int64_t window_length, bool periodic, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={});
+TORCH_API at::Tensor & blackman_window_periodic_out(int64_t window_length, bool periodic, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/block_diag_native.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/block_diag_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..c1f8abf82221ca4add8642fa8c050cd1fff54d5c
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/block_diag_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor block_diag(at::TensorList tensors);
+TORCH_API at::Tensor & block_diag_out(at::TensorList tensors, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/concat_compositeimplicitautograd_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/concat_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..07679628d3c2afcf100117dfd09f6afb93c0d73e
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/concat_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,28 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor concat(at::TensorList tensors, int64_t dim=0);
+TORCH_API at::Tensor & concat_out(at::Tensor & out, at::TensorList tensors, int64_t dim=0);
+TORCH_API at::Tensor & concat_outf(at::TensorList tensors, int64_t dim, at::Tensor & out);
+TORCH_API at::Tensor concat(at::TensorList tensors, at::Dimname dim);
+TORCH_API at::Tensor & concat_out(at::Tensor & out, at::TensorList tensors, at::Dimname dim);
+TORCH_API at::Tensor & concat_outf(at::TensorList tensors, at::Dimname dim, at::Tensor & out);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_affine_grid_generator_compositeexplicitautograd_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_affine_grid_generator_compositeexplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..6564d4b26454bb3518bce789877507b0acdf4283
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/cudnn_affine_grid_generator_compositeexplicitautograd_dispatch.h
@@ -0,0 +1,24 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautograd {
+
+TORCH_API at::Tensor & cudnn_affine_grid_generator_out(at::Tensor & out, const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W);
+TORCH_API at::Tensor & cudnn_affine_grid_generator_outf(const at::Tensor & theta, int64_t N, int64_t C, int64_t H, int64_t W, at::Tensor & out);
+
+} // namespace compositeexplicitautograd
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/cumprod_meta_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/cumprod_meta_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..a308a5a556819e69440aaafe7deddfc6830fa327
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/cumprod_meta_dispatch.h
@@ -0,0 +1,26 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace meta {
+
+TORCH_API at::Tensor cumprod(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt);
+TORCH_API at::Tensor & cumprod_out(at::Tensor & out, const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt);
+TORCH_API at::Tensor & cumprod_outf(const at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype, at::Tensor & out);
+TORCH_API at::Tensor & cumprod_(at::Tensor & self, int64_t dim, c10::optional<at::ScalarType> dtype=c10::nullopt);
+
+} // namespace meta
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/det_ops.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/det_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..77ceba91198bb20f151d7cf0ccb52a84794b5de8
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/det_ops.h
@@ -0,0 +1,28 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API det {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::det")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "det(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+}} // namespace at::_ops
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/diag_embed_native.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/diag_embed_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..3e2f3f4e044bbd4222b2254f5357e8fe2dbcf922
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/diag_embed_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor & diag_embed_out(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out);
+TORCH_API at::Tensor diag_embed(const at::Tensor & self, int64_t offset=0, int64_t dim1=-2, int64_t dim2=-1);
+} // namespace native
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal.h
new file mode 100644
index 0000000000000000000000000000000000000000..6c4710cf0fef2f0c7b4a8989a880fb9e9b3b990e
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal.h
@@ -0,0 +1,35 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Function.h
+
+#include <ATen/Context.h>
+#include <ATen/DeviceGuard.h>
+#include <ATen/TensorUtils.h>
+#include <ATen/TracerMode.h>
+#include <ATen/core/Generator.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+
+
+
+#include <ATen/ops/diagonal_ops.h>
+
+namespace at {
+
+
+// aten::diagonal(Tensor(a) self, int offset=0, int dim1=0, int dim2=1) -> Tensor(a)
+inline at::Tensor diagonal(const at::Tensor & self, int64_t offset=0, int64_t dim1=0, int64_t dim2=1) {
+    return at::_ops::diagonal::call(self, offset, dim1, dim2);
+}
+
+// aten::diagonal.Dimname(Tensor(a) self, *, Dimname outdim, Dimname dim1, Dimname dim2, int offset=0) -> Tensor(a)
+inline at::Tensor diagonal(const at::Tensor & self, at::Dimname outdim, at::Dimname dim1, at::Dimname dim2, int64_t offset=0) {
+    return at::_ops::diagonal_Dimname::call(self, outdim, dim1, dim2, offset);
+}
+
+}
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/exp2_meta.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/exp2_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..910751b784553e5051bc976436196626db0a0be5
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/exp2_meta.h
@@ -0,0 +1,27 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_exp2 : public TensorIteratorBase {
+
+
+  void meta(const at::Tensor & self);
+};
+
+} // namespace native
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_cachemask_backward_native.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_cachemask_backward_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..f6b9aee974c9101d289672f4e7c1e75e052945e1
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_cachemask_backward_native.h
@@ -0,0 +1,21 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor fake_quantize_per_channel_affine_cachemask_backward(const at::Tensor & grad, const at::Tensor & mask);
+} // namespace native
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_cachemask_cpu_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_cachemask_cpu_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2ee6e61c5f8d5dfa8cfb69d4f78261caf9e0f205
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_tensor_affine_cachemask_cpu_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace cpu {
+
+TORCH_API ::std::tuple<at::Tensor,at::Tensor> fake_quantize_per_tensor_affine_cachemask(const at::Tensor & self, double scale, int64_t zero_point, int64_t quant_min, int64_t quant_max);
+
+} // namespace cpu
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_int8_weight_fp32_activation_compositeimplicitautograd_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_int8_weight_fp32_activation_compositeimplicitautograd_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..2d66e8b04e5db132e860f74d59aad7a4b8ff4106
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_int8_weight_fp32_activation_compositeimplicitautograd_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeimplicitautograd {
+
+TORCH_API at::Tensor fbgemm_linear_int8_weight_fp32_activation(const at::Tensor & input, const at::Tensor & weight, const at::Tensor & packed, const at::Tensor & col_offsets, const at::Scalar & weight_scale, const at::Scalar & weight_zero_point, const at::Tensor & bias);
+
+} // namespace compositeimplicitautograd
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fftfreq_native.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fftfreq_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..e5d7c16641a0066a9c914840b11db712735b6666
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/fft_fftfreq_native.h
@@ -0,0 +1,22 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor fft_fftfreq(int64_t n, double d=1.0, c10::optional<at::ScalarType> dtype={}, c10::optional<at::Layout> layout={}, c10::optional<at::Device> device={}, c10::optional<bool> pin_memory={});
+TORCH_API at::Tensor & fft_fftfreq_out(int64_t n, double d, at::Tensor & out);
+} // namespace native
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_compositeexplicitautogradnonfunctional_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..fa915dc91bc9194e93fcf99f124570314d57201b
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/gelu_backward_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor gelu_backward(const at::Tensor & grad_output, const at::Tensor & self, c10::string_view approximate="none");
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_backward_compositeexplicitautogradnonfunctional_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_backward_compositeexplicitautogradnonfunctional_dispatch.h
new file mode 100644
index 0000000000000000000000000000000000000000..38e950a9e74183da370fda5496cec06e932a26d0
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/hardsigmoid_backward_compositeexplicitautogradnonfunctional_dispatch.h
@@ -0,0 +1,23 @@
+#pragma once
+// @generated by torchgen/gen.py from DispatchKeyFunction.h
+
+// NB: The implementing C++ file is RegisterDispatchKey.cpp
+
+// The only #includes we need are for custom classes that have defaults in the C++ API
+#include <c10/core/MemoryFormat.h>
+#include <c10/core/Scalar.h>
+#include <ATen/core/Reduction.h>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+
+namespace compositeexplicitautogradnonfunctional {
+
+TORCH_API at::Tensor hardsigmoid_backward(const at::Tensor & grad_output, const at::Tensor & self);
+
+} // namespace compositeexplicitautogradnonfunctional
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/im2col_ops.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/im2col_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..43ab5e789844440fd79ba3a29d0e68474f9ba3b0
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/im2col_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API im2col_out {
+  using schema = at::Tensor & (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::im2col")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "im2col.out(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out);
+};
+
+struct TORCH_API im2col {
+  using schema = at::Tensor (const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::im2col")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "im2col(Tensor self, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor")
+  static at::Tensor call(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride);
+};
+
+}} // namespace at::_ops
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/isin_meta.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/isin_meta.h
new file mode 100644
index 0000000000000000000000000000000000000000..a5cb4b2722ce246adcf5849897fdc53a303bf5d8
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/isin_meta.h
@@ -0,0 +1,37 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeMetaFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/TensorIterator.h>
+#include <ATen/TensorMeta.h>
+#include <tuple>
+#include <vector>
+
+namespace at {
+namespace meta {
+
+struct TORCH_API structured_isin_Tensor_Tensor : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & elements, const at::Tensor & test_elements, bool assume_unique, bool invert);
+};
+struct TORCH_API structured_isin_Tensor_Scalar : public at::impl::MetaBase {
+
+
+  void meta(const at::Tensor & elements, const at::Scalar & test_element, bool assume_unique, bool invert);
+};
+struct TORCH_API structured_isin_Scalar_Tensor : public at::impl::MetaBase {
+
+
+  void meta(const at::Scalar & element, const at::Tensor & test_elements, bool assume_unique, bool invert);
+};
+
+} // namespace native
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_ops.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..25113660f49c52f1936bbe8f556b47cb3cb3d828
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include <ATen/core/ATen_fwd.h>
+
+namespace at {
+namespace _ops {
+
+
+struct TORCH_API isneginf {
+  using schema = at::Tensor (const at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::isneginf")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "isneginf(Tensor self) -> Tensor")
+  static at::Tensor call(const at::Tensor & self);
+  static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self);
+};
+
+struct TORCH_API isneginf_out {
+  using schema = at::Tensor & (const at::Tensor &, at::Tensor &);
+  using ptr_schema = schema*;
+  // See Note [static constexpr char* members for windows NVCC]
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::isneginf")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
+  STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "isneginf.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)")
+  static at::Tensor & call(const at::Tensor & self, at::Tensor & out);
+  static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out);
+};
+
+}} // namespace at::_ops
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/less_equal_native.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/less_equal_native.h
new file mode 100644
index 0000000000000000000000000000000000000000..81f8abe3429127bd1e87f2655b2e022fc3ca1c45
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/less_equal_native.h
@@ -0,0 +1,26 @@
+#pragma once
+
+// @generated by torchgen/gen.py from NativeFunction.h
+
+#include <c10/core/Scalar.h>
+#include <c10/core/Storage.h>
+#include <c10/core/TensorOptions.h>
+#include <c10/util/Deprecated.h>
+#include <c10/util/Optional.h>
+#include <c10/core/QScheme.h>
+#include <ATen/core/Reduction.h>
+#include <ATen/core/Tensor.h>
+#include <tuple>
+#include <vector>
+
+
+namespace at {
+namespace native {
+TORCH_API at::Tensor less_equal(const at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor & less_equal_out(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
+TORCH_API at::Tensor & less_equal_(at::Tensor & self, const at::Scalar & other);
+TORCH_API at::Tensor less_equal(const at::Tensor & self, const at::Tensor & other);
+TORCH_API at::Tensor & less_equal_out(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
+TORCH_API at::Tensor & less_equal_(at::Tensor & self, const at::Tensor & other);
+} // namespace native
+} // namespace at
diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_exp_ops.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_exp_ops.h
new file mode 100644
index 0000000000000000000000000000000000000000..a66562f96947f829325e5d5ba44e438a48a5ab5e
--- /dev/null
+++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_matrix_exp_ops.h
@@ -0,0 +1,39 @@
+#pragma once
+
+// @generated by torchgen/gen.py from Operator.h
+
+#include <tuple>
+#include <vector>
+
+// Forward declarations of any types needed in the operator signatures.
+// We can't directly include these classes because it will cause circular include dependencies.
+// This file is included by TensorBody.h, which defines the Tensor class.
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API linalg_matrix_exp { + using schema = at::Tensor (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_matrix_exp") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_matrix_exp(Tensor self) -> Tensor") + static at::Tensor call(const at::Tensor & self); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +struct TORCH_API linalg_matrix_exp_out { + using schema = at::Tensor & (const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_matrix_exp") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_matrix_exp.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_cell.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_cell.h new file mode 100644 index 0000000000000000000000000000000000000000..14ae857ce4c98b46c147ba95333ae13fca71cd33 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/lstm_cell.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::lstm_cell(Tensor input, Tensor[] hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? 
b_hh=None) -> (Tensor, Tensor) +inline ::std::tuple<at::Tensor,at::Tensor> lstm_cell(const at::Tensor & input, at::TensorList hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional<at::Tensor> & b_ih={}, const c10::optional<at::Tensor> & b_hh={}) { + return at::_ops::lstm_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh); +} + +} diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_meta.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..bc84a23cb81139cc840dabe9286a18fdf6d83deb --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> +#include <c10/core/QScheme.h> +#include <ATen/core/Reduction.h> +#include <ATen/TensorIterator.h> +#include <ATen/TensorMeta.h> +#include <tuple> +#include <vector> + +namespace at { +namespace meta { + +struct TORCH_API structured_maximum : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & other); +}; + +} // namespace meta +} // namespace at diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_depthwise_convolution.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_depthwise_convolution.h new file mode 100644 index 0000000000000000000000000000000000000000..739c219bcdfd2ad443991015f4dbe4decda35825 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_depthwise_convolution.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include <ATen/Context.h> +#include <ATen/DeviceGuard.h> +#include <ATen/TensorUtils.h> +#include <ATen/TracerMode.h> +#include <ATen/core/Generator.h> +#include <ATen/core/Reduction.h> +#include <ATen/core/Tensor.h> +#include <c10/core/Scalar.h> +#include <c10/core/Storage.h> +#include <c10/core/TensorOptions.h> +#include <c10/util/Deprecated.h> +#include <c10/util/Optional.h> + + + +#include <ATen/ops/miopen_depthwise_convolution_ops.h> + +namespace at { + + +// aten::miopen_depthwise_convolution(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic) -> Tensor +inline at::Tensor miopen_depthwise_convolution_symint(const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_depthwise_convolution::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic); +} +namespace symint { + template ::value>> + at::Tensor miopen_depthwise_convolution(const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_depthwise_convolution::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic); + } +} + +// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & miopen_depthwise_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_depthwise_convolution_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), stride, dilation, groups, benchmark, deterministic, out); +} +namespace symint { + template ::value>> + at::Tensor & miopen_depthwise_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_depthwise_convolution_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), stride, dilation, groups, benchmark, deterministic, out); + } +} + +// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & miopen_depthwise_convolution_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) { + return at::_ops::miopen_depthwise_convolution_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), stride, dilation, groups, benchmark, deterministic, out); +} +namespace symint { + template ::value>> + at::Tensor & miopen_depthwise_convolution_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) { + return at::_ops::miopen_depthwise_convolution_out::call(self, weight, bias, c10::fromIntArrayRefSlow(padding), stride, dilation, groups, benchmark, deterministic, out); + } +} + +// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & miopen_depthwise_convolution_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_depthwise_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out); +} +namespace symint { + template ::value>> + at::Tensor & miopen_depthwise_convolution_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic) { + return at::_ops::miopen_depthwise_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out); + } +} + +// aten::miopen_depthwise_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, int[] stride, int[] dilation, int groups, bool benchmark, bool deterministic, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & miopen_depthwise_convolution_symint_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) { + return at::_ops::miopen_depthwise_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out); +} +namespace symint { + template ::value>> + at::Tensor & miopen_depthwise_convolution_outf(const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, at::IntArrayRef stride, at::IntArrayRef dilation, int64_t groups, bool benchmark, bool deterministic, at::Tensor & out) { + return at::_ops::miopen_depthwise_convolution_out::call(self, weight, bias, padding, stride, dilation, groups, benchmark, deterministic, out); + } +} + +} diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/mode_native.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/mode_native.h new file mode 100644 index 0000000000000000000000000000000000000000..dcbd4519afd73f60502203539558adb1502a8f79 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/mode_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple mode_out(const at::Tensor & self, int64_t dim, bool keepdim, at::Tensor & values, at::Tensor & indices); +TORCH_API ::std::tuple mode(const at::Tensor & self, int64_t dim=-1, bool keepdim=false); +TORCH_API ::std::tuple mode(const at::Tensor & self, at::Dimname dim, bool keepdim=false); +TORCH_API ::std::tuple mode_out(const at::Tensor & self, at::Dimname dim, bool keepdim, at::Tensor & values, at::Tensor & indices); +} // namespace native +} // namespace at diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/mv_ops.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/mv_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..fa0adc3aa2fb42b5e3cb71e4fa4f6e50065f04ad --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/mv_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by 
torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API mv { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mv") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mv(Tensor self, Tensor vec) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & vec); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec); +}; + +struct TORCH_API mv_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mv") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mv.out(Tensor self, Tensor vec, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & vec, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & vec, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/nan_to_num_cuda_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/nan_to_num_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c6dada5f4711b3b90bd732a8b5e7f276b11d306d --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/nan_to_num_cuda_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor & nan_to_num_out(at::Tensor & out, const at::Tensor & self, c10::optional nan=c10::nullopt, c10::optional posinf=c10::nullopt, c10::optional neginf=c10::nullopt); +TORCH_API at::Tensor & nan_to_num_outf(const at::Tensor & self, c10::optional nan, c10::optional posinf, c10::optional neginf, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/nonzero_numpy_compositeimplicitautograd_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/nonzero_numpy_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..0b1a2ff600d2752711597d3cfbea4fdb0c3b0493 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/nonzero_numpy_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::vector nonzero_numpy(const at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/normal_cpu_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/normal_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..cdc0aa4b1cb3afa0e32d0e4660961d2885a89dcb --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/normal_cpu_dispatch.h @@ -0,0 +1,32 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & normal_(at::Tensor & self, double mean=0, double std=1, c10::optional generator=c10::nullopt); +TORCH_API at::Tensor normal(const at::Tensor & mean, double std=1, c10::optional generator=c10::nullopt); +TORCH_API at::Tensor & normal_out(at::Tensor & out, const at::Tensor & mean, double std=1, c10::optional generator=c10::nullopt); +TORCH_API at::Tensor & normal_outf(const at::Tensor & mean, double std, c10::optional generator, at::Tensor & out); +TORCH_API at::Tensor normal(double mean, const at::Tensor & std, c10::optional generator=c10::nullopt); +TORCH_API at::Tensor & normal_out(at::Tensor & out, double mean, const at::Tensor & std, c10::optional generator=c10::nullopt); +TORCH_API at::Tensor & normal_outf(double mean, const at::Tensor & std, c10::optional generator, at::Tensor & out); +TORCH_API at::Tensor normal(const at::Tensor & mean, const at::Tensor & std, c10::optional generator=c10::nullopt); +TORCH_API at::Tensor & normal_out(at::Tensor & out, const at::Tensor & mean, const at::Tensor & std, c10::optional generator=c10::nullopt); +TORCH_API at::Tensor & normal_outf(const at::Tensor & mean, const at::Tensor & std, c10::optional generator, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/pairwise_distance_native.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/pairwise_distance_native.h new file mode 100644 index 0000000000000000000000000000000000000000..750f65e5d62312c5e399327c02371ef466389e79 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/pairwise_distance_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor pairwise_distance(const at::Tensor & x1, const at::Tensor & x2, double p=2, double eps=1e-06, bool keepdim=false); +} // namespace native +} // namespace at diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/rnn_relu_cell.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/rnn_relu_cell.h new file mode 100644 index 0000000000000000000000000000000000000000..2b6651db3af97e7994bc282c7092ebaac528d084 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/rnn_relu_cell.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::rnn_relu_cell(Tensor input, Tensor hx, Tensor w_ih, Tensor w_hh, Tensor? b_ih=None, Tensor? 
b_hh=None) -> Tensor +inline at::Tensor rnn_relu_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const c10::optional & b_ih={}, const c10::optional & b_hh={}) { + return at::_ops::rnn_relu_cell::call(input, hx, w_ih, w_hh, b_ih, b_hh); +} + +} diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_u_meta_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_u_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d184797c64a87c74da40e1c90050ef5bfb1bc816 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/special_shifted_chebyshev_polynomial_u_meta_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor special_shifted_chebyshev_polynomial_u(const at::Tensor & x, const at::Tensor & n); +TORCH_API at::Tensor & special_shifted_chebyshev_polynomial_u_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n); +TORCH_API at::Tensor & special_shifted_chebyshev_polynomial_u_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out); + +} // namespace meta +} // namespace at diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/tan_meta_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/tan_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4ab10b6feef4def8c7125c3430f05ded3b563144 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/tan_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor tan(const at::Tensor & self); +TORCH_API at::Tensor & tan_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & tan_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & tan_(at::Tensor & self); + +} // namespace meta +} // namespace at diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/threshold_cuda_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/threshold_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..8445a7e500683a99d64d60e761dd0d963b18e43f --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/threshold_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor threshold(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value); +TORCH_API at::Tensor & threshold_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value); +TORCH_API at::Tensor & threshold_outf(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value, at::Tensor & out); +TORCH_API at::Tensor & threshold_(at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value); + +} // namespace cuda +} // namespace at diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/threshold_native.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/threshold_native.h new file mode 100644 index 0000000000000000000000000000000000000000..6c8ae67512a023c587ac03bdcdee03a5b0f1c057 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/threshold_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_threshold_out : public at::meta::structured_threshold { +void impl(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value, const at::Tensor & out); +}; +TORCH_API at::Tensor threshold_quantized_cpu(const at::Tensor & self, const at::Scalar & threshold, const at::Scalar & value); +} // namespace native +} // namespace at diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/to_sparse_csr_ops.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/to_sparse_csr_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..e548b98f1c83bb08ca818ea3d8dc3b0fc08cbb91 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/to_sparse_csr_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API to_sparse_csr { + using schema = at::Tensor (const at::Tensor &, c10::optional); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::to_sparse_csr") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "to_sparse_csr(Tensor self, int? dense_dim=None) -> Tensor") + static at::Tensor call(const at::Tensor & self, c10::optional dense_dim); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, c10::optional dense_dim); +}; + +}} // namespace at::_ops diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/trapz_compositeimplicitautograd_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/trapz_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4dfe5056e4d41771e59fa82ad47cdd68456127f1 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/trapz_compositeimplicitautograd_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor trapz(const at::Tensor & y, const at::Tensor & x, int64_t dim=-1); +TORCH_API at::Tensor trapz(const at::Tensor & y, double dx=1, int64_t dim=-1); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/trapz_native.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/trapz_native.h new file mode 100644 index 0000000000000000000000000000000000000000..8a274341b5a1e1360cc19e16ce1a155cf6da73c8 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/trapz_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor trapz(const at::Tensor & y, const at::Tensor & x, int64_t dim=-1); +TORCH_API at::Tensor trapz(const at::Tensor & y, double dx=1, int64_t dim=-1); +} // namespace native +} // namespace at diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/_libs/tslibs/conversion.cpython-310-x86_64-linux-gnu.so b/vlmpy310/lib/python3.10/site-packages/pandas/_libs/tslibs/conversion.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..4baf2245d7118e004bb5eba43d85dacbb23b5f68 --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/pandas/_libs/tslibs/conversion.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4de667c5e6dc7402d2a61c70093eb693db001c35ea4edcf80d92dfac382d3e40 +size 308168 diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/__init__.py b/vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/astype.py b/vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/astype.py new file mode 100644 index 0000000000000000000000000000000000000000..f5579082c679bf131c056f3f2029b2485e88bd0d --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/astype.py @@ -0,0 +1,301 @@ +""" +Functions for implementing 'astype' methods according to pandas conventions, +particularly ones that differ from numpy. +""" +from __future__ import annotations + +import inspect +from typing import ( + TYPE_CHECKING, + overload, +) +import warnings + +import numpy as np + +from pandas._libs import lib +from pandas._libs.tslibs.timedeltas import array_to_timedelta64 +from pandas.errors import IntCastingNaNError + +from pandas.core.dtypes.common import ( + is_object_dtype, + is_string_dtype, + pandas_dtype, +) +from pandas.core.dtypes.dtypes import ( + ExtensionDtype, + NumpyEADtype, +) + +if TYPE_CHECKING: + from pandas._typing import ( + ArrayLike, + DtypeObj, + IgnoreRaise, + ) + + from pandas.core.arrays import ExtensionArray + +_dtype_obj = np.dtype(object) + + +@overload +def _astype_nansafe( + arr: np.ndarray, dtype: np.dtype, copy: bool = ..., skipna: bool = ... +) -> np.ndarray: + ... + + +@overload +def _astype_nansafe( + arr: np.ndarray, dtype: ExtensionDtype, copy: bool = ..., skipna: bool = ... +) -> ExtensionArray: + ... 
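+# A hedged illustration (editorial sketch, not part of pandas): the overloads
+# above only narrow the return type -- an np.dtype target yields an ndarray,
+# an ExtensionDtype target yields an ExtensionArray. One observable behaviour
+# of the implementation below is that casting a float array containing NaN to
+# int64 raises instead of wrapping around:
+#
+#     _astype_nansafe(np.array([1.0, np.nan]), np.dtype("int64"))
+#     # IntCastingNaNError: Cannot convert non-finite values (NA or inf) to integer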
+ + +def _astype_nansafe( + arr: np.ndarray, dtype: DtypeObj, copy: bool = True, skipna: bool = False +) -> ArrayLike: + """ + Cast the elements of an array to a given dtype in a nan-safe manner. + + Parameters + ---------- + arr : ndarray + dtype : np.dtype or ExtensionDtype + copy : bool, default True + If False, a view will be attempted but may fail if, + e.g., the item sizes don't align. + skipna : bool, default False + Whether or not we should skip NaN when casting as a string-type. + + Raises + ------ + ValueError + The dtype was a datetime64/timedelta64 dtype, but it had no unit. + """ + + # dispatch on extension dtype if needed + if isinstance(dtype, ExtensionDtype): + return dtype.construct_array_type()._from_sequence(arr, dtype=dtype, copy=copy) + + elif not isinstance(dtype, np.dtype): # pragma: no cover + raise ValueError("dtype must be np.dtype or ExtensionDtype") + + if arr.dtype.kind in "mM": + from pandas.core.construction import ensure_wrapped_if_datetimelike + + arr = ensure_wrapped_if_datetimelike(arr) + res = arr.astype(dtype, copy=copy) + return np.asarray(res) + + if issubclass(dtype.type, str): + shape = arr.shape + if arr.ndim > 1: + arr = arr.ravel() + return lib.ensure_string_array( + arr, skipna=skipna, convert_na_value=False + ).reshape(shape) + + elif np.issubdtype(arr.dtype, np.floating) and dtype.kind in "iu": + return _astype_float_to_int_nansafe(arr, dtype, copy) + + elif arr.dtype == object: + # if we have a datetime/timedelta array of objects + # then coerce to datetime64[ns] and use DatetimeArray.astype + + if lib.is_np_dtype(dtype, "M"): + from pandas.core.arrays import DatetimeArray + + dta = DatetimeArray._from_sequence(arr, dtype=dtype) + return dta._ndarray + + elif lib.is_np_dtype(dtype, "m"): + from pandas.core.construction import ensure_wrapped_if_datetimelike + + # bc we know arr.dtype == object, this is equivalent to + # `np.asarray(to_timedelta(arr))`, but using a lower-level API that + # does not require a circular import. + tdvals = array_to_timedelta64(arr).view("m8[ns]") + + tda = ensure_wrapped_if_datetimelike(tdvals) + return tda.astype(dtype, copy=False)._ndarray + + if dtype.name in ("datetime64", "timedelta64"): + msg = ( + f"The '{dtype.name}' dtype has no unit. Please pass in " + f"'{dtype.name}[ns]' instead." + ) + raise ValueError(msg) + + if copy or arr.dtype == object or dtype == object: + # Explicit copy, or required since NumPy can't view from / to object. + return arr.astype(dtype, copy=True) + + return arr.astype(dtype, copy=copy) + + +def _astype_float_to_int_nansafe( + values: np.ndarray, dtype: np.dtype, copy: bool +) -> np.ndarray: + """ + astype with a check preventing converting NaN to a meaningless integer value. + """ + if not np.isfinite(values).all(): + raise IntCastingNaNError( + "Cannot convert non-finite values (NA or inf) to integer" + ) + if dtype.kind == "u": + # GH#45151 + if not (values >= 0).all(): + raise ValueError(f"Cannot losslessly cast from {values.dtype} to {dtype}") + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=RuntimeWarning) + return values.astype(dtype, copy=copy) + + +def astype_array(values: ArrayLike, dtype: DtypeObj, copy: bool = False) -> ArrayLike: + """ + Cast array (ndarray or ExtensionArray) to the new dtype.
+ + Parameters + ---------- + values : ndarray or ExtensionArray + dtype : dtype object + copy : bool, default False + copy if indicated + + Returns + ------- + ndarray or ExtensionArray + """ + if values.dtype == dtype: + if copy: + return values.copy() + return values + + if not isinstance(values, np.ndarray): + # i.e. ExtensionArray + values = values.astype(dtype, copy=copy) + + else: + values = _astype_nansafe(values, dtype, copy=copy) + + # in pandas we don't store numpy str dtypes, so convert to object + if isinstance(dtype, np.dtype) and issubclass(values.dtype.type, str): + values = np.array(values, dtype=object) + + return values + + +def astype_array_safe( + values: ArrayLike, dtype, copy: bool = False, errors: IgnoreRaise = "raise" +) -> ArrayLike: + """ + Cast array (ndarray or ExtensionArray) to the new dtype. + + This basically is the implementation for DataFrame/Series.astype and + includes all custom logic for pandas (NaN-safety, converting str to object, + not allowing ) + + Parameters + ---------- + values : ndarray or ExtensionArray + dtype : str, dtype convertible + copy : bool, default False + copy if indicated + errors : str, {'raise', 'ignore'}, default 'raise' + - ``raise`` : allow exceptions to be raised + - ``ignore`` : suppress exceptions. On error return original object + + Returns + ------- + ndarray or ExtensionArray + """ + errors_legal_values = ("raise", "ignore") + + if errors not in errors_legal_values: + invalid_arg = ( + "Expected value of kwarg 'errors' to be one of " + f"{list(errors_legal_values)}. Supplied value is '{errors}'" + ) + raise ValueError(invalid_arg) + + if inspect.isclass(dtype) and issubclass(dtype, ExtensionDtype): + msg = ( + f"Expected an instance of {dtype.__name__}, " + "but got the class instead. Try instantiating 'dtype'." + ) + raise TypeError(msg) + + dtype = pandas_dtype(dtype) + if isinstance(dtype, NumpyEADtype): + # Ensure we don't end up with a NumpyExtensionArray + dtype = dtype.numpy_dtype + + try: + new_values = astype_array(values, dtype, copy=copy) + except (ValueError, TypeError): + # e.g. _astype_nansafe can fail on object-dtype of strings + # trying to convert to float + if errors == "ignore": + new_values = values + else: + raise + + return new_values + + +def astype_is_view(dtype: DtypeObj, new_dtype: DtypeObj) -> bool: + """Checks if astype avoided copying the data. + + Parameters + ---------- + dtype : Original dtype + new_dtype : target dtype + + Returns + ------- + True if new data is a view or not guaranteed to be a copy, False otherwise + """ + if isinstance(dtype, np.dtype) and not isinstance(new_dtype, np.dtype): + new_dtype, dtype = dtype, new_dtype + + if dtype == new_dtype: + return True + + elif isinstance(dtype, np.dtype) and isinstance(new_dtype, np.dtype): + # Only equal numpy dtypes avoid a copy + return False + + elif is_string_dtype(dtype) and is_string_dtype(new_dtype): + # Potentially! 
a view when converting from object to string + return True + + elif is_object_dtype(dtype) and new_dtype.kind == "O": + # When the underlying array has dtype object, we don't have to make a copy + return True + + elif dtype.kind in "mM" and new_dtype.kind in "mM": + dtype = getattr(dtype, "numpy_dtype", dtype) + new_dtype = getattr(new_dtype, "numpy_dtype", new_dtype) + return getattr(dtype, "unit", None) == getattr(new_dtype, "unit", None) + + numpy_dtype = getattr(dtype, "numpy_dtype", None) + new_numpy_dtype = getattr(new_dtype, "numpy_dtype", None) + + if numpy_dtype is None and isinstance(dtype, np.dtype): + numpy_dtype = dtype + + if new_numpy_dtype is None and isinstance(new_dtype, np.dtype): + new_numpy_dtype = new_dtype + + if numpy_dtype is not None and new_numpy_dtype is not None: + # if both have NumPy dtype or one of them is a numpy dtype + # they are only a view when the numpy dtypes are equal, e.g. + # int64 -> Int64 or int64[pyarrow] + # int64 -> Int32 copies + return numpy_dtype == new_numpy_dtype + + # Assume this is a view since we don't know for sure if a copy was made + return True diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/cast.py b/vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/cast.py new file mode 100644 index 0000000000000000000000000000000000000000..b72293b52df06bcca863815da997f13a22c1dad1 --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/cast.py @@ -0,0 +1,1970 @@ +""" +Routines for casting. +""" + +from __future__ import annotations + +import datetime as dt +import functools +from typing import ( + TYPE_CHECKING, + Any, + Literal, + TypeVar, + cast, + overload, +) +import warnings + +import numpy as np + +from pandas._config import using_pyarrow_string_dtype + +from pandas._libs import ( + Interval, + Period, + lib, +) +from pandas._libs.missing import ( + NA, + NAType, + checknull, +) +from pandas._libs.tslibs import ( + NaT, + OutOfBoundsDatetime, + OutOfBoundsTimedelta, + Timedelta, + Timestamp, + is_supported_dtype, +) +from pandas._libs.tslibs.timedeltas import array_to_timedelta64 +from pandas.errors import ( + IntCastingNaNError, + LossySetitemError, +) + +from pandas.core.dtypes.common import ( + ensure_int8, + ensure_int16, + ensure_int32, + ensure_int64, + ensure_object, + ensure_str, + is_bool, + is_complex, + is_float, + is_integer, + is_object_dtype, + is_scalar, + is_string_dtype, + pandas_dtype as pandas_dtype_func, +) +from pandas.core.dtypes.dtypes import ( + ArrowDtype, + BaseMaskedDtype, + CategoricalDtype, + DatetimeTZDtype, + ExtensionDtype, + IntervalDtype, + PandasExtensionDtype, + PeriodDtype, +) +from pandas.core.dtypes.generic import ( + ABCExtensionArray, + ABCIndex, + ABCSeries, +) +from pandas.core.dtypes.inference import is_list_like +from pandas.core.dtypes.missing import ( + is_valid_na_for_dtype, + isna, + na_value_for_dtype, + notna, +) + +from pandas.io._util import _arrow_dtype_mapping + +if TYPE_CHECKING: + from collections.abc import ( + Sequence, + Sized, + ) + + from pandas._typing import ( + ArrayLike, + Dtype, + DtypeObj, + NumpyIndexT, + Scalar, + npt, + ) + + from pandas import Index + from pandas.core.arrays import ( + Categorical, + DatetimeArray, + ExtensionArray, + IntervalArray, + PeriodArray, + TimedeltaArray, + ) + + +_int8_max = np.iinfo(np.int8).max +_int16_max = np.iinfo(np.int16).max +_int32_max = np.iinfo(np.int32).max + +_dtype_obj = np.dtype(object) + +NumpyArrayT = TypeVar("NumpyArrayT", bound=np.ndarray) + + +def maybe_convert_platform( + 
values: list | tuple | range | np.ndarray | ExtensionArray, +) -> ArrayLike: + """try to do platform conversion, allow ndarray or list here""" + arr: ArrayLike + + if isinstance(values, (list, tuple, range)): + arr = construct_1d_object_array_from_listlike(values) + else: + # The caller is responsible for ensuring that we have np.ndarray + # or ExtensionArray here. + arr = values + + if arr.dtype == _dtype_obj: + arr = cast(np.ndarray, arr) + arr = lib.maybe_convert_objects(arr) + + return arr + + +def is_nested_object(obj) -> bool: + """ + return a boolean if we have a nested object, e.g. a Series with 1 or + more Series elements + + This may not be necessarily be performant. + + """ + return bool( + isinstance(obj, ABCSeries) + and is_object_dtype(obj.dtype) + and any(isinstance(v, ABCSeries) for v in obj._values) + ) + + +def maybe_box_datetimelike(value: Scalar, dtype: Dtype | None = None) -> Scalar: + """ + Cast scalar to Timestamp or Timedelta if scalar is datetime-like + and dtype is not object. + + Parameters + ---------- + value : scalar + dtype : Dtype, optional + + Returns + ------- + scalar + """ + if dtype == _dtype_obj: + pass + elif isinstance(value, (np.datetime64, dt.datetime)): + value = Timestamp(value) + elif isinstance(value, (np.timedelta64, dt.timedelta)): + value = Timedelta(value) + + return value + + +def maybe_box_native(value: Scalar | None | NAType) -> Scalar | None | NAType: + """ + If passed a scalar cast the scalar to a python native type. + + Parameters + ---------- + value : scalar or Series + + Returns + ------- + scalar or Series + """ + if is_float(value): + value = float(value) + elif is_integer(value): + value = int(value) + elif is_bool(value): + value = bool(value) + elif isinstance(value, (np.datetime64, np.timedelta64)): + value = maybe_box_datetimelike(value) + elif value is NA: + value = None + return value + + +def _maybe_unbox_datetimelike(value: Scalar, dtype: DtypeObj) -> Scalar: + """ + Convert a Timedelta or Timestamp to timedelta64 or datetime64 for setting + into a numpy array. Failing to unbox would risk dropping nanoseconds. + + Notes + ----- + Caller is responsible for checking dtype.kind in "mM" + """ + if is_valid_na_for_dtype(value, dtype): + # GH#36541: can't fill array directly with pd.NaT + # > np.empty(10, dtype="datetime64[ns]").fill(pd.NaT) + # ValueError: cannot convert float NaN to integer + value = dtype.type("NaT", "ns") + elif isinstance(value, Timestamp): + if value.tz is None: + value = value.to_datetime64() + elif not isinstance(dtype, DatetimeTZDtype): + raise TypeError("Cannot unbox tzaware Timestamp to tznaive dtype") + elif isinstance(value, Timedelta): + value = value.to_timedelta64() + + _disallow_mismatched_datetimelike(value, dtype) + return value + + +def _disallow_mismatched_datetimelike(value, dtype: DtypeObj): + """ + numpy allows np.array(dt64values, dtype="timedelta64[ns]") and + vice-versa, but we do not want to allow this, so we need to + check explicitly + """ + vdtype = getattr(value, "dtype", None) + if vdtype is None: + return + elif (vdtype.kind == "m" and dtype.kind == "M") or ( + vdtype.kind == "M" and dtype.kind == "m" + ): + raise TypeError(f"Cannot cast {repr(value)} to {dtype}") + + +@overload +def maybe_downcast_to_dtype(result: np.ndarray, dtype: str | np.dtype) -> np.ndarray: + ... + + +@overload +def maybe_downcast_to_dtype(result: ExtensionArray, dtype: str | np.dtype) -> ArrayLike: + ... 
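+# A hedged illustration (editorial sketch, not part of pandas): with
+# dtype="infer", the implementation below downcasts only when the values
+# survive the round trip exactly:
+#
+#     maybe_downcast_to_dtype(np.array([1.0, 2.0]), "infer")  # -> int64 array([1, 2])
+#     maybe_downcast_to_dtype(np.array([1.5, 2.0]), "infer")  # -> float64, unchanged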
+ + +def maybe_downcast_to_dtype(result: ArrayLike, dtype: str | np.dtype) -> ArrayLike: + """ + try to cast to the specified dtype (e.g. convert back to bool/int + or could be an astype of float64->float32 + """ + if isinstance(result, ABCSeries): + result = result._values + do_round = False + + if isinstance(dtype, str): + if dtype == "infer": + inferred_type = lib.infer_dtype(result, skipna=False) + if inferred_type == "boolean": + dtype = "bool" + elif inferred_type == "integer": + dtype = "int64" + elif inferred_type == "datetime64": + dtype = "datetime64[ns]" + elif inferred_type in ["timedelta", "timedelta64"]: + dtype = "timedelta64[ns]" + + # try to upcast here + elif inferred_type == "floating": + dtype = "int64" + if issubclass(result.dtype.type, np.number): + do_round = True + + else: + # TODO: complex? what if result is already non-object? + dtype = "object" + + dtype = np.dtype(dtype) + + if not isinstance(dtype, np.dtype): + # enforce our signature annotation + raise TypeError(dtype) # pragma: no cover + + converted = maybe_downcast_numeric(result, dtype, do_round) + if converted is not result: + return converted + + # a datetimelike + # GH12821, iNaT is cast to float + if dtype.kind in "mM" and result.dtype.kind in "if": + result = result.astype(dtype) + + elif dtype.kind == "m" and result.dtype == _dtype_obj: + # test_where_downcast_to_td64 + result = cast(np.ndarray, result) + result = array_to_timedelta64(result) + + elif dtype == np.dtype("M8[ns]") and result.dtype == _dtype_obj: + result = cast(np.ndarray, result) + return np.asarray(maybe_cast_to_datetime(result, dtype=dtype)) + + return result + + +@overload +def maybe_downcast_numeric( + result: np.ndarray, dtype: np.dtype, do_round: bool = False +) -> np.ndarray: + ... + + +@overload +def maybe_downcast_numeric( + result: ExtensionArray, dtype: DtypeObj, do_round: bool = False +) -> ArrayLike: + ... + + +def maybe_downcast_numeric( + result: ArrayLike, dtype: DtypeObj, do_round: bool = False +) -> ArrayLike: + """ + Subset of maybe_downcast_to_dtype restricted to numeric dtypes. + + Parameters + ---------- + result : ndarray or ExtensionArray + dtype : np.dtype or ExtensionDtype + do_round : bool + + Returns + ------- + ndarray or ExtensionArray + """ + if not isinstance(dtype, np.dtype) or not isinstance(result.dtype, np.dtype): + # e.g. SparseDtype has no itemsize attr + return result + + def trans(x): + if do_round: + return x.round() + return x + + if dtype.kind == result.dtype.kind: + # don't allow upcasts here (except if empty) + if result.dtype.itemsize <= dtype.itemsize and result.size: + return result + + if dtype.kind in "biu": + if not result.size: + # if we don't have any elements, just astype it + return trans(result).astype(dtype) + + if isinstance(result, np.ndarray): + element = result.item(0) + else: + element = result.iloc[0] + if not isinstance(element, (np.integer, np.floating, int, float, bool)): + # a comparable, e.g. 
a Decimal may slip in here + return result + + if ( + issubclass(result.dtype.type, (np.object_, np.number)) + and notna(result).all() + ): + new_result = trans(result).astype(dtype) + if new_result.dtype.kind == "O" or result.dtype.kind == "O": + # np.allclose may raise TypeError on object-dtype + if (new_result == result).all(): + return new_result + else: + if np.allclose(new_result, result, rtol=0): + return new_result + + elif ( + issubclass(dtype.type, np.floating) + and result.dtype.kind != "b" + and not is_string_dtype(result.dtype) + ): + with warnings.catch_warnings(): + warnings.filterwarnings( + "ignore", "overflow encountered in cast", RuntimeWarning + ) + new_result = result.astype(dtype) + + # Adjust tolerances based on floating point size + size_tols = {4: 5e-4, 8: 5e-8, 16: 5e-16} + + atol = size_tols.get(new_result.dtype.itemsize, 0.0) + + # Check downcast float values are still equal within 7 digits when + # converting from float64 to float32 + if np.allclose(new_result, result, equal_nan=True, rtol=0.0, atol=atol): + return new_result + + elif dtype.kind == result.dtype.kind == "c": + new_result = result.astype(dtype) + + if np.array_equal(new_result, result, equal_nan=True): + # TODO: use tolerance like we do for float? + return new_result + + return result + + +def maybe_upcast_numeric_to_64bit(arr: NumpyIndexT) -> NumpyIndexT: + """ + If array is a int/uint/float bit size lower than 64 bit, upcast it to 64 bit. + + Parameters + ---------- + arr : ndarray or ExtensionArray + + Returns + ------- + ndarray or ExtensionArray + """ + dtype = arr.dtype + if dtype.kind == "i" and dtype != np.int64: + return arr.astype(np.int64) + elif dtype.kind == "u" and dtype != np.uint64: + return arr.astype(np.uint64) + elif dtype.kind == "f" and dtype != np.float64: + return arr.astype(np.float64) + else: + return arr + + +def maybe_cast_pointwise_result( + result: ArrayLike, + dtype: DtypeObj, + numeric_only: bool = False, + same_dtype: bool = True, +) -> ArrayLike: + """ + Try casting result of a pointwise operation back to the original dtype if + appropriate. + + Parameters + ---------- + result : array-like + Result to cast. + dtype : np.dtype or ExtensionDtype + Input Series from which result was calculated. + numeric_only : bool, default False + Whether to cast only numerics or datetimes as well. + same_dtype : bool, default True + Specify dtype when calling _from_sequence + + Returns + ------- + result : array-like + result maybe casted to the dtype. + """ + + if isinstance(dtype, ExtensionDtype): + cls = dtype.construct_array_type() + if same_dtype: + result = _maybe_cast_to_extension_array(cls, result, dtype=dtype) + else: + result = _maybe_cast_to_extension_array(cls, result) + + elif (numeric_only and dtype.kind in "iufcb") or not numeric_only: + result = maybe_downcast_to_dtype(result, dtype) + + return result + + +def _maybe_cast_to_extension_array( + cls: type[ExtensionArray], obj: ArrayLike, dtype: ExtensionDtype | None = None +) -> ArrayLike: + """ + Call to `_from_sequence` that returns the object unchanged on Exception. 
+ + Parameters + ---------- + cls : class, subclass of ExtensionArray + obj : arraylike + Values to pass to cls._from_sequence + dtype : ExtensionDtype, optional + + Returns + ------- + ExtensionArray or obj + """ + result: ArrayLike + + if dtype is not None: + try: + result = cls._from_scalars(obj, dtype=dtype) + except (TypeError, ValueError): + return obj + return result + + try: + result = cls._from_sequence(obj, dtype=dtype) + except Exception: + # We can't predict what downstream EA constructors may raise + result = obj + return result + + +@overload +def ensure_dtype_can_hold_na(dtype: np.dtype) -> np.dtype: + ... + + +@overload +def ensure_dtype_can_hold_na(dtype: ExtensionDtype) -> ExtensionDtype: + ... + + +def ensure_dtype_can_hold_na(dtype: DtypeObj) -> DtypeObj: + """ + If we have a dtype that cannot hold NA values, find the best match that can. + """ + if isinstance(dtype, ExtensionDtype): + if dtype._can_hold_na: + return dtype + elif isinstance(dtype, IntervalDtype): + # TODO(GH#45349): don't special-case IntervalDtype, allow + # overriding instead of returning object below. + return IntervalDtype(np.float64, closed=dtype.closed) + return _dtype_obj + elif dtype.kind == "b": + return _dtype_obj + elif dtype.kind in "iu": + return np.dtype(np.float64) + return dtype + + +_canonical_nans = { + np.datetime64: np.datetime64("NaT", "ns"), + np.timedelta64: np.timedelta64("NaT", "ns"), + type(np.nan): np.nan, +} + + +def maybe_promote(dtype: np.dtype, fill_value=np.nan): + """ + Find the minimal dtype that can hold both the given dtype and fill_value. + + Parameters + ---------- + dtype : np.dtype + fill_value : scalar, default np.nan + + Returns + ------- + dtype + Upcasted from dtype argument if necessary. + fill_value + Upcasted from fill_value argument if necessary. + + Raises + ------ + ValueError + If fill_value is a non-scalar and dtype is not object. + """ + orig = fill_value + orig_is_nat = False + if checknull(fill_value): + # https://github.com/pandas-dev/pandas/pull/39692#issuecomment-1441051740 + # avoid cache misses with NaN/NaT values that are not singletons + if fill_value is not NA: + try: + orig_is_nat = np.isnat(fill_value) + except TypeError: + pass + + fill_value = _canonical_nans.get(type(fill_value), fill_value) + + # for performance, we are using a cached version of the actual implementation + # of the function in _maybe_promote. 
However, this doesn't always work (in case + # of non-hashable arguments), so we fallback to the actual implementation if needed + try: + # error: Argument 3 to "__call__" of "_lru_cache_wrapper" has incompatible type + # "Type[Any]"; expected "Hashable" [arg-type] + dtype, fill_value = _maybe_promote_cached( + dtype, fill_value, type(fill_value) # type: ignore[arg-type] + ) + except TypeError: + # if fill_value is not hashable (required for caching) + dtype, fill_value = _maybe_promote(dtype, fill_value) + + if (dtype == _dtype_obj and orig is not None) or ( + orig_is_nat and np.datetime_data(orig)[0] != "ns" + ): + # GH#51592,53497 restore our potentially non-canonical fill_value + fill_value = orig + return dtype, fill_value + + +@functools.lru_cache +def _maybe_promote_cached(dtype, fill_value, fill_value_type): + # The cached version of _maybe_promote below + # This also use fill_value_type as (unused) argument to use this in the + # cache lookup -> to differentiate 1 and True + return _maybe_promote(dtype, fill_value) + + +def _maybe_promote(dtype: np.dtype, fill_value=np.nan): + # The actual implementation of the function, use `maybe_promote` above for + # a cached version. + if not is_scalar(fill_value): + # with object dtype there is nothing to promote, and the user can + # pass pretty much any weird fill_value they like + if dtype != object: + # with object dtype there is nothing to promote, and the user can + # pass pretty much any weird fill_value they like + raise ValueError("fill_value must be a scalar") + dtype = _dtype_obj + return dtype, fill_value + + if is_valid_na_for_dtype(fill_value, dtype) and dtype.kind in "iufcmM": + dtype = ensure_dtype_can_hold_na(dtype) + fv = na_value_for_dtype(dtype) + return dtype, fv + + elif isinstance(dtype, CategoricalDtype): + if fill_value in dtype.categories or isna(fill_value): + return dtype, fill_value + else: + return object, ensure_object(fill_value) + + elif isna(fill_value): + dtype = _dtype_obj + if fill_value is None: + # but we retain e.g. pd.NA + fill_value = np.nan + return dtype, fill_value + + # returns tuple of (dtype, fill_value) + if issubclass(dtype.type, np.datetime64): + inferred, fv = infer_dtype_from_scalar(fill_value) + if inferred == dtype: + return dtype, fv + + from pandas.core.arrays import DatetimeArray + + dta = DatetimeArray._from_sequence([], dtype="M8[ns]") + try: + fv = dta._validate_setitem_value(fill_value) + return dta.dtype, fv + except (ValueError, TypeError): + return _dtype_obj, fill_value + + elif issubclass(dtype.type, np.timedelta64): + inferred, fv = infer_dtype_from_scalar(fill_value) + if inferred == dtype: + return dtype, fv + + elif inferred.kind == "m": + # different unit, e.g. passed np.timedelta64(24, "h") with dtype=m8[ns] + # see if we can losslessly cast it to our dtype + unit = np.datetime_data(dtype)[0] + try: + td = Timedelta(fill_value).as_unit(unit, round_ok=False) + except OutOfBoundsTimedelta: + return _dtype_obj, fill_value + else: + return dtype, td.asm8 + + return _dtype_obj, fill_value + + elif is_float(fill_value): + if issubclass(dtype.type, np.bool_): + dtype = np.dtype(np.object_) + + elif issubclass(dtype.type, np.integer): + dtype = np.dtype(np.float64) + + elif dtype.kind == "f": + mst = np.min_scalar_type(fill_value) + if mst > dtype: + # e.g. 
mst is np.float64 and dtype is np.float32 + dtype = mst + + elif dtype.kind == "c": + mst = np.min_scalar_type(fill_value) + dtype = np.promote_types(dtype, mst) + + elif is_bool(fill_value): + if not issubclass(dtype.type, np.bool_): + dtype = np.dtype(np.object_) + + elif is_integer(fill_value): + if issubclass(dtype.type, np.bool_): + dtype = np.dtype(np.object_) + + elif issubclass(dtype.type, np.integer): + if not np_can_cast_scalar(fill_value, dtype): # type: ignore[arg-type] + # upcast to prevent overflow + mst = np.min_scalar_type(fill_value) + dtype = np.promote_types(dtype, mst) + if dtype.kind == "f": + # Case where we disagree with numpy + dtype = np.dtype(np.object_) + + elif is_complex(fill_value): + if issubclass(dtype.type, np.bool_): + dtype = np.dtype(np.object_) + + elif issubclass(dtype.type, (np.integer, np.floating)): + mst = np.min_scalar_type(fill_value) + dtype = np.promote_types(dtype, mst) + + elif dtype.kind == "c": + mst = np.min_scalar_type(fill_value) + if mst > dtype: + # e.g. mst is np.complex128 and dtype is np.complex64 + dtype = mst + + else: + dtype = np.dtype(np.object_) + + # in case we have a string that looked like a number + if issubclass(dtype.type, (bytes, str)): + dtype = np.dtype(np.object_) + + fill_value = _ensure_dtype_type(fill_value, dtype) + return dtype, fill_value + + +def _ensure_dtype_type(value, dtype: np.dtype): + """ + Ensure that the given value is an instance of the given dtype. + + e.g. if out dtype is np.complex64_, we should have an instance of that + as opposed to a python complex object. + + Parameters + ---------- + value : object + dtype : np.dtype + + Returns + ------- + object + """ + # Start with exceptions in which we do _not_ cast to numpy types + + if dtype == _dtype_obj: + return value + + # Note: before we get here we have already excluded isna(value) + return dtype.type(value) + + +def infer_dtype_from(val) -> tuple[DtypeObj, Any]: + """ + Interpret the dtype from a scalar or array. + + Parameters + ---------- + val : object + """ + if not is_list_like(val): + return infer_dtype_from_scalar(val) + return infer_dtype_from_array(val) + + +def infer_dtype_from_scalar(val) -> tuple[DtypeObj, Any]: + """ + Interpret the dtype from a scalar. + + Parameters + ---------- + val : object + """ + dtype: DtypeObj = _dtype_obj + + # a 1-element ndarray + if isinstance(val, np.ndarray): + if val.ndim != 0: + msg = "invalid ndarray passed to infer_dtype_from_scalar" + raise ValueError(msg) + + dtype = val.dtype + val = lib.item_from_zerodim(val) + + elif isinstance(val, str): + # If we create an empty array using a string to infer + # the dtype, NumPy will only allocate one character per entry + # so this is kind of bad. Alternately we could use np.repeat + # instead of np.empty (but then you still don't want things + # coming out as np.str_! 
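+        # A hedged illustration (editorial sketch, not part of pandas): with the
+        # default (non-pyarrow) string option, a plain str infers object dtype:
+        #
+        #     infer_dtype_from_scalar("foo")  # -> (dtype('O'), 'foo')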
+ + dtype = _dtype_obj + if using_pyarrow_string_dtype(): + from pandas.core.arrays.string_ import StringDtype + + dtype = StringDtype(storage="pyarrow_numpy") + + elif isinstance(val, (np.datetime64, dt.datetime)): + try: + val = Timestamp(val) + except OutOfBoundsDatetime: + return _dtype_obj, val + + if val is NaT or val.tz is None: + val = val.to_datetime64() + dtype = val.dtype + # TODO: test with datetime(2920, 10, 1) based on test_replace_dtypes + else: + dtype = DatetimeTZDtype(unit=val.unit, tz=val.tz) + + elif isinstance(val, (np.timedelta64, dt.timedelta)): + try: + val = Timedelta(val) + except (OutOfBoundsTimedelta, OverflowError): + dtype = _dtype_obj + else: + if val is NaT: + val = np.timedelta64("NaT", "ns") + else: + val = val.asm8 + dtype = val.dtype + + elif is_bool(val): + dtype = np.dtype(np.bool_) + + elif is_integer(val): + if isinstance(val, np.integer): + dtype = np.dtype(type(val)) + else: + dtype = np.dtype(np.int64) + + try: + np.array(val, dtype=dtype) + except OverflowError: + dtype = np.array(val).dtype + + elif is_float(val): + if isinstance(val, np.floating): + dtype = np.dtype(type(val)) + else: + dtype = np.dtype(np.float64) + + elif is_complex(val): + dtype = np.dtype(np.complex128) + + if isinstance(val, Period): + dtype = PeriodDtype(freq=val.freq) + elif isinstance(val, Interval): + subtype = infer_dtype_from_scalar(val.left)[0] + dtype = IntervalDtype(subtype=subtype, closed=val.closed) + + return dtype, val + + +def dict_compat(d: dict[Scalar, Scalar]) -> dict[Scalar, Scalar]: + """ + Convert datetimelike-keyed dicts to a Timestamp-keyed dict. + + Parameters + ---------- + d : dict-like object + + Returns + ------- + dict + """ + return {maybe_box_datetimelike(key): value for key, value in d.items()} + + +def infer_dtype_from_array(arr) -> tuple[DtypeObj, ArrayLike]: + """ + Infer the dtype from an array. + + Parameters + ---------- + arr : array + + Returns + ------- + tuple (pandas-compat dtype, array) + + + Examples + -------- + >>> np.asarray([1, '1']) + array(['1', '1'], dtype='<U21') + + >>> infer_dtype_from_array([1, '1']) + (dtype('O'), [1, '1']) + """ + if isinstance(arr, np.ndarray): + return arr.dtype, arr + + if not is_list_like(arr): + raise TypeError("'arr' must be list-like") + + arr_dtype = getattr(arr, "dtype", None) + if isinstance(arr_dtype, ExtensionDtype): + return arr.dtype, arr + + elif isinstance(arr, ABCSeries): + return arr.dtype, np.asarray(arr) + + # don't force numpy coerce with nan's + inferred = lib.infer_dtype(arr, skipna=False) + if inferred in ["string", "bytes", "mixed", "mixed-integer"]: + return (np.dtype(np.object_), arr) + + arr = np.asarray(arr) + return arr.dtype, arr + + +def _maybe_infer_dtype_type(element): + """ + Try to infer an object's dtype, for use in arithmetic ops. + + Uses `element.dtype` if that's available. + Objects implementing the iterator protocol are cast to a NumPy array, + and from there the array's type is used. + + Parameters + ---------- + element : object + Possibly has a `.dtype` attribute, and possibly the iterator + protocol.
+ + Returns + ------- + tipo : type + + Examples + -------- + >>> from collections import namedtuple + >>> Foo = namedtuple("Foo", "dtype") + >>> _maybe_infer_dtype_type(Foo(np.dtype("i8"))) + dtype('int64') + """ + tipo = None + if hasattr(element, "dtype"): + tipo = element.dtype + elif is_list_like(element): + element = np.asarray(element) + tipo = element.dtype + return tipo + + +def invalidate_string_dtypes(dtype_set: set[DtypeObj]) -> None: + """ + Change string-like dtypes to object for + ``DataFrame.select_dtypes()``. + """ + # error: Argument 1 to <set> has incompatible type "Type[generic]"; expected + # "Union[dtype[Any], ExtensionDtype, None]" + # error: Argument 2 to <set> has incompatible type "Type[generic]"; expected + # "Union[dtype[Any], ExtensionDtype, None]" + non_string_dtypes = dtype_set - { + np.dtype("S").type, # type: ignore[arg-type] + np.dtype("<U").type, # type: ignore[arg-type] + } + if non_string_dtypes != dtype_set: + raise TypeError("string dtypes are not allowed, use 'object' instead") + + +def coerce_indexer_dtype(indexer, categories) -> np.ndarray: + """coerce the indexer input array to the smallest dtype possible""" + length = len(categories) + if length < _int8_max: + return ensure_int8(indexer) + elif length < _int16_max: + return ensure_int16(indexer) + elif length < _int32_max: + return ensure_int32(indexer) + return ensure_int64(indexer) + + +def convert_dtypes( + input_array: ArrayLike, + convert_string: bool = True, + convert_integer: bool = True, + convert_boolean: bool = True, + convert_floating: bool = True, + infer_objects: bool = False, + dtype_backend: Literal["numpy_nullable", "pyarrow"] = "numpy_nullable", +) -> DtypeObj: + """ + Convert objects to best possible type, and optionally, + to types supporting ``pd.NA``. + + Parameters + ---------- + input_array : ExtensionArray or np.ndarray + convert_string : bool, default True + Whether object dtypes should be converted to ``StringDtype()``. + convert_integer : bool, default True + Whether, if possible, conversion can be done to integer extension types. + convert_boolean : bool, default True + Whether object dtypes should be converted to ``BooleanDtype()``. + convert_floating : bool, default True + Whether, if possible, conversion can be done to floating extension types. + If `convert_integer` is also True, preference will be given to integer + dtypes if the floats can be faithfully cast to integers. + infer_objects : bool, default False + Whether to also infer objects to float/int if possible. Is only hit if the + object array contains pd.NA. + dtype_backend : {'numpy_nullable', 'pyarrow'}, default 'numpy_nullable' + Back-end data type applied to the resultant :class:`DataFrame` + (still experimental). Behaviour is as follows: + + * ``"numpy_nullable"``: returns nullable-dtype-backed :class:`DataFrame` + (default). + * ``"pyarrow"``: returns pyarrow-backed nullable :class:`ArrowDtype` + DataFrame. + + ..
versionadded:: 2.0 + + Returns + ------- + np.dtype, or ExtensionDtype + """ + inferred_dtype: str | DtypeObj + + if ( + convert_string or convert_integer or convert_boolean or convert_floating + ) and isinstance(input_array, np.ndarray): + if input_array.dtype == object: + inferred_dtype = lib.infer_dtype(input_array) + else: + inferred_dtype = input_array.dtype + + if is_string_dtype(inferred_dtype): + if not convert_string or inferred_dtype == "bytes": + inferred_dtype = input_array.dtype + else: + inferred_dtype = pandas_dtype_func("string") + + if convert_integer: + target_int_dtype = pandas_dtype_func("Int64") + + if input_array.dtype.kind in "iu": + from pandas.core.arrays.integer import NUMPY_INT_TO_DTYPE + + inferred_dtype = NUMPY_INT_TO_DTYPE.get( + input_array.dtype, target_int_dtype + ) + elif input_array.dtype.kind in "fcb": + # TODO: de-dup with maybe_cast_to_integer_array? + arr = input_array[notna(input_array)] + if (arr.astype(int) == arr).all(): + inferred_dtype = target_int_dtype + else: + inferred_dtype = input_array.dtype + elif ( + infer_objects + and input_array.dtype == object + and (isinstance(inferred_dtype, str) and inferred_dtype == "integer") + ): + inferred_dtype = target_int_dtype + + if convert_floating: + if input_array.dtype.kind in "fcb": + # i.e. numeric but not integer + from pandas.core.arrays.floating import NUMPY_FLOAT_TO_DTYPE + + inferred_float_dtype: DtypeObj = NUMPY_FLOAT_TO_DTYPE.get( + input_array.dtype, pandas_dtype_func("Float64") + ) + # if we could also convert to integer, check if all floats + # are actually integers + if convert_integer: + # TODO: de-dup with maybe_cast_to_integer_array? + arr = input_array[notna(input_array)] + if (arr.astype(int) == arr).all(): + inferred_dtype = pandas_dtype_func("Int64") + else: + inferred_dtype = inferred_float_dtype + else: + inferred_dtype = inferred_float_dtype + elif ( + infer_objects + and input_array.dtype == object + and ( + isinstance(inferred_dtype, str) + and inferred_dtype == "mixed-integer-float" + ) + ): + inferred_dtype = pandas_dtype_func("Float64") + + if convert_boolean: + if input_array.dtype.kind == "b": + inferred_dtype = pandas_dtype_func("boolean") + elif isinstance(inferred_dtype, str) and inferred_dtype == "boolean": + inferred_dtype = pandas_dtype_func("boolean") + + if isinstance(inferred_dtype, str): + # If we couldn't do anything else, then we retain the dtype + inferred_dtype = input_array.dtype + + else: + inferred_dtype = input_array.dtype + + if dtype_backend == "pyarrow": + from pandas.core.arrays.arrow.array import to_pyarrow_type + from pandas.core.arrays.string_ import StringDtype + + assert not isinstance(inferred_dtype, str) + + if ( + (convert_integer and inferred_dtype.kind in "iu") + or (convert_floating and inferred_dtype.kind in "fc") + or (convert_boolean and inferred_dtype.kind == "b") + or (convert_string and isinstance(inferred_dtype, StringDtype)) + or ( + inferred_dtype.kind not in "iufcb" + and not isinstance(inferred_dtype, StringDtype) + ) + ): + if isinstance(inferred_dtype, PandasExtensionDtype) and not isinstance( + inferred_dtype, DatetimeTZDtype + ): + base_dtype = inferred_dtype.base + elif isinstance(inferred_dtype, (BaseMaskedDtype, ArrowDtype)): + base_dtype = inferred_dtype.numpy_dtype + elif isinstance(inferred_dtype, StringDtype): + base_dtype = np.dtype(str) + else: + base_dtype = inferred_dtype + if ( + base_dtype.kind == "O" # type: ignore[union-attr] + and input_array.size > 0 + and isna(input_array).all() + ): + import pyarrow as pa 
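+ + # (editor's note) This branch covers an object-dtype input that is entirely + # NA: it carries no usable type information, so it maps to pyarrow's null + # type below rather than guessing a storage type.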
+ + pa_type = pa.null() + else: + pa_type = to_pyarrow_type(base_dtype) + if pa_type is not None: + inferred_dtype = ArrowDtype(pa_type) + elif dtype_backend == "numpy_nullable" and isinstance(inferred_dtype, ArrowDtype): + # GH 53648 + inferred_dtype = _arrow_dtype_mapping()[inferred_dtype.pyarrow_dtype] + + # error: Incompatible return value type (got "Union[str, Union[dtype[Any], + # ExtensionDtype]]", expected "Union[dtype[Any], ExtensionDtype]") + return inferred_dtype # type: ignore[return-value] + + +def maybe_infer_to_datetimelike( + value: npt.NDArray[np.object_], +) -> np.ndarray | DatetimeArray | TimedeltaArray | PeriodArray | IntervalArray: + """ + we might have an array (or single object) that is datetime-like; if no + dtype was passed, don't change the value unless we find a + datetime/timedelta set + + this is pretty strict in that a datetime/timedelta is REQUIRED + in addition to possible nulls/string-likes + + Parameters + ---------- + value : np.ndarray[object] + + Returns + ------- + np.ndarray, DatetimeArray, TimedeltaArray, PeriodArray, or IntervalArray + + """ + if not isinstance(value, np.ndarray) or value.dtype != object: + # Caller is responsible for passing only ndarray[object] + raise TypeError(type(value)) # pragma: no cover + if value.ndim != 1: + # Caller is responsible + raise ValueError(value.ndim) # pragma: no cover + + if not len(value): + return value + + # error: Incompatible return value type (got "Union[ExtensionArray, + # ndarray[Any, Any]]", expected "Union[ndarray[Any, Any], DatetimeArray, + # TimedeltaArray, PeriodArray, IntervalArray]") + return lib.maybe_convert_objects( # type: ignore[return-value] + value, + # Here we do not convert numeric dtypes, as if we wanted that, + # numpy would have done it for us. + convert_numeric=False, + convert_non_numeric=True, + dtype_if_all_nat=np.dtype("M8[ns]"), + ) + + +def maybe_cast_to_datetime( + value: np.ndarray | list, dtype: np.dtype +) -> ExtensionArray | np.ndarray: + """ + try to cast the array/value to a datetimelike dtype, converting float + nan to iNaT + + Caller is responsible for handling ExtensionDtype cases and non dt64/td64 + cases. + """ + from pandas.core.arrays.datetimes import DatetimeArray + from pandas.core.arrays.timedeltas import TimedeltaArray + + assert dtype.kind in "mM" + if not is_list_like(value): + raise TypeError("value must be listlike") + + # TODO: _from_sequence would raise ValueError in cases where + # _ensure_nanosecond_dtype raises TypeError + _ensure_nanosecond_dtype(dtype) + + if lib.is_np_dtype(dtype, "m"): + res = TimedeltaArray._from_sequence(value, dtype=dtype) + return res + else: + try: + dta = DatetimeArray._from_sequence(value, dtype=dtype) + except ValueError as err: + # We can give a Series-specific exception message. + if "cannot supply both a tz and a timezone-naive dtype" in str(err): + raise ValueError( + "Cannot convert timezone-aware data to " + "timezone-naive dtype. Use " + "pd.Series(values).dt.tz_localize(None) instead." + ) from err + raise + + return dta + + +def _ensure_nanosecond_dtype(dtype: DtypeObj) -> None: + """ + Check that a datetime/timedelta dtype has a supported resolution, + raising otherwise + + >>> _ensure_nanosecond_dtype(np.dtype("M8[us]")) + + >>> _ensure_nanosecond_dtype(np.dtype("M8[D]")) + Traceback (most recent call last): + ... + TypeError: dtype=datetime64[D] is not supported. Supported resolutions are 's', 'ms', 'us', and 'ns' + + >>> _ensure_nanosecond_dtype(np.dtype("m8[ps]")) + Traceback (most recent call last): + ...
+ TypeError: dtype=timedelta64[ps] is not supported. Supported resolutions are 's', 'ms', 'us', and 'ns' + """ # noqa: E501 + msg = ( + f"The '{dtype.name}' dtype has no unit. " + f"Please pass in '{dtype.name}[ns]' instead." + ) + + # unpack e.g. SparseDtype + dtype = getattr(dtype, "subtype", dtype) + + if not isinstance(dtype, np.dtype): + # i.e. datetime64tz + pass + + elif dtype.kind in "mM": + if not is_supported_dtype(dtype): + # pre-2.0 we would silently swap in nanos for lower-resolutions, + # raise for above-nano resolutions + if dtype.name in ["datetime64", "timedelta64"]: + raise ValueError(msg) + # TODO: ValueError or TypeError? existing test + # test_constructor_generic_timestamp_bad_frequency expects TypeError + raise TypeError( + f"dtype={dtype} is not supported. Supported resolutions are 's', " + "'ms', 'us', and 'ns'" + ) + + +# TODO: other value-dependent functions to standardize here include +# Index._find_common_type_compat +def find_result_type(left_dtype: DtypeObj, right: Any) -> DtypeObj: + """ + Find the type/dtype for the result of an operation between objects. + + This is similar to find_common_type, but looks at the right object instead + of just its dtype. This can be useful in particular when the right + object does not have a `dtype`. + + Parameters + ---------- + left_dtype : np.dtype or ExtensionDtype + right : Any + + Returns + ------- + np.dtype or ExtensionDtype + + See also + -------- + find_common_type + numpy.result_type + """ + new_dtype: DtypeObj + + if ( + isinstance(left_dtype, np.dtype) + and left_dtype.kind in "iuc" + and (lib.is_integer(right) or lib.is_float(right)) + ): + # e.g. with int8 dtype and right=512, we want to end up with + # np.int16, whereas infer_dtype_from(512) gives np.int64, + # which will make us upcast too far. + if lib.is_float(right) and right.is_integer() and left_dtype.kind != "f": + right = int(right) + # After NEP 50, numpy won't inspect Python scalars + # TODO: do we need to recreate numpy's inspection logic for floats too + # (this breaks some tests) + if isinstance(right, int) and not isinstance(right, np.integer): + # This gives an unsigned type by default + # (if our number is positive) + + # If our left dtype is signed, we might not want this since + # this might give us 1 dtype too big + # We should check if the corresponding int dtype (e.g. int64 for uint64) + # can hold the number + right_dtype = np.min_scalar_type(right) + if right == 0: + # Special case 0 + right = left_dtype + elif ( + not np.issubdtype(left_dtype, np.unsignedinteger) + and 0 < right <= np.iinfo(right_dtype).max + ): + # If left dtype isn't unsigned, check if it fits in the signed dtype + right = np.dtype(f"i{right_dtype.itemsize}") + else: + right = right_dtype + + new_dtype = np.result_type(left_dtype, right) + + elif is_valid_na_for_dtype(right, left_dtype): + # e.g. IntervalDtype[int] and None/np.nan + new_dtype = ensure_dtype_can_hold_na(left_dtype) + + else: + dtype, _ = infer_dtype_from(right) + new_dtype = find_common_type([left_dtype, dtype]) + + return new_dtype + + +def common_dtype_categorical_compat( + objs: Sequence[Index | ArrayLike], dtype: DtypeObj +) -> DtypeObj: + """ + Update the result of find_common_type to account for NAs in a Categorical. 
+ + Parameters + ---------- + objs : list[np.ndarray | ExtensionArray | Index] + dtype : np.dtype or ExtensionDtype + + Returns + ------- + np.dtype or ExtensionDtype + """ + # GH#38240 + + # TODO: more generally, could do `not can_hold_na(dtype)` + if lib.is_np_dtype(dtype, "iu"): + for obj in objs: + # We don't want to accidentally allow e.g. "categorical" str here + obj_dtype = getattr(obj, "dtype", None) + if isinstance(obj_dtype, CategoricalDtype): + if isinstance(obj, ABCIndex): + # This check may already be cached + hasnas = obj.hasnans + else: + # Categorical + hasnas = cast("Categorical", obj)._hasna + + if hasnas: + # see test_union_int_categorical_with_nan + dtype = np.dtype(np.float64) + break + return dtype + + +def np_find_common_type(*dtypes: np.dtype) -> np.dtype: + """ + np.find_common_type implementation pre-1.25 deprecation using np.result_type + https://github.com/pandas-dev/pandas/pull/49569#issuecomment-1308300065 + + Parameters + ---------- + dtypes : np.dtypes + + Returns + ------- + np.dtype + """ + try: + common_dtype = np.result_type(*dtypes) + if common_dtype.kind in "mMSU": + # NumPy promotion currently (1.25) misbehaves for times and strings, + # so fall back to object (np.find_common_type did, unless there + # was only one dtype) + common_dtype = np.dtype("O") + + except TypeError: + common_dtype = np.dtype("O") + return common_dtype + + +@overload +def find_common_type(types: list[np.dtype]) -> np.dtype: + ... + + +@overload +def find_common_type(types: list[ExtensionDtype]) -> DtypeObj: + ... + + +@overload +def find_common_type(types: list[DtypeObj]) -> DtypeObj: + ... + + +def find_common_type(types): + """ + Find a common data type among the given dtypes. + + Parameters + ---------- + types : list of dtypes + + Returns + ------- + pandas extension or numpy dtype + + See Also + -------- + numpy.find_common_type + + """ + if not types: + raise ValueError("no types given") + + first = types[0] + + # workaround for find_common_type([np.dtype('datetime64[ns]')] * 2) + # => object + if lib.dtypes_all_equal(list(types)): + return first + + # get unique types (dict.fromkeys is used as order-preserving set()) + types = list(dict.fromkeys(types).keys()) + + if any(isinstance(t, ExtensionDtype) for t in types): + for t in types: + if isinstance(t, ExtensionDtype): + res = t._get_common_dtype(types) + if res is not None: + return res + return np.dtype("object") + + # take lowest unit + if all(lib.is_np_dtype(t, "M") for t in types): + return np.dtype(max(types)) + if all(lib.is_np_dtype(t, "m") for t in types): + return np.dtype(max(types)) + + # don't mix bool / int or float or complex + # this is different from numpy, which casts bool with float/int as int + has_bools = any(t.kind == "b" for t in types) + if has_bools: + for t in types: + if t.kind in "iufc": + return np.dtype("object") + + return np_find_common_type(*types) + + +def construct_2d_arraylike_from_scalar( + value: Scalar, length: int, width: int, dtype: np.dtype, copy: bool +) -> np.ndarray: + shape = (length, width) + + if dtype.kind in "mM": + value = _maybe_box_and_unbox_datetimelike(value, dtype) + elif dtype == _dtype_obj: + if isinstance(value, (np.timedelta64, np.datetime64)): + # calling np.array below would cast to pytimedelta/pydatetime + out = np.empty(shape, dtype=object) + out.fill(value) + return out + + # Attempt to coerce to a numpy array + try: + if not copy: + arr = np.asarray(value, dtype=dtype) + else: + arr = np.array(value, dtype=dtype, copy=copy) + except (ValueError,
TypeError) as err: + raise TypeError( + f"DataFrame constructor called with incompatible data and dtype: {err}" + ) from err + + if arr.ndim != 0: + raise ValueError("DataFrame constructor not properly called!") + + return np.full(shape, arr) + + +def construct_1d_arraylike_from_scalar( + value: Scalar, length: int, dtype: DtypeObj | None +) -> ArrayLike: + """ + create a np.ndarray / pandas type of specified shape and dtype + filled with values + + Parameters + ---------- + value : scalar value + length : int + dtype : pandas_dtype or np.dtype + + Returns + ------- + np.ndarray / pandas type of length, filled with value + + """ + + if dtype is None: + try: + dtype, value = infer_dtype_from_scalar(value) + except OutOfBoundsDatetime: + dtype = _dtype_obj + + if isinstance(dtype, ExtensionDtype): + cls = dtype.construct_array_type() + seq = [] if length == 0 else [value] + subarr = cls._from_sequence(seq, dtype=dtype).repeat(length) + + else: + if length and dtype.kind in "iu" and isna(value): + # coerce if we have nan for an integer dtype + dtype = np.dtype("float64") + elif lib.is_np_dtype(dtype, "US"): + # coerce to object dtype to allow numpy to take our string as a + # scalar value + dtype = np.dtype("object") + if not isna(value): + value = ensure_str(value) + elif dtype.kind in "mM": + value = _maybe_box_and_unbox_datetimelike(value, dtype) + + subarr = np.empty(length, dtype=dtype) + if length: + # GH 47391: numpy > 1.24 will raise filling np.nan into int dtypes + subarr.fill(value) + + return subarr + + +def _maybe_box_and_unbox_datetimelike(value: Scalar, dtype: DtypeObj): + # Caller is responsible for checking dtype.kind in "mM" + + if isinstance(value, dt.datetime): + # we don't want to box dt64, in particular datetime64("NaT") + value = maybe_box_datetimelike(value, dtype) + + return _maybe_unbox_datetimelike(value, dtype) + + +def construct_1d_object_array_from_listlike(values: Sized) -> np.ndarray: + """ + Transform any list-like object into a 1-dimensional numpy array of object + dtype. + + Parameters + ---------- + values : any iterable which has a len() + + Raises + ------ + TypeError + * If `values` does not have a len() + + Returns + ------- + 1-dimensional numpy array of dtype object + """ + # numpy will try to interpret nested lists as further dimensions, hence + # making a 1D array that contains list-likes is a bit tricky: + result = np.empty(len(values), dtype="object") + result[:] = values + return result + + +def maybe_cast_to_integer_array(arr: list | np.ndarray, dtype: np.dtype) -> np.ndarray: + """ + Takes any dtype and returns the casted version, raising when data is + incompatible with integer/unsigned integer dtypes. + + Parameters + ---------- + arr : np.ndarray or list + The array to cast. + dtype : np.dtype + The integer dtype to cast the array to. + + Returns + ------- + ndarray + Array of integer or unsigned integer dtype. + + Raises + ------ + OverflowError : the dtype is incompatible with the data + ValueError : loss of precision has occurred during casting + + Examples + -------- + If you try to coerce negative values to unsigned integers, it raises: + + >>> pd.Series([-1], dtype="uint64") + Traceback (most recent call last): + ... + OverflowError: Trying to coerce negative values to unsigned integers + + Also, if you try to coerce float values to integers, it raises: + + >>> maybe_cast_to_integer_array([1, 2, 3.5], dtype=np.dtype("int64")) + Traceback (most recent call last): + ...
+ ValueError: Trying to coerce float values to integers + """ + assert dtype.kind in "iu" + + try: + if not isinstance(arr, np.ndarray): + with warnings.catch_warnings(): + # We already disallow dtype=uint w/ negative numbers + # (test_constructor_coercion_signed_to_unsigned) so safe to ignore. + warnings.filterwarnings( + "ignore", + "NumPy will stop allowing conversion of out-of-bound Python int", + DeprecationWarning, + ) + casted = np.asarray(arr, dtype=dtype) + else: + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=RuntimeWarning) + casted = arr.astype(dtype, copy=False) + except OverflowError as err: + raise OverflowError( + "The elements provided in the data cannot all be " + f"casted to the dtype {dtype}" + ) from err + + if isinstance(arr, np.ndarray) and arr.dtype == dtype: + # avoid expensive array_equal check + return casted + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore", category=RuntimeWarning) + warnings.filterwarnings( + "ignore", "elementwise comparison failed", FutureWarning + ) + if np.array_equal(arr, casted): + return casted + + # We do this casting to allow for proper + # data and dtype checking. + # + # We didn't do this earlier because NumPy + # doesn't handle `uint64` correctly. + arr = np.asarray(arr) + + if np.issubdtype(arr.dtype, str): + # TODO(numpy-2.0 min): This case will raise an OverflowError above + if (casted.astype(str) == arr).all(): + return casted + raise ValueError(f"string values cannot be losslessly cast to {dtype}") + + if dtype.kind == "u" and (arr < 0).any(): + # TODO: can this be hit anymore after numpy 2.0? + raise OverflowError("Trying to coerce negative values to unsigned integers") + + if arr.dtype.kind == "f": + if not np.isfinite(arr).all(): + raise IntCastingNaNError( + "Cannot convert non-finite values (NA or inf) to integer" + ) + raise ValueError("Trying to coerce float values to integers") + if arr.dtype == object: + raise ValueError("Trying to coerce float values to integers") + + if casted.dtype < arr.dtype: + # TODO: Can this path be hit anymore with numpy > 2 + # GH#41734 e.g. [1, 200, 923442] and dtype="int8" -> overflows + raise ValueError( + f"Values are too large to be losslessly converted to {dtype}. " + f"To cast anyway, use pd.Series(values).astype({dtype})" + ) + + if arr.dtype.kind in "mM": + # test_constructor_maskedarray_nonfloat + raise TypeError( + f"Constructing a Series or DataFrame from {arr.dtype} values and " + f"dtype={dtype} is not supported. Use values.view({dtype}) instead." + ) + + # No known cases that get here, but raising explicitly to cover our bases. + raise ValueError(f"values cannot be losslessly cast to {dtype}") + + +def can_hold_element(arr: ArrayLike, element: Any) -> bool: + """ + Can we do an inplace setitem with this element in an array with this dtype? 
+ + Parameters + ---------- + arr : np.ndarray or ExtensionArray + element : Any + + Returns + ------- + bool + """ + dtype = arr.dtype + if not isinstance(dtype, np.dtype) or dtype.kind in "mM": + if isinstance(dtype, (PeriodDtype, IntervalDtype, DatetimeTZDtype, np.dtype)): + # np.dtype here catches datetime64ns and timedelta64ns; we assume + # in this case that we have DatetimeArray/TimedeltaArray + arr = cast( + "PeriodArray | DatetimeArray | TimedeltaArray | IntervalArray", arr + ) + try: + arr._validate_setitem_value(element) + return True + except (ValueError, TypeError): + return False + + # This is technically incorrect, but maintains the behavior of + # ExtensionBlock._can_hold_element + return True + + try: + np_can_hold_element(dtype, element) + return True + except (TypeError, LossySetitemError): + return False + + +def np_can_hold_element(dtype: np.dtype, element: Any) -> Any: + """ + Raise if we cannot losslessly set this element into an ndarray with this dtype. + + Specifically about places where we disagree with numpy. i.e. there are + cases where numpy will raise in doing the setitem that we do not check + for here, e.g. setting str "X" into a numeric ndarray. + + Returns + ------- + Any + The element, potentially cast to the dtype. + + Raises + ------ + ValueError : If we cannot losslessly store this element with this dtype. + """ + if dtype == _dtype_obj: + return element + + tipo = _maybe_infer_dtype_type(element) + + if dtype.kind in "iu": + if isinstance(element, range): + if _dtype_can_hold_range(element, dtype): + return element + raise LossySetitemError + + if is_integer(element) or (is_float(element) and element.is_integer()): + # e.g. test_setitem_series_int8 if we have a python int 1 + # tipo may be np.int32, despite the fact that it will fit + # in smaller int dtypes. + info = np.iinfo(dtype) + if info.min <= element <= info.max: + return dtype.type(element) + raise LossySetitemError + + if tipo is not None: + if tipo.kind not in "iu": + if isinstance(element, np.ndarray) and element.dtype.kind == "f": + # If all can be losslessly cast to integers, then we can hold them + with np.errstate(invalid="ignore"): + # We check afterwards whether the cast was lossless, so no + # need to show the warning + casted = element.astype(dtype) + comp = casted == element + if comp.all(): + # Return the casted values because they can be passed to + # np.putmask, whereas the raw values cannot. + # see TestSetitemFloatNDarrayIntoIntegerSeries + return casted + raise LossySetitemError + + elif isinstance(element, ABCExtensionArray) and isinstance( + element.dtype, CategoricalDtype + ): + # GH#52927 setting Categorical value into non-EA frame + # TODO: general-case for EAs? + try: + casted = element.astype(dtype) + except (ValueError, TypeError): + raise LossySetitemError + # Check for cases of either + # a) lossy overflow/rounding or + # b) semantic changes like dt64->int64 + comp = casted == element + if not comp.all(): + raise LossySetitemError + return casted + + # Anything other than integer we cannot hold + raise LossySetitemError + if ( + dtype.kind == "u" + and isinstance(element, np.ndarray) + and element.dtype.kind == "i" + ): + # see test_where_uint64 + casted = element.astype(dtype) + if (casted == element).all(): + # TODO: faster to check (element >=0).all()? potential + # itemsize issues there? + return casted + raise LossySetitemError + if dtype.itemsize < tipo.itemsize: + raise LossySetitemError + if not isinstance(tipo, np.dtype): + # i.e.
nullable IntegerDtype; we can put this into an ndarray + # losslessly iff it has no NAs + arr = element._values if isinstance(element, ABCSeries) else element + if arr._hasna: + raise LossySetitemError + return element + + return element + + raise LossySetitemError + + if dtype.kind == "f": + if lib.is_integer(element) or lib.is_float(element): + casted = dtype.type(element) + if np.isnan(casted) or casted == element: + return casted + # otherwise e.g. overflow see TestCoercionFloat32 + raise LossySetitemError + + if tipo is not None: + # TODO: itemsize check? + if tipo.kind not in "iuf": + # Anything other than float/integer we cannot hold + raise LossySetitemError + if not isinstance(tipo, np.dtype): + # i.e. nullable IntegerDtype or FloatingDtype; + # we can put this into an ndarray losslessly iff it has no NAs + if element._hasna: + raise LossySetitemError + return element + elif tipo.itemsize > dtype.itemsize or tipo.kind != dtype.kind: + if isinstance(element, np.ndarray): + # e.g. TestDataFrameIndexingWhere::test_where_alignment + casted = element.astype(dtype) + if np.array_equal(casted, element, equal_nan=True): + return casted + raise LossySetitemError + + return element + + raise LossySetitemError + + if dtype.kind == "c": + if lib.is_integer(element) or lib.is_complex(element) or lib.is_float(element): + if np.isnan(element): + # see test_where_complex GH#6345 + return dtype.type(element) + + with warnings.catch_warnings(): + warnings.filterwarnings("ignore") + casted = dtype.type(element) + if casted == element: + return casted + # otherwise e.g. overflow see test_32878_complex_itemsize + raise LossySetitemError + + if tipo is not None: + if tipo.kind in "iufc": + return element + raise LossySetitemError + raise LossySetitemError + + if dtype.kind == "b": + if tipo is not None: + if tipo.kind == "b": + if not isinstance(tipo, np.dtype): + # i.e. we have a BooleanArray + if element._hasna: + # i.e. there are pd.NA elements + raise LossySetitemError + return element + raise LossySetitemError + if lib.is_bool(element): + return element + raise LossySetitemError + + if dtype.kind == "S": + # TODO: test tests.frame.methods.test_replace tests get here, + # need more targeted tests. xref phofl has a PR about this + if tipo is not None: + if tipo.kind == "S" and tipo.itemsize <= dtype.itemsize: + return element + raise LossySetitemError + if isinstance(element, bytes) and len(element) <= dtype.itemsize: + return element + raise LossySetitemError + + if dtype.kind == "V": + # i.e. np.void, which cannot hold _anything_ + raise LossySetitemError + + raise NotImplementedError(dtype) + + +def _dtype_can_hold_range(rng: range, dtype: np.dtype) -> bool: + """ + _maybe_infer_dtype_type infers to int64 (and float64 for very large endpoints), + but in many cases a range can be held by a smaller integer dtype. + Check if this is one of those cases. 
+ """ + if not len(rng): + return True + return np_can_cast_scalar(rng.start, dtype) and np_can_cast_scalar(rng.stop, dtype) + + +def np_can_cast_scalar(element: Scalar, dtype: np.dtype) -> bool: + """ + np.can_cast pandas-equivalent for pre-2.0 behavior that allowed scalar + inference + + Parameters + ---------- + element : Scalar + dtype : np.dtype + + Returns + ------- + bool + """ + try: + np_can_hold_element(dtype, element) + return True + except (LossySetitemError, NotImplementedError): + return False diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/generic.py b/vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/generic.py new file mode 100644 index 0000000000000000000000000000000000000000..9718ad600cb80b6e38f069a83aaf35ddb376fb00 --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/pandas/core/dtypes/generic.py @@ -0,0 +1,147 @@ +""" define generic base classes for pandas objects """ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Type, + cast, +) + +if TYPE_CHECKING: + from pandas import ( + Categorical, + CategoricalIndex, + DataFrame, + DatetimeIndex, + Index, + IntervalIndex, + MultiIndex, + PeriodIndex, + RangeIndex, + Series, + TimedeltaIndex, + ) + from pandas.core.arrays import ( + DatetimeArray, + ExtensionArray, + NumpyExtensionArray, + PeriodArray, + TimedeltaArray, + ) + from pandas.core.generic import NDFrame + + +# define abstract base classes to enable isinstance type checking on our +# objects +def create_pandas_abc_type(name, attr, comp): + def _check(inst) -> bool: + return getattr(inst, attr, "_typ") in comp + + # https://github.com/python/mypy/issues/1006 + # error: 'classmethod' used with a non-method + @classmethod # type: ignore[misc] + def _instancecheck(cls, inst) -> bool: + return _check(inst) and not isinstance(inst, type) + + @classmethod # type: ignore[misc] + def _subclasscheck(cls, inst) -> bool: + # Raise instead of returning False + # This is consistent with default __subclasscheck__ behavior + if not isinstance(inst, type): + raise TypeError("issubclass() arg 1 must be a class") + + return _check(inst) + + dct = {"__instancecheck__": _instancecheck, "__subclasscheck__": _subclasscheck} + meta = type("ABCBase", (type,), dct) + return meta(name, (), dct) + + +ABCRangeIndex = cast( + "Type[RangeIndex]", + create_pandas_abc_type("ABCRangeIndex", "_typ", ("rangeindex",)), +) +ABCMultiIndex = cast( + "Type[MultiIndex]", + create_pandas_abc_type("ABCMultiIndex", "_typ", ("multiindex",)), +) +ABCDatetimeIndex = cast( + "Type[DatetimeIndex]", + create_pandas_abc_type("ABCDatetimeIndex", "_typ", ("datetimeindex",)), +) +ABCTimedeltaIndex = cast( + "Type[TimedeltaIndex]", + create_pandas_abc_type("ABCTimedeltaIndex", "_typ", ("timedeltaindex",)), +) +ABCPeriodIndex = cast( + "Type[PeriodIndex]", + create_pandas_abc_type("ABCPeriodIndex", "_typ", ("periodindex",)), +) +ABCCategoricalIndex = cast( + "Type[CategoricalIndex]", + create_pandas_abc_type("ABCCategoricalIndex", "_typ", ("categoricalindex",)), +) +ABCIntervalIndex = cast( + "Type[IntervalIndex]", + create_pandas_abc_type("ABCIntervalIndex", "_typ", ("intervalindex",)), +) +ABCIndex = cast( + "Type[Index]", + create_pandas_abc_type( + "ABCIndex", + "_typ", + { + "index", + "rangeindex", + "multiindex", + "datetimeindex", + "timedeltaindex", + "periodindex", + "categoricalindex", + "intervalindex", + }, + ), +) + + +ABCNDFrame = cast( + "Type[NDFrame]", + create_pandas_abc_type("ABCNDFrame", "_typ", ("series", "dataframe")), +) +ABCSeries =
cast( + "Type[Series]", + create_pandas_abc_type("ABCSeries", "_typ", ("series",)), +) +ABCDataFrame = cast( + "Type[DataFrame]", create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe",)) +) + +# Note: the trailing commas below matter: ("categorical") is just a plain +# string, and `in` on a string does substring matching rather than tuple +# membership. +ABCCategorical = cast( + "Type[Categorical]", + create_pandas_abc_type("ABCCategorical", "_typ", ("categorical",)), +) +ABCDatetimeArray = cast( + "Type[DatetimeArray]", + create_pandas_abc_type("ABCDatetimeArray", "_typ", ("datetimearray",)), +) +ABCTimedeltaArray = cast( + "Type[TimedeltaArray]", + create_pandas_abc_type("ABCTimedeltaArray", "_typ", ("timedeltaarray",)), +) +ABCPeriodArray = cast( + "Type[PeriodArray]", + create_pandas_abc_type("ABCPeriodArray", "_typ", ("periodarray",)), +) +ABCExtensionArray = cast( + "Type[ExtensionArray]", + create_pandas_abc_type( + "ABCExtensionArray", + "_typ", + # Note: IntervalArray and SparseArray are included bc they have _typ="extension" + {"extension", "categorical", "periodarray", "datetimearray", "timedeltaarray"}, + ), +) +ABCNumpyExtensionArray = cast( + "Type[NumpyExtensionArray]", + create_pandas_abc_type("ABCNumpyExtensionArray", "_typ", ("npy_extension",)), +) diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/__init__.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6dd4f6d8b9d8928f45dfbb041ede90e5fda7c441 Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/__init__.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/base.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2bd914395d80e5da9f27276ece54aeec5ae84e95 Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/base.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/categorical.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/categorical.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..355365a94104e60ae76ed8e295f9dc20ffa34319 Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/categorical.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/generic.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/generic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..53b2ed4656dbe22645d745e7bd8aa07632cc76a5 Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/generic.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/grouper.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/grouper.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ac7b8129ae51737708cac5fe5d43603648fcd2f5 Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/grouper.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/indexing.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/indexing.cpython-310.pyc
new file mode 100644 index 0000000000000000000000000000000000000000..80d0562e76b05b8e48f1edbb72e749ffbabd4655 Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/indexing.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/numba_.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/numba_.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..265361318cef176bf6f7eb6c4aaa8f1cd4429a05 Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/numba_.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/ops.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d434b5758ee1c47599f6b02ce148621403c84df5 Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/__pycache__/ops.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/base.py b/vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/base.py new file mode 100644 index 0000000000000000000000000000000000000000..a443597347283887deb9cbd3eafb5f6d3bb6d9a6 --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/base.py @@ -0,0 +1,121 @@ +""" +Provide basic components for groupby. +""" +from __future__ import annotations + +import dataclasses +from typing import TYPE_CHECKING + +if TYPE_CHECKING: + from collections.abc import Hashable + + +@dataclasses.dataclass(order=True, frozen=True) +class OutputKey: + label: Hashable + position: int + + +# special case to prevent duplicate plots when catching exceptions when +# forwarding methods from NDFrames +plotting_methods = frozenset(["plot", "hist"]) + +# cythonized transformations or canned "agg+broadcast", which do not +# require postprocessing of the result by transform. +cythonized_kernels = frozenset(["cumprod", "cumsum", "shift", "cummin", "cummax"]) + +# List of aggregation/reduction functions. +# These map each group to a single numeric value +reduction_kernels = frozenset( + [ + "all", + "any", + "corrwith", + "count", + "first", + "idxmax", + "idxmin", + "last", + "max", + "mean", + "median", + "min", + "nunique", + "prod", + # as long as `quantile`'s signature accepts only + # a single quantile value, it's a reduction. + # GH#27526 might change that. + "quantile", + "sem", + "size", + "skew", + "std", + "sum", + "var", + ] +) + +# List of transformation functions. +# a transformation is a function that, for each group, +# produces a result that has the same shape as the group. 
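+ # e.g. (editor's illustration) df.groupby("key")["x"].transform("cumsum") + # returns a result aligned with df["x"], whereas a reduction kernel such as + # "sum" collapses each group to a single value.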
+ + +transformation_kernels = frozenset( + [ + "bfill", + "cumcount", + "cummax", + "cummin", + "cumprod", + "cumsum", + "diff", + "ffill", + "fillna", + "ngroup", + "pct_change", + "rank", + "shift", + ] +) + +# these are all the public methods on Grouper which don't belong +# in either of the above lists +groupby_other_methods = frozenset( + [ + "agg", + "aggregate", + "apply", + "boxplot", + # corr and cov return ngroups*ncolumns rows, so they + # are neither a transformation nor a reduction + "corr", + "cov", + "describe", + "dtypes", + "expanding", + "ewm", + "filter", + "get_group", + "groups", + "head", + "hist", + "indices", + "ndim", + "ngroups", + "nth", + "ohlc", + "pipe", + "plot", + "resample", + "rolling", + "tail", + "take", + "transform", + "sample", + "value_counts", + ] +) +# Valid values of `name` for `groupby.transform(name)` +# NOTE: do NOT edit this directly. New additions should be inserted +# into the appropriate list above. +transform_kernel_allowlist = reduction_kernels | transformation_kernels diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/categorical.py b/vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/categorical.py new file mode 100644 index 0000000000000000000000000000000000000000..6ab98cf4fe55e9b064db99e61d1245cb83b63dc1 --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/categorical.py @@ -0,0 +1,87 @@ +from __future__ import annotations + +import numpy as np + +from pandas.core.algorithms import unique1d +from pandas.core.arrays.categorical import ( + Categorical, + CategoricalDtype, + recode_for_categories, +) + + +def recode_for_groupby( + c: Categorical, sort: bool, observed: bool +) -> tuple[Categorical, Categorical | None]: + """ + Code the categories to ensure we can groupby for categoricals. + + If observed=True, we return a new Categorical with the observed + categories only. + + If sort=False, return a copy of self, coded with categories as + returned by .unique(), followed by any categories not appearing in + the data. If sort=True, return self. + + This method is needed solely to ensure the categorical index of the + GroupBy result has categories in the order of appearance in the data + (GH-8868). + + Parameters + ---------- + c : Categorical + sort : bool + The value of the sort parameter groupby was called with. + observed : bool + Account only for the observed values + + Returns + ------- + Categorical + If sort=False, the new categories are set to the order of + appearance in codes (unless ordered=True, in which case the + original order is preserved), followed by any unrepresented + categories in the original order. 
+ Categorical or None + If we are observed, return the original categorical, otherwise None + """ + # we only care about observed values + if observed: + # In cases with c.ordered, this is equivalent to + # return c.remove_unused_categories(), c + + unique_codes = unique1d(c.codes) + + take_codes = unique_codes[unique_codes != -1] + if sort: + take_codes = np.sort(take_codes) + + # we recode according to the uniques + categories = c.categories.take(take_codes) + codes = recode_for_categories(c.codes, c.categories, categories) + + # return a new categorical that maps our new codes + # and categories + dtype = CategoricalDtype(categories, ordered=c.ordered) + return Categorical._simple_new(codes, dtype=dtype), c + + # Already sorted according to c.categories; all is fine + if sort: + return c, None + + # sort=False should order groups in as-encountered order (GH-8868) + + # xref GH:46909: Re-ordering codes faster than using (set|add|reorder)_categories + all_codes = np.arange(c.categories.nunique()) + # GH 38140: exclude nan from indexer for categories + # (sort is always False from here on; the sort=True case returned above) + unique_notnan_codes = unique1d(c.codes[c.codes != -1]) + if len(all_codes) > len(unique_notnan_codes): + # GH 13179: All categories need to be present, even if missing from the data + missing_codes = np.setdiff1d(all_codes, unique_notnan_codes, assume_unique=True) + take_codes = np.concatenate((unique_notnan_codes, missing_codes)) + else: + take_codes = unique_notnan_codes + + return Categorical(c, c.unique().categories.take(take_codes)), None diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/numba_.py b/vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/numba_.py new file mode 100644 index 0000000000000000000000000000000000000000..3b7a58e87603e578216c4c80e8c88e06828d5dfa --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/pandas/core/groupby/numba_.py @@ -0,0 +1,181 @@ +"""Common utilities for Numba operations with groupby ops""" +from __future__ import annotations + +import functools +import inspect +from typing import ( + TYPE_CHECKING, + Any, + Callable, +) + +import numpy as np + +from pandas.compat._optional import import_optional_dependency + +from pandas.core.util.numba_ import ( + NumbaUtilError, + jit_user_function, +) + +if TYPE_CHECKING: + from pandas._typing import Scalar + + +def validate_udf(func: Callable) -> None: + """ + Validate a user-defined function for ops when using Numba with groupby ops. + + The first signature arguments should include: + + def f(values, index, ...): + ... + + Parameters + ---------- + func : function + user-defined function + + Returns + ------- + None + + Raises + ------ + NumbaUtilError + """ + if not callable(func): + raise NotImplementedError( + "Numba engine can only be used with a single function." + ) + udf_signature = list(inspect.signature(func).parameters.keys()) + expected_args = ["values", "index"] + min_number_args = len(expected_args) + if ( + len(udf_signature) < min_number_args + or udf_signature[:min_number_args] != expected_args + ): + raise NumbaUtilError( + f"The first {min_number_args} arguments to {func.__name__} must be " + f"{expected_args}" + ) + + +@functools.cache +def generate_numba_agg_func( + func: Callable[..., Scalar], + nopython: bool, + nogil: bool, + parallel: bool, +) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, Any], np.ndarray]: + """ + Generate a numba jitted agg function specified by values from engine_kwargs. + + 1.
jit the user's function + 2. Return a groupby agg function with the jitted function inline + + Configurations specified in engine_kwargs apply to both the user's + function _AND_ the groupby evaluation loop. + + Parameters + ---------- + func : function + function to be applied to each group and will be JITed + nopython : bool + nopython to be passed into numba.jit + nogil : bool + nogil to be passed into numba.jit + parallel : bool + parallel to be passed into numba.jit + + Returns + ------- + Numba function + """ + numba_func = jit_user_function(func) + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") + + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) + def group_agg( + values: np.ndarray, + index: np.ndarray, + begin: np.ndarray, + end: np.ndarray, + num_columns: int, + *args: Any, + ) -> np.ndarray: + assert len(begin) == len(end) + num_groups = len(begin) + + result = np.empty((num_groups, num_columns)) + for i in numba.prange(num_groups): + group_index = index[begin[i] : end[i]] + for j in numba.prange(num_columns): + group = values[begin[i] : end[i], j] + result[i, j] = numba_func(group, group_index, *args) + return result + + return group_agg + + +@functools.cache +def generate_numba_transform_func( + func: Callable[..., np.ndarray], + nopython: bool, + nogil: bool, + parallel: bool, +) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, Any], np.ndarray]: + """ + Generate a numba jitted transform function specified by values from engine_kwargs. + + 1. jit the user's function + 2. Return a groupby transform function with the jitted function inline + + Configurations specified in engine_kwargs apply to both the user's + function _AND_ the groupby evaluation loop. + + Parameters + ---------- + func : function + function to be applied to each window and will be JITed + nopython : bool + nopython to be passed into numba.jit + nogil : bool + nogil to be passed into numba.jit + parallel : bool + parallel to be passed into numba.jit + + Returns + ------- + Numba function + """ + numba_func = jit_user_function(func) + if TYPE_CHECKING: + import numba + else: + numba = import_optional_dependency("numba") + + @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel) + def group_transform( + values: np.ndarray, + index: np.ndarray, + begin: np.ndarray, + end: np.ndarray, + num_columns: int, + *args: Any, + ) -> np.ndarray: + assert len(begin) == len(end) + num_groups = len(begin) + + result = np.empty((len(values), num_columns)) + for i in numba.prange(num_groups): + group_index = index[begin[i] : end[i]] + for j in numba.prange(num_columns): + group = values[begin[i] : end[i], j] + result[begin[i] : end[i], j] = numba_func(group, group_index, *args) + return result + + return group_transform
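+ + +# Usage sketch (editor's illustration; the variable names below are +# hypothetical, not part of pandas). Groupby internals drive the generated +# kernels roughly like: +# +# agg = generate_numba_agg_func( +# lambda values, index: values.mean(), +# nopython=True, nogil=False, parallel=False, +# ) +# result = agg(sorted_values, sorted_index, group_begins, group_ends, +# sorted_values.shape[1]) +# +# where group_begins[i]:group_ends[i] delimits group i's rows in the +# group-sorted data, and the user function receives one column of one group +# at a time, per the loops in group_agg/group_transform above.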