diff --git a/.gitattributes b/.gitattributes index c1580aa2eae02036b1aef33f437f8ae465a0b16b..a336f4faf67d652c079d4a384247ca3db132bffa 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1244,3 +1244,4 @@ vlmpy310/lib/python3.10/site-packages/pandas/_libs/window/indexers.cpython-310-x vlmpy310/lib/python3.10/site-packages/pandas/_libs/tslibs/conversion.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text vlmpy310/lib/python3.10/site-packages/pandas/_libs/tslibs/offsets.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text parrot/lib/python3.10/site-packages/nvidia/nccl/lib/libnccl.so.2 filter=lfs diff=lfs merge=lfs -text +openflamingo/lib/python3.10/site-packages/scipy/sparse/_sparsetools.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_add_batch_dim.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_add_batch_dim.h new file mode 100644 index 0000000000000000000000000000000000000000..2b3e00c72f87bd8143ff026bb0218e772140d182 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_add_batch_dim.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_add_batch_dim(Tensor self, int batch_dim, int level) -> Tensor +inline at::Tensor _add_batch_dim(const at::Tensor & self, int64_t batch_dim, int64_t level) { + return at::_ops::_add_batch_dim::call(self, batch_dim, level); +} + +} diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dual_native.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dual_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a6db99307196e4dfca011b94c6eb0f01514c349b --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_make_dual_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor _make_dual(const at::Tensor & primal, const at::Tensor & tangent, int64_t level); +} // namespace native +} // namespace at diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_neg_view_copy.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_neg_view_copy.h new file mode 100644 index 0000000000000000000000000000000000000000..d09e8a602ac74a0eeb74fc962efedb95b89b7304 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_neg_view_copy.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_neg_view_copy(Tensor self) -> Tensor +inline at::Tensor _neg_view_copy(const at::Tensor & self) { + return at::_ops::_neg_view_copy::call(self); +} + +// aten::_neg_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _neg_view_copy_out(at::Tensor & out, const at::Tensor & self) { + return at::_ops::_neg_view_copy_out::call(self, out); +} +// aten::_neg_view_copy.out(Tensor self, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & _neg_view_copy_outf(const at::Tensor & self, at::Tensor & out) { + return at::_ops::_neg_view_copy_out::call(self, out); +} + +} diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_sample_dirichlet.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_sample_dirichlet.h new file mode 100644 index 0000000000000000000000000000000000000000..a912e8945ad3bb8b4770a79da7c0b15ee507b479 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_sample_dirichlet.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_sample_dirichlet(Tensor self, Generator? generator=None) -> Tensor +inline at::Tensor _sample_dirichlet(const at::Tensor & self, c10::optional generator=c10::nullopt) { + return at::_ops::_sample_dirichlet::call(self, generator); +} + +// aten::_sample_dirichlet.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _sample_dirichlet_out(at::Tensor & out, const at::Tensor & self, c10::optional generator=c10::nullopt) { + return at::_ops::_sample_dirichlet_out::call(self, generator, out); +} +// aten::_sample_dirichlet.out(Tensor self, Generator? generator=None, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & _sample_dirichlet_outf(const at::Tensor & self, c10::optional generator, at::Tensor & out) { + return at::_ops::_sample_dirichlet_out::call(self, generator, out); +} + +} diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm_reduce_impl.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm_reduce_impl.h new file mode 100644 index 0000000000000000000000000000000000000000..c7199b98606c05be26f9ec213bc5ed4b8da24a1a --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_mm_reduce_impl.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_sparse_mm_reduce_impl(Tensor self, Tensor other, str reduce) -> (Tensor, Tensor) +inline ::std::tuple _sparse_mm_reduce_impl(const at::Tensor & self, const at::Tensor & other, c10::string_view reduce) { + return at::_ops::_sparse_mm_reduce_impl::call(self, other, reduce); +} + +} diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_backward.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..75c3ff97ef48782c016460d009b4386445c5cfd4 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/_upsample_nearest_exact1d_backward.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
+inline at::Tensor & _upsample_nearest_exact1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales=c10::nullopt) { + return at::_ops::_upsample_nearest_exact1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales, grad_input); +} +namespace symint { + template ::value>> + at::Tensor & _upsample_nearest_exact1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales=c10::nullopt) { + return at::_ops::_upsample_nearest_exact1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales, grad_input); + } +} + +// aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & _upsample_nearest_exact1d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales, at::Tensor & grad_input) { + return at::_ops::_upsample_nearest_exact1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales, grad_input); +} +namespace symint { + template ::value>> + at::Tensor & _upsample_nearest_exact1d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales, at::Tensor & grad_input) { + return at::_ops::_upsample_nearest_exact1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales, grad_input); + } +} + +// aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & _upsample_nearest_exact1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales=c10::nullopt) { + return at::_ops::_upsample_nearest_exact1d_backward_grad_input::call(grad_output, output_size, input_size, scales, grad_input); +} +namespace symint { + template ::value>> + at::Tensor & _upsample_nearest_exact1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales=c10::nullopt) { + return at::_ops::_upsample_nearest_exact1d_backward_grad_input::call(grad_output, output_size, input_size, scales, grad_input); + } +} + +// aten::_upsample_nearest_exact1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!) 
+inline at::Tensor & _upsample_nearest_exact1d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales, at::Tensor & grad_input) { + return at::_ops::_upsample_nearest_exact1d_backward_grad_input::call(grad_output, output_size, input_size, scales, grad_input); +} +namespace symint { + template ::value>> + at::Tensor & _upsample_nearest_exact1d_backward_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales, at::Tensor & grad_input) { + return at::_ops::_upsample_nearest_exact1d_backward_grad_input::call(grad_output, output_size, input_size, scales, grad_input); + } +} + +// aten::_upsample_nearest_exact1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor +inline at::Tensor _upsample_nearest_exact1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales=c10::nullopt) { + return at::_ops::_upsample_nearest_exact1d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales); +} +namespace symint { + template ::value>> + at::Tensor _upsample_nearest_exact1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, c10::optional scales=c10::nullopt) { + return at::_ops::_upsample_nearest_exact1d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), scales); + } +} + +// aten::_upsample_nearest_exact1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, float? scales=None) -> Tensor +inline at::Tensor _upsample_nearest_exact1d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales=c10::nullopt) { + return at::_ops::_upsample_nearest_exact1d_backward::call(grad_output, output_size, input_size, scales); +} +namespace symint { + template ::value>> + at::Tensor _upsample_nearest_exact1d_backward(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, c10::optional scales=c10::nullopt) { + return at::_ops::_upsample_nearest_exact1d_backward::call(grad_output, output_size, input_size, scales); + } +} + +} diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/addcmul_meta.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/addcmul_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..5346e560bc2449dd2bd0259a11e4fd0af3d56d58 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/addcmul_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_addcmul : public TensorIteratorBase { + + + void meta(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value); +}; + +} // namespace native +} // namespace at diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/addmm_cuda_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/addmm_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..79d1b5ba561d0c05aaacf174095fa8c62e188429 --- /dev/null +++ 
b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/addmm_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor addmm(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1); +TORCH_API at::Tensor & addmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1); +TORCH_API at::Tensor & addmm_outf(const at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out); +TORCH_API at::Tensor & addmm_(at::Tensor & self, const at::Tensor & mat1, const at::Tensor & mat2, const at::Scalar & beta=1, const at::Scalar & alpha=1); + +} // namespace cuda +} // namespace at diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/aminmax_meta.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/aminmax_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..74438d94d66c2f5675528ccb2bec006caec8100c --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/aminmax_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_aminmax : public at::impl::MetaBase { + + + void meta(const at::Tensor & self, c10::optional dim, bool keepdim); +}; + +} // namespace native +} // namespace at diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/arctan_compositeimplicitautograd_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/arctan_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..58d9b00c34891c0eb226df3135e66e0379a06361 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/arctan_compositeimplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor arctan(const at::Tensor & self); +TORCH_API at::Tensor & arctan_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & arctan_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & arctan_(at::Tensor & self); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/conv_transpose2d_ops.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/conv_transpose2d_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..5cfca4ec3b1605fec1066484a657b642dffc410c --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/conv_transpose2d_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API conv_transpose2d_input { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, at::IntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, int64_t, at::IntArrayRef); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::conv_transpose2d") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "input") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "conv_transpose2d.input(Tensor input, Tensor weight, Tensor? bias=None, int[2] stride=1, SymInt[2] padding=0, SymInt[2] output_padding=0, int groups=1, int[2] dilation=1) -> Tensor") + static at::Tensor call(const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, int64_t groups, at::IntArrayRef dilation); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef stride, c10::SymIntArrayRef padding, c10::SymIntArrayRef output_padding, int64_t groups, at::IntArrayRef dilation); +}; + +}} // namespace at::_ops diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/cumulative_trapezoid.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/cumulative_trapezoid.h new file mode 100644 index 0000000000000000000000000000000000000000..01a21f83723e6c74df839a4fa68d02f2e7a6970f --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/cumulative_trapezoid.h @@ -0,0 +1,35 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::cumulative_trapezoid.x(Tensor y, Tensor x, *, int dim=-1) -> Tensor +inline at::Tensor cumulative_trapezoid(const at::Tensor & y, const at::Tensor & x, int64_t dim=-1) { + return at::_ops::cumulative_trapezoid_x::call(y, x, dim); +} + +// aten::cumulative_trapezoid.dx(Tensor y, *, Scalar dx=1, int dim=-1) -> Tensor +inline at::Tensor cumulative_trapezoid(const at::Tensor & y, const at::Scalar & dx=1, int64_t dim=-1) { + return 
at::_ops::cumulative_trapezoid_dx::call(y, dx, dim); +} + +} diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/dstack.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/dstack.h new file mode 100644 index 0000000000000000000000000000000000000000..7d8763a9e099dd2be9ba2267348b81093d5d2eff --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/dstack.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::dstack(Tensor[] tensors) -> Tensor +inline at::Tensor dstack(at::TensorList tensors) { + return at::_ops::dstack::call(tensors); +} + +// aten::dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & dstack_out(at::Tensor & out, at::TensorList tensors) { + return at::_ops::dstack_out::call(tensors, out); +} +// aten::dstack.out(Tensor[] tensors, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & dstack_outf(at::TensorList tensors, at::Tensor & out) { + return at::_ops::dstack_out::call(tensors, out); +} + +} diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/erf_meta.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/erf_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..66d7d732728d1735d47db6ade91c9eb7bd665d73 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/erf_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_erf : public TensorIteratorBase { + + + void meta(const at::Tensor & self); +}; + +} // namespace native +} // namespace at diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/exp_native.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/exp_native.h new file mode 100644 index 0000000000000000000000000000000000000000..c769f4ab95877e77733a454bc7574ec849fea5e3 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/exp_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_exp_out : public at::meta::structured_exp { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/gcd.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/gcd.h new file mode 100644 index 0000000000000000000000000000000000000000..ea16ad5d580e51176c508f6058b4263834b6c044 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/gcd.h @@ -0,0 +1,44 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & gcd_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other) { + return at::_ops::gcd_out::call(self, other, out); +} +// aten::gcd.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & gcd_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out) { + return at::_ops::gcd_out::call(self, other, out); +} + +// aten::gcd(Tensor self, Tensor other) -> Tensor +inline at::Tensor gcd(const at::Tensor & self, const at::Tensor & other) { + return at::_ops::gcd::call(self, other); +} + +// aten::gcd_(Tensor(a!) self, Tensor other) -> Tensor(a!) +inline at::Tensor & gcd_(at::Tensor & self, const at::Tensor & other) { + return at::_ops::gcd_::call(self, other); +} + +} diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_backward_cuda_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c17b850d85b8b28e488a54b8875aeaeac8e15023 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/hardtanh_backward_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor hardtanh_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val); +TORCH_API at::Tensor & hardtanh_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val); +TORCH_API at::Tensor & hardtanh_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & min_val, const at::Scalar & max_val, at::Tensor & grad_input); + +} // namespace cuda +} // namespace at diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/im2col_native.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/im2col_native.h new file mode 100644 index 0000000000000000000000000000000000000000..85276e68e42d97d50ded9922d57fb0fffc144cf3 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/im2col_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor im2col_cpu(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride); +TORCH_API at::Tensor & im2col_out_cpu(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out); +TORCH_API at::Tensor im2col_cuda(const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride); +TORCH_API at::Tensor & im2col_out_cuda(const at::Tensor & self, at::IntArrayRef kernel_size, 
at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_svd_compositeimplicitautograd_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_svd_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..a830e0e3bed954f7fd29a56d052abe49628f856d --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_svd_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::tuple linalg_svd(const at::Tensor & A, bool full_matrices=true, c10::optional driver=c10::nullopt); +TORCH_API ::std::tuple linalg_svd_out(at::Tensor & U, at::Tensor & S, at::Tensor & Vh, const at::Tensor & A, bool full_matrices=true, c10::optional driver=c10::nullopt); +TORCH_API ::std::tuple linalg_svd_outf(const at::Tensor & A, bool full_matrices, c10::optional driver, at::Tensor & U, at::Tensor & S, at::Tensor & Vh); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_vecdot.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_vecdot.h new file mode 100644 index 0000000000000000000000000000000000000000..ce313cdcd03695d4a3c912ca89646c300995a3da --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_vecdot.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::linalg_vecdot(Tensor x, Tensor y, *, int dim=-1) -> Tensor +inline at::Tensor linalg_vecdot(const at::Tensor & x, const at::Tensor & y, int64_t dim=-1) { + return at::_ops::linalg_vecdot::call(x, y, dim); +} + +// aten::linalg_vecdot.out(Tensor x, Tensor y, *, int dim=-1, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & linalg_vecdot_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & y, int64_t dim=-1) { + return at::_ops::linalg_vecdot_out::call(x, y, dim, out); +} +// aten::linalg_vecdot.out(Tensor x, Tensor y, *, int dim=-1, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & linalg_vecdot_outf(const at::Tensor & x, const at::Tensor & y, int64_t dim, at::Tensor & out) { + return at::_ops::linalg_vecdot_out::call(x, y, dim, out); +} + +} diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_forward_ops.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_forward_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..6f8afefb74248ef0566a3066330ad5393c6f9efc --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/log_sigmoid_forward_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API log_sigmoid_forward_output { + using schema = ::std::tuple (const at::Tensor &, at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::log_sigmoid_forward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "output") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "log_sigmoid_forward.output(Tensor self, *, Tensor(a!) output, Tensor(b!) buffer) -> (Tensor(a!), Tensor(b!))") + static ::std::tuple call(const at::Tensor & self, at::Tensor & output, at::Tensor & buffer); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, at::Tensor & output, at::Tensor & buffer); +}; + +struct TORCH_API log_sigmoid_forward { + using schema = ::std::tuple (const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::log_sigmoid_forward") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "log_sigmoid_forward(Tensor self) -> (Tensor output, Tensor buffer)") + static ::std::tuple call(const at::Tensor & self); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self); +}; + +}} // namespace at::_ops diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_meta.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..dd1e1ce7d21b445bf0dc0cf8f3ff3e55f4d9160f --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/max_pool2d_with_indices_backward_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_max_pool2d_with_indices_backward : public at::impl::MetaBase { + + + void meta(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, const at::Tensor & indices); +}; + +} // namespace native +} // namespace at diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_ops.h 
b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..092c13d6725fba99cc11050758197e988f318a2c --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API maximum { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::maximum") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "maximum(Tensor self, Tensor other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API maximum_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::maximum") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "maximum.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/neg_cuda_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/neg_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d23d9963d4dc909ba091a20a941581cc56b2c70e --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/neg_cuda_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor neg(const at::Tensor & self); +TORCH_API at::Tensor & neg_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & neg_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & neg_(at::Tensor & self); + +} // namespace cuda +} // namespace at diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/nextafter_cpu_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/nextafter_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..4c320c412a3566746310eaa57d4f8ef4ec42fa43 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/nextafter_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor nextafter(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & nextafter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & nextafter_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +TORCH_API at::Tensor & nextafter_(at::Tensor & self, const at::Tensor & other); + +} // namespace cpu +} // namespace at diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss2d_forward_native.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss2d_forward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..c4cdb881e8192d6d387b9808ad9e229677447818 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/nll_loss2d_forward_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple nll_loss2d_forward_cpu(const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, int64_t ignore_index); +TORCH_API ::std::tuple nll_loss2d_forward_out_cpu(const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, int64_t ignore_index, at::Tensor & output, at::Tensor & total_weight); +TORCH_API ::std::tuple nll_loss2d_forward_cuda(const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, int64_t ignore_index); +TORCH_API ::std::tuple nll_loss2d_forward_out_cuda(const at::Tensor & self, const at::Tensor & target, const c10::optional & weight, int64_t reduction, int64_t ignore_index, at::Tensor & output, at::Tensor & total_weight); +} // namespace native +} // namespace at diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/nonzero_static.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/nonzero_static.h new file mode 100644 index 0000000000000000000000000000000000000000..80a6f7d357a59b9597f965d089e7460e01bdbcec --- /dev/null +++ 
b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/nonzero_static.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::nonzero_static.out(Tensor self, *, int size, int fill_value=-1, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & nonzero_static_out(at::Tensor & out, const at::Tensor & self, int64_t size, int64_t fill_value=-1) { + return at::_ops::nonzero_static_out::call(self, size, fill_value, out); +} +// aten::nonzero_static.out(Tensor self, *, int size, int fill_value=-1, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & nonzero_static_outf(const at::Tensor & self, int64_t size, int64_t fill_value, at::Tensor & out) { + return at::_ops::nonzero_static_out::call(self, size, fill_value, out); +} + +// aten::nonzero_static(Tensor self, *, int size, int fill_value=-1) -> Tensor +inline at::Tensor nonzero_static(const at::Tensor & self, int64_t size, int64_t fill_value=-1) { + return at::_ops::nonzero_static::call(self, size, fill_value); +} + +} diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/nuclear_norm_native.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/nuclear_norm_native.h new file mode 100644 index 0000000000000000000000000000000000000000..8e100929d32d2bb1a400f6156576d7a35d0375fe --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/nuclear_norm_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor nuclear_norm(const at::Tensor & self, bool keepdim=false); +TORCH_API at::Tensor & nuclear_norm_out(const at::Tensor & self, bool keepdim, at::Tensor & out); +TORCH_API at::Tensor nuclear_norm(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false); +TORCH_API at::Tensor & nuclear_norm_out(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out); +} // namespace native +} // namespace at diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/pow_cpu_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/pow_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..41d76bb8ada02710eb71c57ff21d47f878cd3cc9 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/pow_cpu_dispatch.h @@ -0,0 +1,33 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor pow(const at::Tensor & self, const at::Tensor & exponent); +TORCH_API at::Tensor & pow_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & exponent); +TORCH_API at::Tensor & pow_outf(const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out); +TORCH_API at::Tensor & pow_(at::Tensor & self, const at::Tensor & exponent); +TORCH_API at::Tensor pow(const at::Scalar & self, const at::Tensor & exponent); +TORCH_API at::Tensor & pow_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & exponent); +TORCH_API at::Tensor & pow_outf(const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out); +TORCH_API at::Tensor pow(const at::Tensor & self, const at::Scalar & exponent); +TORCH_API at::Tensor & pow_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & exponent); +TORCH_API at::Tensor & pow_outf(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out); +TORCH_API at::Tensor & pow_(at::Tensor & self, const at::Scalar & exponent); + +} // namespace cpu +} // namespace at diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/pow_ops.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/pow_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..0c2805b9ee8711849ecf285cec10332944ec15c7 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/pow_ops.h @@ -0,0 +1,105 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API pow_Tensor_Tensor_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::pow") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Tensor_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out); +}; + +struct TORCH_API pow_Tensor_Tensor { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::pow") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & exponent); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & exponent); +}; + +struct TORCH_API pow_Scalar_out { + using schema = at::Tensor & (const at::Scalar &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::pow") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out); +}; + +struct TORCH_API pow_Scalar { + using schema = at::Tensor (const at::Scalar &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::pow") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "pow.Scalar(Scalar self, Tensor exponent) -> Tensor") + static at::Tensor call(const at::Scalar & self, const at::Tensor & exponent); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Scalar & self, const at::Tensor & exponent); +}; + +struct TORCH_API pow_Tensor_Scalar_out { + using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::pow") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Scalar_out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out); +}; + +struct TORCH_API pow_Tensor_Scalar { + using schema = at::Tensor (const at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::pow") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Scalar & exponent); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & exponent); +}; + +struct TORCH_API pow__Scalar { + using schema = at::Tensor & (at::Tensor &, const at::Scalar &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::pow_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "pow_.Scalar(Tensor(a!) self, Scalar exponent) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Scalar & exponent); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & exponent); +}; + +struct TORCH_API pow__Tensor { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::pow_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "pow_.Tensor(Tensor(a!) self, Tensor exponent) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & exponent); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & exponent); +}; + +}} // namespace at::_ops diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/randn_compositeexplicitautograd_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/randn_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ca843b40a3a2e1f879cd5d3eb2a5d48632faed7a --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/randn_compositeexplicitautograd_dispatch.h @@ -0,0 +1,46 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor randn(at::IntArrayRef size, at::TensorOptions options={}); +TORCH_API at::Tensor randn(at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +TORCH_API at::Tensor randn_symint(c10::SymIntArrayRef size, at::TensorOptions options={}); +TORCH_API at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +TORCH_API at::Tensor randn(at::IntArrayRef size, c10::optional generator, at::TensorOptions options={}); +TORCH_API at::Tensor randn(at::IntArrayRef size, c10::optional generator, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +TORCH_API at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional generator, at::TensorOptions options={}); +TORCH_API at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional generator, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +TORCH_API at::Tensor randn(at::IntArrayRef size, c10::optional names, at::TensorOptions options={}); +TORCH_API at::Tensor randn(at::IntArrayRef size, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +TORCH_API at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional names, at::TensorOptions options={}); +TORCH_API at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +TORCH_API at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, c10::optional names); +TORCH_API at::Tensor & randn_outf(at::IntArrayRef size, c10::optional names, at::Tensor & out); +TORCH_API at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional names); +TORCH_API at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, c10::optional names, at::Tensor & out); +TORCH_API at::Tensor randn(at::IntArrayRef size, c10::optional generator, c10::optional names, at::TensorOptions options={}); +TORCH_API at::Tensor randn(at::IntArrayRef size, c10::optional generator, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +TORCH_API at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional generator, c10::optional names, at::TensorOptions options={}); +TORCH_API at::Tensor randn_symint(c10::SymIntArrayRef size, c10::optional generator, c10::optional names, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +TORCH_API at::Tensor & randn_out(at::Tensor & out, at::IntArrayRef size, c10::optional generator, c10::optional names); +TORCH_API at::Tensor & randn_outf(at::IntArrayRef size, c10::optional generator, c10::optional names, at::Tensor & out); +TORCH_API at::Tensor & randn_symint_out(at::Tensor & out, c10::SymIntArrayRef size, c10::optional generator, c10::optional names); +TORCH_API at::Tensor & randn_symint_outf(c10::SymIntArrayRef size, c10::optional generator, c10::optional names, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/round_meta_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/round_meta_dispatch.h new file mode 100644 index 
0000000000000000000000000000000000000000..2c5ea7bc95cf3837176bdda92f107f0065dfb3cb --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/round_meta_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor round(const at::Tensor & self); +TORCH_API at::Tensor & round_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & round_outf(const at::Tensor & self, at::Tensor & out); +TORCH_API at::Tensor & round_(at::Tensor & self); +TORCH_API at::Tensor round(const at::Tensor & self, int64_t decimals); +TORCH_API at::Tensor & round_out(at::Tensor & out, const at::Tensor & self, int64_t decimals); +TORCH_API at::Tensor & round_outf(const at::Tensor & self, int64_t decimals, at::Tensor & out); +TORCH_API at::Tensor & round_(at::Tensor & self, int64_t decimals); + +} // namespace meta +} // namespace at diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_csc_tensor.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_csc_tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..863f798352c22e02d1d259652cb3a369ef93e467 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_csc_tensor.h @@ -0,0 +1,43 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor +inline at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options) { + return at::_ops::sparse_csc_tensor_ccol_row_value_size::call(ccol_indices, row_indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::sparse_csc_tensor.ccol_row_value_size(Tensor ccol_indices, Tensor row_indices, Tensor values, int[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor +inline at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::sparse_csc_tensor_ccol_row_value_size::call(ccol_indices, row_indices, values, size, dtype, layout, device, pin_memory); +} + +// aten::sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? 
pin_memory=False) -> Tensor +inline at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, at::TensorOptions options) { + return at::_ops::sparse_csc_tensor_ccol_row_value::call(ccol_indices, row_indices, values, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt()); +} +// aten::sparse_csc_tensor.ccol_row_value(Tensor ccol_indices, Tensor row_indices, Tensor values, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=False) -> Tensor +inline at::Tensor sparse_csc_tensor(const at::Tensor & ccol_indices, const at::Tensor & row_indices, const at::Tensor & values, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory) { + return at::_ops::sparse_csc_tensor_ccol_row_value::call(ccol_indices, row_indices, values, dtype, layout, device, pin_memory); +} + +} diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/special_legendre_polynomial_p_meta.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/special_legendre_polynomial_p_meta.h new file mode 100644 index 0000000000000000000000000000000000000000..5c50144d11f74dca1f3692d778d4402334b270c2 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/special_legendre_polynomial_p_meta.h @@ -0,0 +1,27 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeMetaFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace meta { + +struct TORCH_API structured_special_legendre_polynomial_p : public TensorIteratorBase { + + + void meta(const at::Tensor & x, const at::Tensor & n); +}; + +} // namespace native +} // namespace at diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/tensor.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/tensor.h new file mode 100644 index 0000000000000000000000000000000000000000..2f72b7ef02637983d35f5049bdb6d1bd613cd2bd --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/tensor.h @@ -0,0 +1,30 @@ +#pragma once +#include +#include + +namespace at { + +// These functions are defined in ATen/Utils.cpp. 
+#define TENSOR(T, S) \ + TORCH_API Tensor tensor(ArrayRef values, const TensorOptions& options); \ + inline Tensor tensor( \ + std::initializer_list values, const TensorOptions& options) { \ + return at::tensor(ArrayRef(values), options); \ + } \ + inline Tensor tensor(T value, const TensorOptions& options) { \ + return at::tensor(ArrayRef(value), options); \ + } \ + inline Tensor tensor(ArrayRef values) { \ + return at::tensor(std::move(values), at::dtype(k##S)); \ + } \ + inline Tensor tensor(std::initializer_list values) { \ + return at::tensor(ArrayRef(values)); \ + } \ + inline Tensor tensor(T value) { \ + return at::tensor(ArrayRef(value)); \ + } +AT_FORALL_SCALAR_TYPES_AND3(Bool, Half, BFloat16, TENSOR) +AT_FORALL_COMPLEX_TYPES(TENSOR) +#undef TENSOR + +} // namespace at diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/tensor_split.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/tensor_split.h new file mode 100644 index 0000000000000000000000000000000000000000..dbe39dda0b935f51311a9aaa270fdcfaf6c1465d --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/tensor_split.h @@ -0,0 +1,74 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[] +inline ::std::vector tensor_split(const at::Tensor & self, int64_t sections, int64_t dim=0) { + return at::_ops::tensor_split_sections::call(self, sections, dim); +} +namespace symint { + template ::value>> + ::std::vector tensor_split(const at::Tensor & self, int64_t sections, int64_t dim=0) { + return at::_ops::tensor_split_sections::call(self, sections, dim); + } +} + +// aten::tensor_split.sections(Tensor(a -> *) self, SymInt sections, int dim=0) -> Tensor(a)[] +inline ::std::vector tensor_split_symint(const at::Tensor & self, c10::SymInt sections, int64_t dim=0) { + return at::_ops::tensor_split_sections::call(self, sections, dim); +} +namespace symint { + template ::value>> + ::std::vector tensor_split(const at::Tensor & self, c10::SymInt sections, int64_t dim=0) { + return at::_ops::tensor_split_sections::call(self, sections, dim); + } +} + +// aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[] +inline ::std::vector tensor_split(const at::Tensor & self, at::IntArrayRef indices, int64_t dim=0) { + return at::_ops::tensor_split_indices::call(self, c10::fromIntArrayRefSlow(indices), dim); +} +namespace symint { + template ::value>> + ::std::vector tensor_split(const at::Tensor & self, at::IntArrayRef indices, int64_t dim=0) { + return at::_ops::tensor_split_indices::call(self, c10::fromIntArrayRefSlow(indices), dim); + } +} + +// aten::tensor_split.indices(Tensor(a -> *) self, SymInt[] indices, int dim=0) -> Tensor(a)[] +inline ::std::vector tensor_split_symint(const at::Tensor & self, c10::SymIntArrayRef indices, int64_t dim=0) { + return at::_ops::tensor_split_indices::call(self, indices, dim); +} +namespace symint { + template ::value>> + ::std::vector tensor_split(const at::Tensor & self, c10::SymIntArrayRef indices, int64_t dim=0) { + return at::_ops::tensor_split_indices::call(self, indices, dim); + } +} + +// aten::tensor_split.tensor_indices_or_sections(Tensor(a -> *) self, Tensor tensor_indices_or_sections, int dim=0) -> Tensor(a)[] +inline ::std::vector 
tensor_split(const at::Tensor & self, const at::Tensor & tensor_indices_or_sections, int64_t dim=0) { + return at::_ops::tensor_split_tensor_indices_or_sections::call(self, tensor_indices_or_sections, dim); +} + +} diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/transpose_copy_native.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/transpose_copy_native.h new file mode 100644 index 0000000000000000000000000000000000000000..6d833095b859aa46440fd4f652e21974bcd36ad4 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/transpose_copy_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & transpose_copy_int_out(const at::Tensor & self, int64_t dim0, int64_t dim1, at::Tensor & out); +TORCH_API at::Tensor transpose_copy_int(const at::Tensor & self, int64_t dim0, int64_t dim1); +} // namespace native +} // namespace at diff --git a/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest2d_compositeexplicitautogradnonfunctional_dispatch.h b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest2d_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..b5c6aff8d7028e3070bd3118feb291746d01cfc3 --- /dev/null +++ b/llava_next/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_nearest2d_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor upsample_nearest2d(const at::Tensor & self, at::IntArrayRef output_size, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt); +TORCH_API at::Tensor upsample_nearest2d_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, c10::optional scales_h=c10::nullopt, c10::optional scales_w=c10::nullopt); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/openflamingo/lib/python3.10/site-packages/scipy/sparse/_sparsetools.cpython-310-x86_64-linux-gnu.so b/openflamingo/lib/python3.10/site-packages/scipy/sparse/_sparsetools.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..6bee618e7d52bb31a9700b33642a1dfbfb02ee91 --- /dev/null +++ b/openflamingo/lib/python3.10/site-packages/scipy/sparse/_sparsetools.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eefa5f8b2a452e465518c29630626ee17c5a5eff022e165520382cff2b1966a8 +size 4466608 diff --git a/parrot/lib/python3.10/site-packages/tzdata/zoneinfo/Canada/Central b/parrot/lib/python3.10/site-packages/tzdata/zoneinfo/Canada/Central new file mode 100644 index 0000000000000000000000000000000000000000..7e646d18e18851bfde743b379e52df4ec5b5a20f Binary files /dev/null and b/parrot/lib/python3.10/site-packages/tzdata/zoneinfo/Canada/Central differ diff --git a/parrot/lib/python3.10/site-packages/tzdata/zoneinfo/Canada/Eastern b/parrot/lib/python3.10/site-packages/tzdata/zoneinfo/Canada/Eastern new file mode 100644 index 0000000000000000000000000000000000000000..668e70d765dc3fb0eda16fb0f1932af607b53412 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/tzdata/zoneinfo/Canada/Eastern differ diff --git a/parrot/lib/python3.10/site-packages/tzdata/zoneinfo/Canada/Newfoundland b/parrot/lib/python3.10/site-packages/tzdata/zoneinfo/Canada/Newfoundland new file mode 100644 index 0000000000000000000000000000000000000000..94d790baaccb72298bb577041cf3c8400339a7da Binary files /dev/null and b/parrot/lib/python3.10/site-packages/tzdata/zoneinfo/Canada/Newfoundland differ diff --git a/parrot/lib/python3.10/site-packages/tzdata/zoneinfo/Canada/Saskatchewan b/parrot/lib/python3.10/site-packages/tzdata/zoneinfo/Canada/Saskatchewan new file mode 100644 index 0000000000000000000000000000000000000000..a3f8217a544ebb0993473bbffaae8e2d723c4ec3 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/tzdata/zoneinfo/Canada/Saskatchewan differ diff --git a/parrot/lib/python3.10/site-packages/tzdata/zoneinfo/Canada/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/tzdata/zoneinfo/Canada/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c6f24e5ea65261ff2f635fa52e114eafa132f0ef Binary files /dev/null and b/parrot/lib/python3.10/site-packages/tzdata/zoneinfo/Canada/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/tzdata/zoneinfo/Europe/Astrakhan b/parrot/lib/python3.10/site-packages/tzdata/zoneinfo/Europe/Astrakhan new file mode 100644 index 0000000000000000000000000000000000000000..aff8d82d2a2de0857f78217cc9d04a112d1e1a08 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/tzdata/zoneinfo/Europe/Astrakhan differ diff --git a/parrot/lib/python3.10/site-packages/tzdata/zoneinfo/Europe/Athens b/parrot/lib/python3.10/site-packages/tzdata/zoneinfo/Europe/Athens new 
file mode 100644 index 0000000000000000000000000000000000000000..231bf9c3b713e3676dbd8f3ced867973c601e104 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/tzdata/zoneinfo/Europe/Athens differ diff --git a/parrot/lib/python3.10/site-packages/tzdata/zoneinfo/Europe/Bucharest b/parrot/lib/python3.10/site-packages/tzdata/zoneinfo/Europe/Bucharest new file mode 100644 index 0000000000000000000000000000000000000000..c4a391e73b97e1342d352c5cc15a0bace202deef Binary files /dev/null and b/parrot/lib/python3.10/site-packages/tzdata/zoneinfo/Europe/Bucharest differ diff --git a/parrot/lib/python3.10/site-packages/tzdata/zoneinfo/Europe/Budapest b/parrot/lib/python3.10/site-packages/tzdata/zoneinfo/Europe/Budapest new file mode 100644 index 0000000000000000000000000000000000000000..940be4670a64ece1265dd28523d78a80f9008dd2 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/tzdata/zoneinfo/Europe/Budapest differ diff --git a/parrot/lib/python3.10/site-packages/tzdata/zoneinfo/Europe/Paris b/parrot/lib/python3.10/site-packages/tzdata/zoneinfo/Europe/Paris new file mode 100644 index 0000000000000000000000000000000000000000..00a27264c2cb3e28f2f46e5c267e12d575236a9d Binary files /dev/null and b/parrot/lib/python3.10/site-packages/tzdata/zoneinfo/Europe/Paris differ diff --git a/parrot/lib/python3.10/site-packages/tzdata/zoneinfo/Europe/Vilnius b/parrot/lib/python3.10/site-packages/tzdata/zoneinfo/Europe/Vilnius new file mode 100644 index 0000000000000000000000000000000000000000..43c3d7f1089366e1c48297906c2693712ac6d99c Binary files /dev/null and b/parrot/lib/python3.10/site-packages/tzdata/zoneinfo/Europe/Vilnius differ diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/__pycache__/__init__.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d320af426b72300ba08441c6cf80366123f9b011 Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/__pycache__/__init__.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/__pycache__/api.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/__pycache__/api.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4be49fcbda662f8affbbd4dcc5c61af44223d34d Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/__pycache__/api.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/__pycache__/array_manager.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/__pycache__/array_manager.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..da090cfbeb10cbc768c1c30870813233d3a04b2f Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/__pycache__/array_manager.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/__pycache__/base.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/__pycache__/base.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..28f218d97f2cff2aad5836595c93977194dfef18 Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/__pycache__/base.cpython-310.pyc differ diff --git 
a/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/__pycache__/blocks.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/__pycache__/blocks.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..281a8784e55b2dd9baaaa01c67499a675e6121ee Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/__pycache__/blocks.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/__pycache__/concat.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/__pycache__/concat.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3a2692482bf7997fa1e296197493fb32ebe46489 Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/__pycache__/concat.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/__pycache__/construction.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/__pycache__/construction.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f3d722a7ca18330dea1dbda399f8ac61aac4541e Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/__pycache__/construction.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/__pycache__/managers.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/__pycache__/managers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d585eb45c9ce60f48a3ae413ccc6d72a2b589b56 Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/__pycache__/managers.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/__pycache__/ops.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/__pycache__/ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..db0d95b7db906252772817a53a2525c6cf43aee8 Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/__pycache__/ops.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/api.py b/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/api.py new file mode 100644 index 0000000000000000000000000000000000000000..b0b3937ca47ea06c42b4b51964f6a74830a5d9ee --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/api.py @@ -0,0 +1,156 @@ +""" +This is a pseudo-public API for downstream libraries. 
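# A minimal usage sketch (not from the original source) of the make_block() helper
# defined further down in this module; the array values and placement are made up,
# and it assumes a pandas build that ships this pseudo-public module unchanged.
import numpy as np
from pandas.core.internals.api import make_block

vals = np.arange(6, dtype=np.float64).reshape(2, 3)  # 2 "columns" x 3 rows, block layout
blk = make_block(vals, placement=[0, 1], ndim=2)     # placement: column positions 0 and 1
print(type(blk).__name__, blk.shape, blk.dtype)      # a numpy-backed Block, (2, 3), float64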
We ask that downstream +authors + +1) Try to avoid using internals directly altogether, and failing that, +2) Use only functions exposed here (or in core.internals) + +""" +from __future__ import annotations + +from typing import TYPE_CHECKING + +import numpy as np + +from pandas._libs.internals import BlockPlacement + +from pandas.core.dtypes.common import pandas_dtype +from pandas.core.dtypes.dtypes import ( + DatetimeTZDtype, + PeriodDtype, +) + +from pandas.core.arrays import DatetimeArray +from pandas.core.construction import extract_array +from pandas.core.internals.blocks import ( + check_ndim, + ensure_block_shape, + extract_pandas_array, + get_block_type, + maybe_coerce_values, +) + +if TYPE_CHECKING: + from pandas._typing import Dtype + + from pandas.core.internals.blocks import Block + + +def make_block( + values, placement, klass=None, ndim=None, dtype: Dtype | None = None +) -> Block: + """ + This is a pseudo-public analogue to blocks.new_block. + + We ask that downstream libraries use this rather than any fully-internal + APIs, including but not limited to: + + - core.internals.blocks.make_block + - Block.make_block + - Block.make_block_same_class + - Block.__init__ + """ + if dtype is not None: + dtype = pandas_dtype(dtype) + + values, dtype = extract_pandas_array(values, dtype, ndim) + + from pandas.core.internals.blocks import ( + DatetimeTZBlock, + ExtensionBlock, + ) + + if klass is ExtensionBlock and isinstance(values.dtype, PeriodDtype): + # GH-44681 changed PeriodArray to be stored in the 2D + # NDArrayBackedExtensionBlock instead of ExtensionBlock + # -> still allow ExtensionBlock to be passed in this case for back compat + klass = None + + if klass is None: + dtype = dtype or values.dtype + klass = get_block_type(dtype) + + elif klass is DatetimeTZBlock and not isinstance(values.dtype, DatetimeTZDtype): + # pyarrow calls get here + values = DatetimeArray._simple_new( + # error: Argument "dtype" to "_simple_new" of "DatetimeArray" has + # incompatible type "Union[ExtensionDtype, dtype[Any], None]"; + # expected "Union[dtype[datetime64], DatetimeTZDtype]" + values, + dtype=dtype, # type: ignore[arg-type] + ) + + if not isinstance(placement, BlockPlacement): + placement = BlockPlacement(placement) + + ndim = maybe_infer_ndim(values, placement, ndim) + if isinstance(values.dtype, (PeriodDtype, DatetimeTZDtype)): + # GH#41168 ensure we can pass 1D dt64tz values + # More generally, any EA dtype that isn't is_1d_only_ea_dtype + values = extract_array(values, extract_numpy=True) + values = ensure_block_shape(values, ndim) + + check_ndim(values, placement, ndim) + values = maybe_coerce_values(values) + return klass(values, ndim=ndim, placement=placement) + + +def maybe_infer_ndim(values, placement: BlockPlacement, ndim: int | None) -> int: + """ + If `ndim` is not provided, infer it from placement and values. + """ + if ndim is None: + # GH#38134 Block constructor now assumes ndim is not None + if not isinstance(values.dtype, np.dtype): + if len(placement) != 1: + ndim = 1 + else: + ndim = 2 + else: + ndim = values.ndim + return ndim + + +def __getattr__(name: str): + # GH#55139 + import warnings + + if name in [ + "Block", + "ExtensionBlock", + "DatetimeTZBlock", + "create_block_manager_from_blocks", + ]: + # GH#33892 + warnings.warn( + f"{name} is deprecated and will be removed in a future version. 
" + "Use public APIs instead.", + DeprecationWarning, + # https://github.com/pandas-dev/pandas/pull/55139#pullrequestreview-1720690758 + # on hard-coding stacklevel + stacklevel=2, + ) + + if name == "create_block_manager_from_blocks": + from pandas.core.internals.managers import create_block_manager_from_blocks + + return create_block_manager_from_blocks + + elif name == "Block": + from pandas.core.internals.blocks import Block + + return Block + + elif name == "DatetimeTZBlock": + from pandas.core.internals.blocks import DatetimeTZBlock + + return DatetimeTZBlock + + elif name == "ExtensionBlock": + from pandas.core.internals.blocks import ExtensionBlock + + return ExtensionBlock + + raise AttributeError( + f"module 'pandas.core.internals.api' has no attribute '{name}'" + ) diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/array_manager.py b/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/array_manager.py new file mode 100644 index 0000000000000000000000000000000000000000..e253f82256a5f6dd8b277b576a33597355d69dcc --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/array_manager.py @@ -0,0 +1,1340 @@ +""" +Experimental manager based on storing a collection of 1D arrays +""" +from __future__ import annotations + +import itertools +from typing import ( + TYPE_CHECKING, + Callable, + Literal, +) + +import numpy as np + +from pandas._libs import ( + NaT, + lib, +) + +from pandas.core.dtypes.astype import ( + astype_array, + astype_array_safe, +) +from pandas.core.dtypes.cast import ( + ensure_dtype_can_hold_na, + find_common_type, + infer_dtype_from_scalar, + np_find_common_type, +) +from pandas.core.dtypes.common import ( + ensure_platform_int, + is_datetime64_ns_dtype, + is_integer, + is_numeric_dtype, + is_object_dtype, + is_timedelta64_ns_dtype, +) +from pandas.core.dtypes.dtypes import ExtensionDtype +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCSeries, +) +from pandas.core.dtypes.missing import ( + array_equals, + isna, + na_value_for_dtype, +) + +import pandas.core.algorithms as algos +from pandas.core.array_algos.quantile import quantile_compat +from pandas.core.array_algos.take import take_1d +from pandas.core.arrays import ( + DatetimeArray, + ExtensionArray, + NumpyExtensionArray, + TimedeltaArray, +) +from pandas.core.construction import ( + ensure_wrapped_if_datetimelike, + extract_array, + sanitize_array, +) +from pandas.core.indexers import ( + maybe_convert_indices, + validate_indices, +) +from pandas.core.indexes.api import ( + Index, + ensure_index, +) +from pandas.core.indexes.base import get_values_for_csv +from pandas.core.internals.base import ( + DataManager, + SingleDataManager, + ensure_np_dtype, + interleaved_dtype, +) +from pandas.core.internals.blocks import ( + BlockPlacement, + ensure_block_shape, + external_values, + extract_pandas_array, + maybe_coerce_values, + new_block, +) +from pandas.core.internals.managers import make_na_array + +if TYPE_CHECKING: + from collections.abc import Hashable + + from pandas._typing import ( + ArrayLike, + AxisInt, + DtypeObj, + QuantileInterpolation, + Self, + npt, + ) + + +class BaseArrayManager(DataManager): + """ + Core internal data structure to implement DataFrame and Series. + + Alternative to the BlockManager, storing a list of 1D arrays instead of + Blocks. 
+ + This is *not* a public API class + + Parameters + ---------- + arrays : Sequence of arrays + axes : Sequence of Index + verify_integrity : bool, default True + + """ + + __slots__ = [ + "_axes", # private attribute, because 'axes' has different order, see below + "arrays", + ] + + arrays: list[np.ndarray | ExtensionArray] + _axes: list[Index] + + def __init__( + self, + arrays: list[np.ndarray | ExtensionArray], + axes: list[Index], + verify_integrity: bool = True, + ) -> None: + raise NotImplementedError + + def make_empty(self, axes=None) -> Self: + """Return an empty ArrayManager with the items axis of len 0 (no columns)""" + if axes is None: + axes = [self.axes[1:], Index([])] + + arrays: list[np.ndarray | ExtensionArray] = [] + return type(self)(arrays, axes) + + @property + def items(self) -> Index: + return self._axes[-1] + + @property + # error: Signature of "axes" incompatible with supertype "DataManager" + def axes(self) -> list[Index]: # type: ignore[override] + # mypy doesn't work to override attribute with property + # see https://github.com/python/mypy/issues/4125 + """Axes is BlockManager-compatible order (columns, rows)""" + return [self._axes[1], self._axes[0]] + + @property + def shape_proper(self) -> tuple[int, ...]: + # this returns (n_rows, n_columns) + return tuple(len(ax) for ax in self._axes) + + @staticmethod + def _normalize_axis(axis: AxisInt) -> int: + # switch axis + axis = 1 if axis == 0 else 0 + return axis + + def set_axis(self, axis: AxisInt, new_labels: Index) -> None: + # Caller is responsible for ensuring we have an Index object. + self._validate_set_axis(axis, new_labels) + axis = self._normalize_axis(axis) + self._axes[axis] = new_labels + + def get_dtypes(self) -> npt.NDArray[np.object_]: + return np.array([arr.dtype for arr in self.arrays], dtype="object") + + def add_references(self, mgr: BaseArrayManager) -> None: + """ + Only implemented on the BlockManager level + """ + return + + def __getstate__(self): + return self.arrays, self._axes + + def __setstate__(self, state) -> None: + self.arrays = state[0] + self._axes = state[1] + + def __repr__(self) -> str: + output = type(self).__name__ + output += f"\nIndex: {self._axes[0]}" + if self.ndim == 2: + output += f"\nColumns: {self._axes[1]}" + output += f"\n{len(self.arrays)} arrays:" + for arr in self.arrays: + output += f"\n{arr.dtype}" + return output + + def apply( + self, + f, + align_keys: list[str] | None = None, + **kwargs, + ) -> Self: + """ + Iterate over the arrays, collect and create a new ArrayManager. + + Parameters + ---------- + f : str or callable + Name of the Array method to apply. 
+ align_keys: List[str] or None, default None + **kwargs + Keywords to pass to `f` + + Returns + ------- + ArrayManager + """ + assert "filter" not in kwargs + + align_keys = align_keys or [] + result_arrays: list[ArrayLike] = [] + # fillna: Series/DataFrame is responsible for making sure value is aligned + + aligned_args = {k: kwargs[k] for k in align_keys} + + if f == "apply": + f = kwargs.pop("func") + + for i, arr in enumerate(self.arrays): + if aligned_args: + for k, obj in aligned_args.items(): + if isinstance(obj, (ABCSeries, ABCDataFrame)): + # The caller is responsible for ensuring that + # obj.axes[-1].equals(self.items) + if obj.ndim == 1: + kwargs[k] = obj.iloc[i] + else: + kwargs[k] = obj.iloc[:, i]._values + else: + # otherwise we have an array-like + kwargs[k] = obj[i] + + if callable(f): + applied = f(arr, **kwargs) + else: + applied = getattr(arr, f)(**kwargs) + + result_arrays.append(applied) + + new_axes = self._axes + return type(self)(result_arrays, new_axes) + + def apply_with_block(self, f, align_keys=None, **kwargs) -> Self: + # switch axis to follow BlockManager logic + swap_axis = True + if f == "interpolate": + swap_axis = False + if swap_axis and "axis" in kwargs and self.ndim == 2: + kwargs["axis"] = 1 if kwargs["axis"] == 0 else 0 + + align_keys = align_keys or [] + aligned_args = {k: kwargs[k] for k in align_keys} + + result_arrays = [] + + for i, arr in enumerate(self.arrays): + if aligned_args: + for k, obj in aligned_args.items(): + if isinstance(obj, (ABCSeries, ABCDataFrame)): + # The caller is responsible for ensuring that + # obj.axes[-1].equals(self.items) + if obj.ndim == 1: + if self.ndim == 2: + kwargs[k] = obj.iloc[slice(i, i + 1)]._values + else: + kwargs[k] = obj.iloc[:]._values + else: + kwargs[k] = obj.iloc[:, [i]]._values + else: + # otherwise we have an ndarray + if obj.ndim == 2: + kwargs[k] = obj[[i]] + + if isinstance(arr.dtype, np.dtype) and not isinstance(arr, np.ndarray): + # i.e. TimedeltaArray, DatetimeArray with tz=None. Need to + # convert for the Block constructors. 
+ arr = np.asarray(arr) + + arr = maybe_coerce_values(arr) + if self.ndim == 2: + arr = ensure_block_shape(arr, 2) + bp = BlockPlacement(slice(0, 1, 1)) + block = new_block(arr, placement=bp, ndim=2) + else: + bp = BlockPlacement(slice(0, len(self), 1)) + block = new_block(arr, placement=bp, ndim=1) + + applied = getattr(block, f)(**kwargs) + if isinstance(applied, list): + applied = applied[0] + arr = applied.values + if self.ndim == 2 and arr.ndim == 2: + # 2D for np.ndarray or DatetimeArray/TimedeltaArray + assert len(arr) == 1 + # error: No overload variant of "__getitem__" of "ExtensionArray" + # matches argument type "Tuple[int, slice]" + arr = arr[0, :] # type: ignore[call-overload] + result_arrays.append(arr) + + return type(self)(result_arrays, self._axes) + + def setitem(self, indexer, value, warn: bool = True) -> Self: + return self.apply_with_block("setitem", indexer=indexer, value=value) + + def diff(self, n: int) -> Self: + assert self.ndim == 2 # caller ensures + return self.apply(algos.diff, n=n) + + def astype(self, dtype, copy: bool | None = False, errors: str = "raise") -> Self: + if copy is None: + copy = True + + return self.apply(astype_array_safe, dtype=dtype, copy=copy, errors=errors) + + def convert(self, copy: bool | None) -> Self: + if copy is None: + copy = True + + def _convert(arr): + if is_object_dtype(arr.dtype): + # extract NumpyExtensionArray for tests that patch + # NumpyExtensionArray._typ + arr = np.asarray(arr) + result = lib.maybe_convert_objects( + arr, + convert_non_numeric=True, + ) + if result is arr and copy: + return arr.copy() + return result + else: + return arr.copy() if copy else arr + + return self.apply(_convert) + + def get_values_for_csv( + self, *, float_format, date_format, decimal, na_rep: str = "nan", quoting=None + ) -> Self: + return self.apply( + get_values_for_csv, + na_rep=na_rep, + quoting=quoting, + float_format=float_format, + date_format=date_format, + decimal=decimal, + ) + + @property + def any_extension_types(self) -> bool: + """Whether any of the blocks in this manager are extension blocks""" + return False # any(block.is_extension for block in self.blocks) + + @property + def is_view(self) -> bool: + """return a boolean if we are a single block and are a view""" + # TODO what is this used for? + return False + + @property + def is_single_block(self) -> bool: + return len(self.arrays) == 1 + + def _get_data_subset(self, predicate: Callable) -> Self: + indices = [i for i, arr in enumerate(self.arrays) if predicate(arr)] + arrays = [self.arrays[i] for i in indices] + # TODO copy? + # Note: using Index.take ensures we can retain e.g. DatetimeIndex.freq, + # see test_describe_datetime_columns + taker = np.array(indices, dtype="intp") + new_cols = self._axes[1].take(taker) + new_axes = [self._axes[0], new_cols] + return type(self)(arrays, new_axes, verify_integrity=False) + + def get_bool_data(self, copy: bool = False) -> Self: + """ + Select columns that are bool-dtype and object-dtype columns that are all-bool. + + Parameters + ---------- + copy : bool, default False + Whether to copy the blocks + """ + return self._get_data_subset(lambda x: x.dtype == np.dtype(bool)) + + def get_numeric_data(self, copy: bool = False) -> Self: + """ + Select columns that have a numeric dtype. 
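# Roughly what the numeric-dtype predicate above keeps, observed through the public
# DataFrame API (illustrative only; the concrete manager class depends on the
# configured backend).
import pandas as pd

df = pd.DataFrame({"i": [1, 2], "f": [0.5, 1.5], "s": ["a", "b"], "flag": [True, False]})
numeric = df._mgr.get_numeric_data()
print(list(numeric.items))   # int, float and bool columns survive; the object column does not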
+ + Parameters + ---------- + copy : bool, default False + Whether to copy the blocks + """ + return self._get_data_subset( + lambda arr: is_numeric_dtype(arr.dtype) + or getattr(arr.dtype, "_is_numeric", False) + ) + + def copy(self, deep: bool | Literal["all"] | None = True) -> Self: + """ + Make deep or shallow copy of ArrayManager + + Parameters + ---------- + deep : bool or string, default True + If False, return shallow copy (do not copy data) + If 'all', copy data and a deep copy of the index + + Returns + ------- + BlockManager + """ + if deep is None: + # ArrayManager does not yet support CoW, so deep=None always means + # deep=True for now + deep = True + + # this preserves the notion of view copying of axes + if deep: + # hit in e.g. tests.io.json.test_pandas + + def copy_func(ax): + return ax.copy(deep=True) if deep == "all" else ax.view() + + new_axes = [copy_func(ax) for ax in self._axes] + else: + new_axes = list(self._axes) + + if deep: + new_arrays = [arr.copy() for arr in self.arrays] + else: + new_arrays = list(self.arrays) + return type(self)(new_arrays, new_axes, verify_integrity=False) + + def reindex_indexer( + self, + new_axis, + indexer, + axis: AxisInt, + fill_value=None, + allow_dups: bool = False, + copy: bool | None = True, + # ignored keywords + only_slice: bool = False, + # ArrayManager specific keywords + use_na_proxy: bool = False, + ) -> Self: + axis = self._normalize_axis(axis) + return self._reindex_indexer( + new_axis, + indexer, + axis, + fill_value, + allow_dups, + copy, + use_na_proxy, + ) + + def _reindex_indexer( + self, + new_axis, + indexer: npt.NDArray[np.intp] | None, + axis: AxisInt, + fill_value=None, + allow_dups: bool = False, + copy: bool | None = True, + use_na_proxy: bool = False, + ) -> Self: + """ + Parameters + ---------- + new_axis : Index + indexer : ndarray[intp] or None + axis : int + fill_value : object, default None + allow_dups : bool, default False + copy : bool, default True + + + pandas-indexer with -1's only. + """ + if copy is None: + # ArrayManager does not yet support CoW, so deep=None always means + # deep=True for now + copy = True + + if indexer is None: + if new_axis is self._axes[axis] and not copy: + return self + + result = self.copy(deep=copy) + result._axes = list(self._axes) + result._axes[axis] = new_axis + return result + + # some axes don't allow reindexing with dups + if not allow_dups: + self._axes[axis]._validate_can_reindex(indexer) + + if axis >= self.ndim: + raise IndexError("Requested axis not found in manager") + + if axis == 1: + new_arrays = [] + for i in indexer: + if i == -1: + arr = self._make_na_array( + fill_value=fill_value, use_na_proxy=use_na_proxy + ) + else: + arr = self.arrays[i] + if copy: + arr = arr.copy() + new_arrays.append(arr) + + else: + validate_indices(indexer, len(self._axes[0])) + indexer = ensure_platform_int(indexer) + mask = indexer == -1 + needs_masking = mask.any() + new_arrays = [ + take_1d( + arr, + indexer, + allow_fill=needs_masking, + fill_value=fill_value, + mask=mask, + # if fill_value is not None else blk.fill_value + ) + for arr in self.arrays + ] + + new_axes = list(self._axes) + new_axes[axis] = new_axis + + return type(self)(new_arrays, new_axes, verify_integrity=False) + + def take( + self, + indexer: npt.NDArray[np.intp], + axis: AxisInt = 1, + verify: bool = True, + ) -> Self: + """ + Take items along any axis. 
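# The manager-level take above is what ultimately backs positional row selection such
# as DataFrame.take; a small sketch of the equivalent public call (illustrative only).
import pandas as pd

df = pd.DataFrame({"a": [10, 20, 30]}, index=["x", "y", "z"])
print(df.take([2, 0]))   # rows "z" then "x", selected purely by position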
+ """ + assert isinstance(indexer, np.ndarray), type(indexer) + assert indexer.dtype == np.intp, indexer.dtype + + axis = self._normalize_axis(axis) + + if not indexer.ndim == 1: + raise ValueError("indexer should be 1-dimensional") + + n = self.shape_proper[axis] + indexer = maybe_convert_indices(indexer, n, verify=verify) + + new_labels = self._axes[axis].take(indexer) + return self._reindex_indexer( + new_axis=new_labels, indexer=indexer, axis=axis, allow_dups=True + ) + + def _make_na_array(self, fill_value=None, use_na_proxy: bool = False): + if use_na_proxy: + assert fill_value is None + return NullArrayProxy(self.shape_proper[0]) + + if fill_value is None: + fill_value = np.nan + + dtype, fill_value = infer_dtype_from_scalar(fill_value) + array_values = make_na_array(dtype, self.shape_proper[:1], fill_value) + return array_values + + def _equal_values(self, other) -> bool: + """ + Used in .equals defined in base class. Only check the column values + assuming shape and indexes have already been checked. + """ + for left, right in zip(self.arrays, other.arrays): + if not array_equals(left, right): + return False + return True + + # TODO + # to_dict + + +class ArrayManager(BaseArrayManager): + @property + def ndim(self) -> Literal[2]: + return 2 + + def __init__( + self, + arrays: list[np.ndarray | ExtensionArray], + axes: list[Index], + verify_integrity: bool = True, + ) -> None: + # Note: we are storing the axes in "_axes" in the (row, columns) order + # which contrasts the order how it is stored in BlockManager + self._axes = axes + self.arrays = arrays + + if verify_integrity: + self._axes = [ensure_index(ax) for ax in axes] + arrays = [extract_pandas_array(x, None, 1)[0] for x in arrays] + self.arrays = [maybe_coerce_values(arr) for arr in arrays] + self._verify_integrity() + + def _verify_integrity(self) -> None: + n_rows, n_columns = self.shape_proper + if not len(self.arrays) == n_columns: + raise ValueError( + "Number of passed arrays must equal the size of the column Index: " + f"{len(self.arrays)} arrays vs {n_columns} columns." + ) + for arr in self.arrays: + if not len(arr) == n_rows: + raise ValueError( + "Passed arrays should have the same length as the rows Index: " + f"{len(arr)} vs {n_rows} rows" + ) + if not isinstance(arr, (np.ndarray, ExtensionArray)): + raise ValueError( + "Passed arrays should be np.ndarray or ExtensionArray instances, " + f"got {type(arr)} instead" + ) + if not arr.ndim == 1: + raise ValueError( + "Passed arrays should be 1-dimensional, got array with " + f"{arr.ndim} dimensions instead." + ) + + # -------------------------------------------------------------------- + # Indexing + + def fast_xs(self, loc: int) -> SingleArrayManager: + """ + Return the array corresponding to `frame.iloc[loc]`. 
+ + Parameters + ---------- + loc : int + + Returns + ------- + np.ndarray or ExtensionArray + """ + dtype = interleaved_dtype([arr.dtype for arr in self.arrays]) + + values = [arr[loc] for arr in self.arrays] + if isinstance(dtype, ExtensionDtype): + result = dtype.construct_array_type()._from_sequence(values, dtype=dtype) + # for datetime64/timedelta64, the np.ndarray constructor cannot handle pd.NaT + elif is_datetime64_ns_dtype(dtype): + result = DatetimeArray._from_sequence(values, dtype=dtype)._ndarray + elif is_timedelta64_ns_dtype(dtype): + result = TimedeltaArray._from_sequence(values, dtype=dtype)._ndarray + else: + result = np.array(values, dtype=dtype) + return SingleArrayManager([result], [self._axes[1]]) + + def get_slice(self, slobj: slice, axis: AxisInt = 0) -> ArrayManager: + axis = self._normalize_axis(axis) + + if axis == 0: + arrays = [arr[slobj] for arr in self.arrays] + elif axis == 1: + arrays = self.arrays[slobj] + + new_axes = list(self._axes) + new_axes[axis] = new_axes[axis]._getitem_slice(slobj) + + return type(self)(arrays, new_axes, verify_integrity=False) + + def iget(self, i: int) -> SingleArrayManager: + """ + Return the data as a SingleArrayManager. + """ + values = self.arrays[i] + return SingleArrayManager([values], [self._axes[0]]) + + def iget_values(self, i: int) -> ArrayLike: + """ + Return the data for column i as the values (ndarray or ExtensionArray). + """ + return self.arrays[i] + + @property + def column_arrays(self) -> list[ArrayLike]: + """ + Used in the JSON C code to access column arrays. + """ + + return [np.asarray(arr) for arr in self.arrays] + + def iset( + self, + loc: int | slice | np.ndarray, + value: ArrayLike, + inplace: bool = False, + refs=None, + ) -> None: + """ + Set new column(s). + + This changes the ArrayManager in-place, but replaces (an) existing + column(s), not changing column values in-place). + + Parameters + ---------- + loc : integer, slice or boolean mask + Positional location (already bounds checked) + value : np.ndarray or ExtensionArray + inplace : bool, default False + Whether overwrite existing array as opposed to replacing it. + """ + # single column -> single integer index + if lib.is_integer(loc): + # TODO can we avoid needing to unpack this here? 
That means converting + # DataFrame into 1D array when loc is an integer + if isinstance(value, np.ndarray) and value.ndim == 2: + assert value.shape[1] == 1 + value = value[:, 0] + + # TODO we receive a datetime/timedelta64 ndarray from DataFrame._iset_item + # but we should avoid that and pass directly the proper array + value = maybe_coerce_values(value) + + assert isinstance(value, (np.ndarray, ExtensionArray)) + assert value.ndim == 1 + assert len(value) == len(self._axes[0]) + self.arrays[loc] = value + return + + # multiple columns -> convert slice or array to integer indices + elif isinstance(loc, slice): + indices: range | np.ndarray = range( + loc.start if loc.start is not None else 0, + loc.stop if loc.stop is not None else self.shape_proper[1], + loc.step if loc.step is not None else 1, + ) + else: + assert isinstance(loc, np.ndarray) + assert loc.dtype == "bool" + indices = np.nonzero(loc)[0] + + assert value.ndim == 2 + assert value.shape[0] == len(self._axes[0]) + + for value_idx, mgr_idx in enumerate(indices): + # error: No overload variant of "__getitem__" of "ExtensionArray" matches + # argument type "Tuple[slice, int]" + value_arr = value[:, value_idx] # type: ignore[call-overload] + self.arrays[mgr_idx] = value_arr + return + + def column_setitem( + self, loc: int, idx: int | slice | np.ndarray, value, inplace_only: bool = False + ) -> None: + """ + Set values ("setitem") into a single column (not setting the full column). + + This is a method on the ArrayManager level, to avoid creating an + intermediate Series at the DataFrame level (`s = df[loc]; s[idx] = value`) + """ + if not is_integer(loc): + raise TypeError("The column index should be an integer") + arr = self.arrays[loc] + mgr = SingleArrayManager([arr], [self._axes[0]]) + if inplace_only: + mgr.setitem_inplace(idx, value) + else: + new_mgr = mgr.setitem((idx,), value) + # update existing ArrayManager in-place + self.arrays[loc] = new_mgr.arrays[0] + + def insert(self, loc: int, item: Hashable, value: ArrayLike, refs=None) -> None: + """ + Insert item at selected position. + + Parameters + ---------- + loc : int + item : hashable + value : np.ndarray or ExtensionArray + """ + # insert to the axis; this could possibly raise a TypeError + new_axis = self.items.insert(loc, item) + + value = extract_array(value, extract_numpy=True) + if value.ndim == 2: + if value.shape[0] == 1: + # error: No overload variant of "__getitem__" of "ExtensionArray" + # matches argument type "Tuple[int, slice]" + value = value[0, :] # type: ignore[call-overload] + else: + raise ValueError( + f"Expected a 1D array, got an array with shape {value.shape}" + ) + value = maybe_coerce_values(value) + + # TODO self.arrays can be empty + # assert len(value) == len(self.arrays[0]) + + # TODO is this copy needed? + arrays = self.arrays.copy() + arrays.insert(loc, value) + + self.arrays = arrays + self._axes[1] = new_axis + + def idelete(self, indexer) -> ArrayManager: + """ + Delete selected locations in-place (new block and array, same BlockManager) + """ + to_keep = np.ones(self.shape[0], dtype=np.bool_) + to_keep[indexer] = False + + self.arrays = [self.arrays[i] for i in np.nonzero(to_keep)[0]] + self._axes = [self._axes[0], self._axes[1][to_keep]] + return self + + # -------------------------------------------------------------------- + # Array-wise Operation + + def grouped_reduce(self, func: Callable) -> Self: + """ + Apply grouped reduction function columnwise, returning a new ArrayManager. 
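# grouped_reduce is the manager-level hook behind column-wise groupby reductions; a
# sketch of the observable behaviour through the public API (illustrative only).
import pandas as pd

df = pd.DataFrame({"key": ["a", "a", "b"], "x": [1, 2, 3], "y": [1.0, 2.0, 3.0]})
print(df.groupby("key").sum())   # each value column is reduced independently per group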
+ + Parameters + ---------- + func : grouped reduction function + + Returns + ------- + ArrayManager + """ + result_arrays: list[np.ndarray] = [] + result_indices: list[int] = [] + + for i, arr in enumerate(self.arrays): + # grouped_reduce functions all expect 2D arrays + arr = ensure_block_shape(arr, ndim=2) + res = func(arr) + if res.ndim == 2: + # reverse of ensure_block_shape + assert res.shape[0] == 1 + res = res[0] + + result_arrays.append(res) + result_indices.append(i) + + if len(result_arrays) == 0: + nrows = 0 + else: + nrows = result_arrays[0].shape[0] + index = Index(range(nrows)) + + columns = self.items + + # error: Argument 1 to "ArrayManager" has incompatible type "List[ndarray]"; + # expected "List[Union[ndarray, ExtensionArray]]" + return type(self)(result_arrays, [index, columns]) # type: ignore[arg-type] + + def reduce(self, func: Callable) -> Self: + """ + Apply reduction function column-wise, returning a single-row ArrayManager. + + Parameters + ---------- + func : reduction function + + Returns + ------- + ArrayManager + """ + result_arrays: list[np.ndarray] = [] + for i, arr in enumerate(self.arrays): + res = func(arr, axis=0) + + # TODO NaT doesn't preserve dtype, so we need to ensure to create + # a timedelta result array if original was timedelta + # what if datetime results in timedelta? (eg std) + dtype = arr.dtype if res is NaT else None + result_arrays.append( + sanitize_array([res], None, dtype=dtype) # type: ignore[arg-type] + ) + + index = Index._simple_new(np.array([None], dtype=object)) # placeholder + columns = self.items + + # error: Argument 1 to "ArrayManager" has incompatible type "List[ndarray]"; + # expected "List[Union[ndarray, ExtensionArray]]" + new_mgr = type(self)(result_arrays, [index, columns]) # type: ignore[arg-type] + return new_mgr + + def operate_blockwise(self, other: ArrayManager, array_op) -> ArrayManager: + """ + Apply array_op blockwise with another (aligned) BlockManager. + """ + # TODO what if `other` is BlockManager ? + left_arrays = self.arrays + right_arrays = other.arrays + result_arrays = [ + array_op(left, right) for left, right in zip(left_arrays, right_arrays) + ] + return type(self)(result_arrays, self._axes) + + def quantile( + self, + *, + qs: Index, # with dtype float64 + transposed: bool = False, + interpolation: QuantileInterpolation = "linear", + ) -> ArrayManager: + arrs = [ensure_block_shape(x, 2) for x in self.arrays] + new_arrs = [ + quantile_compat(x, np.asarray(qs._values), interpolation) for x in arrs + ] + for i, arr in enumerate(new_arrs): + if arr.ndim == 2: + assert arr.shape[0] == 1, arr.shape + new_arrs[i] = arr[0] + + axes = [qs, self._axes[1]] + return type(self)(new_arrs, axes) + + # ---------------------------------------------------------------- + + def unstack(self, unstacker, fill_value) -> ArrayManager: + """ + Return a BlockManager with all blocks unstacked. + + Parameters + ---------- + unstacker : reshape._Unstacker + fill_value : Any + fill_value for newly introduced missing values. 
+ + Returns + ------- + unstacked : BlockManager + """ + indexer, _ = unstacker._indexer_and_to_sort + if unstacker.mask.all(): + new_indexer = indexer + allow_fill = False + new_mask2D = None + needs_masking = None + else: + new_indexer = np.full(unstacker.mask.shape, -1) + new_indexer[unstacker.mask] = indexer + allow_fill = True + # calculating the full mask once and passing it to take_1d is faster + # than letting take_1d calculate it in each repeated call + new_mask2D = (~unstacker.mask).reshape(*unstacker.full_shape) + needs_masking = new_mask2D.any(axis=0) + new_indexer2D = new_indexer.reshape(*unstacker.full_shape) + new_indexer2D = ensure_platform_int(new_indexer2D) + + new_arrays = [] + for arr in self.arrays: + for i in range(unstacker.full_shape[1]): + if allow_fill: + # error: Value of type "Optional[Any]" is not indexable [index] + new_arr = take_1d( + arr, + new_indexer2D[:, i], + allow_fill=needs_masking[i], # type: ignore[index] + fill_value=fill_value, + mask=new_mask2D[:, i], # type: ignore[index] + ) + else: + new_arr = take_1d(arr, new_indexer2D[:, i], allow_fill=False) + new_arrays.append(new_arr) + + new_index = unstacker.new_index + new_columns = unstacker.get_new_columns(self._axes[1]) + new_axes = [new_index, new_columns] + + return type(self)(new_arrays, new_axes, verify_integrity=False) + + def as_array( + self, + dtype=None, + copy: bool = False, + na_value: object = lib.no_default, + ) -> np.ndarray: + """ + Convert the blockmanager data into an numpy array. + + Parameters + ---------- + dtype : object, default None + Data type of the return array. + copy : bool, default False + If True then guarantee that a copy is returned. A value of + False does not guarantee that the underlying data is not + copied. + na_value : object, default lib.no_default + Value to be used as the missing value sentinel. + + Returns + ------- + arr : ndarray + """ + if len(self.arrays) == 0: + empty_arr = np.empty(self.shape, dtype=float) + return empty_arr.transpose() + + # We want to copy when na_value is provided to avoid + # mutating the original object + copy = copy or na_value is not lib.no_default + + if not dtype: + dtype = interleaved_dtype([arr.dtype for arr in self.arrays]) + + dtype = ensure_np_dtype(dtype) + + result = np.empty(self.shape_proper, dtype=dtype) + + for i, arr in enumerate(self.arrays): + arr = arr.astype(dtype, copy=copy) + result[:, i] = arr + + if na_value is not lib.no_default: + result[isna(result)] = na_value + + return result + + @classmethod + def concat_horizontal(cls, mgrs: list[Self], axes: list[Index]) -> Self: + """ + Concatenate uniformly-indexed ArrayManagers horizontally. + """ + # concatting along the columns -> combine reindexed arrays in a single manager + arrays = list(itertools.chain.from_iterable([mgr.arrays for mgr in mgrs])) + new_mgr = cls(arrays, [axes[1], axes[0]], verify_integrity=False) + return new_mgr + + @classmethod + def concat_vertical(cls, mgrs: list[Self], axes: list[Index]) -> Self: + """ + Concatenate uniformly-indexed ArrayManagers vertically. 
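# concat_vertical corresponds to row-wise pd.concat over uniformly-indexed frames; a
# sketch of the public-API equivalent (illustrative only).
import pandas as pd

top = pd.DataFrame({"x": [1, 2]})
bottom = pd.DataFrame({"x": [3, 4]})
print(pd.concat([top, bottom], ignore_index=True))   # column "x" arrays concatenated row-wise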
+ """ + # concatting along the rows -> concat the reindexed arrays + # TODO(ArrayManager) doesn't yet preserve the correct dtype + arrays = [ + concat_arrays([mgrs[i].arrays[j] for i in range(len(mgrs))]) + for j in range(len(mgrs[0].arrays)) + ] + new_mgr = cls(arrays, [axes[1], axes[0]], verify_integrity=False) + return new_mgr + + +class SingleArrayManager(BaseArrayManager, SingleDataManager): + __slots__ = [ + "_axes", # private attribute, because 'axes' has different order, see below + "arrays", + ] + + arrays: list[np.ndarray | ExtensionArray] + _axes: list[Index] + + @property + def ndim(self) -> Literal[1]: + return 1 + + def __init__( + self, + arrays: list[np.ndarray | ExtensionArray], + axes: list[Index], + verify_integrity: bool = True, + ) -> None: + self._axes = axes + self.arrays = arrays + + if verify_integrity: + assert len(axes) == 1 + assert len(arrays) == 1 + self._axes = [ensure_index(ax) for ax in self._axes] + arr = arrays[0] + arr = maybe_coerce_values(arr) + arr = extract_pandas_array(arr, None, 1)[0] + self.arrays = [arr] + self._verify_integrity() + + def _verify_integrity(self) -> None: + (n_rows,) = self.shape + assert len(self.arrays) == 1 + arr = self.arrays[0] + assert len(arr) == n_rows + if not arr.ndim == 1: + raise ValueError( + "Passed array should be 1-dimensional, got array with " + f"{arr.ndim} dimensions instead." + ) + + @staticmethod + def _normalize_axis(axis): + return axis + + def make_empty(self, axes=None) -> Self: + """Return an empty ArrayManager with index/array of length 0""" + if axes is None: + axes = [Index([], dtype=object)] + array: np.ndarray = np.array([], dtype=self.dtype) + return type(self)([array], axes) + + @classmethod + def from_array(cls, array, index) -> SingleArrayManager: + return cls([array], [index]) + + # error: Cannot override writeable attribute with read-only property + @property + def axes(self) -> list[Index]: # type: ignore[override] + return self._axes + + @property + def index(self) -> Index: + return self._axes[0] + + @property + def dtype(self): + return self.array.dtype + + def external_values(self): + """The array that Series.values returns""" + return external_values(self.array) + + def internal_values(self): + """The array that Series._values returns""" + return self.array + + def array_values(self): + """The array that Series.array returns""" + arr = self.array + if isinstance(arr, np.ndarray): + arr = NumpyExtensionArray(arr) + return arr + + @property + def _can_hold_na(self) -> bool: + if isinstance(self.array, np.ndarray): + return self.array.dtype.kind not in "iub" + else: + # ExtensionArray + return self.array._can_hold_na + + @property + def is_single_block(self) -> bool: + return True + + def fast_xs(self, loc: int) -> SingleArrayManager: + raise NotImplementedError("Use series._values[loc] instead") + + def get_slice(self, slobj: slice, axis: AxisInt = 0) -> SingleArrayManager: + if axis >= self.ndim: + raise IndexError("Requested axis not found in manager") + + new_array = self.array[slobj] + new_index = self.index._getitem_slice(slobj) + return type(self)([new_array], [new_index], verify_integrity=False) + + def get_rows_with_mask(self, indexer: npt.NDArray[np.bool_]) -> SingleArrayManager: + new_array = self.array[indexer] + new_index = self.index[indexer] + return type(self)([new_array], [new_index]) + + # error: Signature of "apply" incompatible with supertype "BaseArrayManager" + def apply(self, func, **kwargs) -> Self: # type: ignore[override] + if callable(func): + new_array = 
func(self.array, **kwargs) + else: + new_array = getattr(self.array, func)(**kwargs) + return type(self)([new_array], self._axes) + + def setitem(self, indexer, value, warn: bool = True) -> SingleArrayManager: + """ + Set values with indexer. + + For SingleArrayManager, this backs s[indexer] = value + + See `setitem_inplace` for a version that works inplace and doesn't + return a new Manager. + """ + if isinstance(indexer, np.ndarray) and indexer.ndim > self.ndim: + raise ValueError(f"Cannot set values with ndim > {self.ndim}") + return self.apply_with_block("setitem", indexer=indexer, value=value) + + def idelete(self, indexer) -> SingleArrayManager: + """ + Delete selected locations in-place (new array, same ArrayManager) + """ + to_keep = np.ones(self.shape[0], dtype=np.bool_) + to_keep[indexer] = False + + self.arrays = [self.arrays[0][to_keep]] + self._axes = [self._axes[0][to_keep]] + return self + + def _get_data_subset(self, predicate: Callable) -> SingleArrayManager: + # used in get_numeric_data / get_bool_data + if predicate(self.array): + return type(self)(self.arrays, self._axes, verify_integrity=False) + else: + return self.make_empty() + + def set_values(self, values: ArrayLike) -> None: + """ + Set (replace) the values of the SingleArrayManager in place. + + Use at your own risk! This does not check if the passed values are + valid for the current SingleArrayManager (length, dtype, etc). + """ + self.arrays[0] = values + + def to_2d_mgr(self, columns: Index) -> ArrayManager: + """ + Manager analogue of Series.to_frame + """ + arrays = [self.arrays[0]] + axes = [self.axes[0], columns] + + return ArrayManager(arrays, axes, verify_integrity=False) + + +class NullArrayProxy: + """ + Proxy object for an all-NA array. + + Only stores the length of the array, and not the dtype. The dtype + will only be known when actually concatenating (after determining the + common dtype, for which this proxy is ignored). + Using this object avoids that the internals/concat.py needs to determine + the proper dtype and array type. + """ + + ndim = 1 + + def __init__(self, n: int) -> None: + self.n = n + + @property + def shape(self) -> tuple[int]: + return (self.n,) + + def to_array(self, dtype: DtypeObj) -> ArrayLike: + """ + Helper function to create the actual all-NA array from the NullArrayProxy + object. + + Parameters + ---------- + arr : NullArrayProxy + dtype : the dtype for the resulting array + + Returns + ------- + np.ndarray or ExtensionArray + """ + if isinstance(dtype, ExtensionDtype): + empty = dtype.construct_array_type()._from_sequence([], dtype=dtype) + indexer = -np.ones(self.n, dtype=np.intp) + return empty.take(indexer, allow_fill=True) + else: + # when introducing missing values, int becomes float, bool becomes object + dtype = ensure_dtype_can_hold_na(dtype) + fill_value = na_value_for_dtype(dtype) + arr = np.empty(self.n, dtype=dtype) + arr.fill(fill_value) + return ensure_wrapped_if_datetimelike(arr) + + +def concat_arrays(to_concat: list) -> ArrayLike: + """ + Alternative for concat_compat but specialized for use in the ArrayManager. + + Differences: only deals with 1D arrays (no axis keyword), assumes + ensure_wrapped_if_datetimelike and does not skip empty arrays to determine + the dtype. + In addition ensures that all NullArrayProxies get replaced with actual + arrays. 
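# A sketch of the common-dtype resolution concat_arrays performs before concatenating;
# plain numpy is used here purely as an analogy for the pandas dtype machinery.
import numpy as np

left = np.array([1, 2], dtype=np.int64)
right = np.array([0.5], dtype=np.float64)
target = np.result_type(left, right)                  # float64, the common dtype
print(np.concatenate([left.astype(target), right]))  # [1.  2.  0.5]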
+ + Parameters + ---------- + to_concat : list of arrays + + Returns + ------- + np.ndarray or ExtensionArray + """ + # ignore the all-NA proxies to determine the resulting dtype + to_concat_no_proxy = [x for x in to_concat if not isinstance(x, NullArrayProxy)] + + dtypes = {x.dtype for x in to_concat_no_proxy} + single_dtype = len(dtypes) == 1 + + if single_dtype: + target_dtype = to_concat_no_proxy[0].dtype + elif all(lib.is_np_dtype(x, "iub") for x in dtypes): + # GH#42092 + target_dtype = np_find_common_type(*dtypes) + else: + target_dtype = find_common_type([arr.dtype for arr in to_concat_no_proxy]) + + to_concat = [ + arr.to_array(target_dtype) + if isinstance(arr, NullArrayProxy) + else astype_array(arr, target_dtype, copy=False) + for arr in to_concat + ] + + if isinstance(to_concat[0], ExtensionArray): + cls = type(to_concat[0]) + return cls._concat_same_type(to_concat) + + result = np.concatenate(to_concat) + + # TODO decide on exact behaviour (we shouldn't do this only for empty result) + # see https://github.com/pandas-dev/pandas/issues/39817 + if len(result) == 0: + # all empties -> check for bool to not coerce to float + kinds = {obj.dtype.kind for obj in to_concat_no_proxy} + if len(kinds) != 1: + if "b" in kinds: + result = result.astype(object) + return result diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/base.py b/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/base.py new file mode 100644 index 0000000000000000000000000000000000000000..ae91f167205a0628c4bcf9b61ce58e888fe6ec8e --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/base.py @@ -0,0 +1,407 @@ +""" +Base class for the internal managers. Both BlockManager and ArrayManager +inherit from this class. +""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, + Literal, + cast, + final, +) + +import numpy as np + +from pandas._config import ( + using_copy_on_write, + warn_copy_on_write, +) + +from pandas._libs import ( + algos as libalgos, + lib, +) +from pandas.errors import AbstractMethodError +from pandas.util._validators import validate_bool_kwarg + +from pandas.core.dtypes.cast import ( + find_common_type, + np_can_hold_element, +) +from pandas.core.dtypes.dtypes import ( + ExtensionDtype, + SparseDtype, +) + +from pandas.core.base import PandasObject +from pandas.core.construction import extract_array +from pandas.core.indexes.api import ( + Index, + default_index, +) + +if TYPE_CHECKING: + from pandas._typing import ( + ArrayLike, + AxisInt, + DtypeObj, + Self, + Shape, + ) + + +class _AlreadyWarned: + def __init__(self): + # This class is used on the manager level to the block level to + # ensure that we warn only once. The block method can update the + # warned_already option without returning a value to keep the + # interface consistent. This is only a temporary solution for + # CoW warnings. + self.warned_already = False + + +class DataManager(PandasObject): + # TODO share more methods/attributes + + axes: list[Index] + + @property + def items(self) -> Index: + raise AbstractMethodError(self) + + @final + def __len__(self) -> int: + return len(self.items) + + @property + def ndim(self) -> int: + return len(self.axes) + + @property + def shape(self) -> Shape: + return tuple(len(ax) for ax in self.axes) + + @final + def _validate_set_axis(self, axis: AxisInt, new_labels: Index) -> None: + # Caller is responsible for ensuring we have an Index object. 
+ old_len = len(self.axes[axis]) + new_len = len(new_labels) + + if axis == 1 and len(self.items) == 0: + # If we are setting the index on a DataFrame with no columns, + # it is OK to change the length. + pass + + elif new_len != old_len: + raise ValueError( + f"Length mismatch: Expected axis has {old_len} elements, new " + f"values have {new_len} elements" + ) + + def reindex_indexer( + self, + new_axis, + indexer, + axis: AxisInt, + fill_value=None, + allow_dups: bool = False, + copy: bool = True, + only_slice: bool = False, + ) -> Self: + raise AbstractMethodError(self) + + @final + def reindex_axis( + self, + new_index: Index, + axis: AxisInt, + fill_value=None, + only_slice: bool = False, + ) -> Self: + """ + Conform data manager to new index. + """ + new_index, indexer = self.axes[axis].reindex(new_index) + + return self.reindex_indexer( + new_index, + indexer, + axis=axis, + fill_value=fill_value, + copy=False, + only_slice=only_slice, + ) + + def _equal_values(self, other: Self) -> bool: + """ + To be implemented by the subclasses. Only check the column values + assuming shape and indexes have already been checked. + """ + raise AbstractMethodError(self) + + @final + def equals(self, other: object) -> bool: + """ + Implementation for DataFrame.equals + """ + if not isinstance(other, type(self)): + return False + + self_axes, other_axes = self.axes, other.axes + if len(self_axes) != len(other_axes): + return False + if not all(ax1.equals(ax2) for ax1, ax2 in zip(self_axes, other_axes)): + return False + + return self._equal_values(other) + + def apply( + self, + f, + align_keys: list[str] | None = None, + **kwargs, + ) -> Self: + raise AbstractMethodError(self) + + def apply_with_block( + self, + f, + align_keys: list[str] | None = None, + **kwargs, + ) -> Self: + raise AbstractMethodError(self) + + @final + def isna(self, func) -> Self: + return self.apply("apply", func=func) + + @final + def fillna(self, value, limit: int | None, inplace: bool, downcast) -> Self: + if limit is not None: + # Do this validation even if we go through one of the no-op paths + limit = libalgos.validate_limit(None, limit=limit) + + return self.apply_with_block( + "fillna", + value=value, + limit=limit, + inplace=inplace, + downcast=downcast, + using_cow=using_copy_on_write(), + already_warned=_AlreadyWarned(), + ) + + @final + def where(self, other, cond, align: bool) -> Self: + if align: + align_keys = ["other", "cond"] + else: + align_keys = ["cond"] + other = extract_array(other, extract_numpy=True) + + return self.apply_with_block( + "where", + align_keys=align_keys, + other=other, + cond=cond, + using_cow=using_copy_on_write(), + ) + + @final + def putmask(self, mask, new, align: bool = True, warn: bool = True) -> Self: + if align: + align_keys = ["new", "mask"] + else: + align_keys = ["mask"] + new = extract_array(new, extract_numpy=True) + + already_warned = None + if warn_copy_on_write(): + already_warned = _AlreadyWarned() + if not warn: + already_warned.warned_already = True + + return self.apply_with_block( + "putmask", + align_keys=align_keys, + mask=mask, + new=new, + using_cow=using_copy_on_write(), + already_warned=already_warned, + ) + + @final + def round(self, decimals: int, using_cow: bool = False) -> Self: + return self.apply_with_block( + "round", + decimals=decimals, + using_cow=using_cow, + ) + + @final + def replace(self, to_replace, value, inplace: bool) -> Self: + inplace = validate_bool_kwarg(inplace, "inplace") + # NDFrame.replace ensures the not-is_list_likes here + assert 
not lib.is_list_like(to_replace) + assert not lib.is_list_like(value) + return self.apply_with_block( + "replace", + to_replace=to_replace, + value=value, + inplace=inplace, + using_cow=using_copy_on_write(), + already_warned=_AlreadyWarned(), + ) + + @final + def replace_regex(self, **kwargs) -> Self: + return self.apply_with_block( + "_replace_regex", + **kwargs, + using_cow=using_copy_on_write(), + already_warned=_AlreadyWarned(), + ) + + @final + def replace_list( + self, + src_list: list[Any], + dest_list: list[Any], + inplace: bool = False, + regex: bool = False, + ) -> Self: + """do a list replace""" + inplace = validate_bool_kwarg(inplace, "inplace") + + bm = self.apply_with_block( + "replace_list", + src_list=src_list, + dest_list=dest_list, + inplace=inplace, + regex=regex, + using_cow=using_copy_on_write(), + already_warned=_AlreadyWarned(), + ) + bm._consolidate_inplace() + return bm + + def interpolate(self, inplace: bool, **kwargs) -> Self: + return self.apply_with_block( + "interpolate", + inplace=inplace, + **kwargs, + using_cow=using_copy_on_write(), + already_warned=_AlreadyWarned(), + ) + + def pad_or_backfill(self, inplace: bool, **kwargs) -> Self: + return self.apply_with_block( + "pad_or_backfill", + inplace=inplace, + **kwargs, + using_cow=using_copy_on_write(), + already_warned=_AlreadyWarned(), + ) + + def shift(self, periods: int, fill_value) -> Self: + if fill_value is lib.no_default: + fill_value = None + + return self.apply_with_block("shift", periods=periods, fill_value=fill_value) + + # -------------------------------------------------------------------- + # Consolidation: No-ops for all but BlockManager + + def is_consolidated(self) -> bool: + return True + + def consolidate(self) -> Self: + return self + + def _consolidate_inplace(self) -> None: + return + + +class SingleDataManager(DataManager): + @property + def ndim(self) -> Literal[1]: + return 1 + + @final + @property + def array(self) -> ArrayLike: + """ + Quick access to the backing array of the Block or SingleArrayManager. + """ + # error: "SingleDataManager" has no attribute "arrays"; maybe "array" + return self.arrays[0] # type: ignore[attr-defined] + + def setitem_inplace(self, indexer, value, warn: bool = True) -> None: + """ + Set values with indexer. + + For Single[Block/Array]Manager, this backs s[indexer] = value + + This is an inplace version of `setitem()`, mutating the manager/values + in place, not returning a new Manager (and Block), and thus never changing + the dtype. + """ + arr = self.array + + # EAs will do this validation in their own __setitem__ methods. + if isinstance(arr, np.ndarray): + # Note: checking for ndarray instead of np.dtype means we exclude + # dt64/td64, which do their own validation. + value = np_can_hold_element(arr.dtype, value) + + if isinstance(value, np.ndarray) and value.ndim == 1 and len(value) == 1: + # NumPy 1.25 deprecation: https://github.com/numpy/numpy/pull/10615 + value = value[0, ...] + + arr[indexer] = value + + def grouped_reduce(self, func): + arr = self.array + res = func(arr) + index = default_index(len(res)) + + mgr = type(self).from_array(res, index) + return mgr + + @classmethod + def from_array(cls, arr: ArrayLike, index: Index): + raise AbstractMethodError(cls) + + +def interleaved_dtype(dtypes: list[DtypeObj]) -> DtypeObj | None: + """ + Find the common dtype for `blocks`. + + Parameters + ---------- + blocks : List[DtypeObj] + + Returns + ------- + dtype : np.dtype, ExtensionDtype, or None + None is returned when `blocks` is empty. 
+ """ + if not len(dtypes): + return None + + return find_common_type(dtypes) + + +def ensure_np_dtype(dtype: DtypeObj) -> np.dtype: + # TODO: https://github.com/pandas-dev/pandas/issues/22791 + # Give EAs some input on what happens here. Sparse needs this. + if isinstance(dtype, SparseDtype): + dtype = dtype.subtype + dtype = cast(np.dtype, dtype) + elif isinstance(dtype, ExtensionDtype): + dtype = np.dtype("object") + elif dtype == np.dtype(str): + dtype = np.dtype("object") + return dtype diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/blocks.py b/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/blocks.py new file mode 100644 index 0000000000000000000000000000000000000000..259e969112dd7506b97520f3f2a683cbf7f7ef5d --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/blocks.py @@ -0,0 +1,2850 @@ +from __future__ import annotations + +from functools import wraps +import inspect +import re +from typing import ( + TYPE_CHECKING, + Any, + Callable, + Literal, + cast, + final, +) +import warnings +import weakref + +import numpy as np + +from pandas._config import ( + get_option, + using_copy_on_write, + warn_copy_on_write, +) + +from pandas._libs import ( + NaT, + internals as libinternals, + lib, +) +from pandas._libs.internals import ( + BlockPlacement, + BlockValuesRefs, +) +from pandas._libs.missing import NA +from pandas._typing import ( + ArrayLike, + AxisInt, + DtypeBackend, + DtypeObj, + F, + FillnaOptions, + IgnoreRaise, + InterpolateOptions, + QuantileInterpolation, + Self, + Shape, + npt, +) +from pandas.errors import AbstractMethodError +from pandas.util._decorators import cache_readonly +from pandas.util._exceptions import find_stack_level +from pandas.util._validators import validate_bool_kwarg + +from pandas.core.dtypes.astype import ( + astype_array_safe, + astype_is_view, +) +from pandas.core.dtypes.cast import ( + LossySetitemError, + can_hold_element, + convert_dtypes, + find_result_type, + maybe_downcast_to_dtype, + np_can_hold_element, +) +from pandas.core.dtypes.common import ( + is_1d_only_ea_dtype, + is_float_dtype, + is_integer_dtype, + is_list_like, + is_scalar, + is_string_dtype, +) +from pandas.core.dtypes.dtypes import ( + DatetimeTZDtype, + ExtensionDtype, + IntervalDtype, + NumpyEADtype, + PeriodDtype, +) +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCIndex, + ABCNumpyExtensionArray, + ABCSeries, +) +from pandas.core.dtypes.missing import ( + is_valid_na_for_dtype, + isna, + na_value_for_dtype, +) + +from pandas.core import missing +import pandas.core.algorithms as algos +from pandas.core.array_algos.putmask import ( + extract_bool_array, + putmask_inplace, + putmask_without_repeat, + setitem_datetimelike_compat, + validate_putmask, +) +from pandas.core.array_algos.quantile import quantile_compat +from pandas.core.array_algos.replace import ( + compare_or_regex_search, + replace_regex, + should_use_regex, +) +from pandas.core.array_algos.transforms import shift +from pandas.core.arrays import ( + Categorical, + DatetimeArray, + ExtensionArray, + IntervalArray, + NumpyExtensionArray, + PeriodArray, + TimedeltaArray, +) +from pandas.core.base import PandasObject +import pandas.core.common as com +from pandas.core.computation import expressions +from pandas.core.construction import ( + ensure_wrapped_if_datetimelike, + extract_array, +) +from pandas.core.indexers import check_setitem_lengths +from pandas.core.indexes.base import get_values_for_csv + +if TYPE_CHECKING: + from 
collections.abc import ( + Iterable, + Sequence, + ) + + from pandas.core.api import Index + from pandas.core.arrays._mixins import NDArrayBackedExtensionArray + +# comparison is faster than is_object_dtype +_dtype_obj = np.dtype("object") + + +COW_WARNING_GENERAL_MSG = """\ +Setting a value on a view: behaviour will change in pandas 3.0. +You are mutating a Series or DataFrame object, and currently this mutation will +also have effect on other Series or DataFrame objects that share data with this +object. In pandas 3.0 (with Copy-on-Write), updating one Series or DataFrame object +will never modify another. +""" + + +COW_WARNING_SETITEM_MSG = """\ +Setting a value on a view: behaviour will change in pandas 3.0. +Currently, the mutation will also have effect on the object that shares data +with this object. For example, when setting a value in a Series that was +extracted from a column of a DataFrame, that DataFrame will also be updated: + + ser = df["col"] + ser[0] = 0 <--- in pandas 2, this also updates `df` + +In pandas 3.0 (with Copy-on-Write), updating one Series/DataFrame will never +modify another, and thus in the example above, `df` will not be changed. +""" + + +def maybe_split(meth: F) -> F: + """ + If we have a multi-column block, split and operate block-wise. Otherwise + use the original method. + """ + + @wraps(meth) + def newfunc(self, *args, **kwargs) -> list[Block]: + if self.ndim == 1 or self.shape[0] == 1: + return meth(self, *args, **kwargs) + else: + # Split and operate column-by-column + return self.split_and_operate(meth, *args, **kwargs) + + return cast(F, newfunc) + + +class Block(PandasObject, libinternals.Block): + """ + Canonical n-dimensional unit of homogeneous dtype contained in a pandas + data structure + + Index-ignorant; let the container take care of that + """ + + values: np.ndarray | ExtensionArray + ndim: int + refs: BlockValuesRefs + __init__: Callable + + __slots__ = () + is_numeric = False + + @final + @cache_readonly + def _validate_ndim(self) -> bool: + """ + We validate dimension for blocks that can hold 2D values, which for now + means numpy dtypes or DatetimeTZDtype. + """ + dtype = self.dtype + return not isinstance(dtype, ExtensionDtype) or isinstance( + dtype, DatetimeTZDtype + ) + + @final + @cache_readonly + def is_object(self) -> bool: + return self.values.dtype == _dtype_obj + + @final + @cache_readonly + def is_extension(self) -> bool: + return not lib.is_np_dtype(self.values.dtype) + + @final + @cache_readonly + def _can_consolidate(self) -> bool: + # We _could_ consolidate for DatetimeTZDtype but don't for now. + return not self.is_extension + + @final + @cache_readonly + def _consolidate_key(self): + return self._can_consolidate, self.dtype.name + + @final + @cache_readonly + def _can_hold_na(self) -> bool: + """ + Can we store NA values in this Block? + """ + dtype = self.dtype + if isinstance(dtype, np.dtype): + return dtype.kind not in "iub" + return dtype._can_hold_na + + @final + @property + def is_bool(self) -> bool: + """ + We can be bool if a) we are bool dtype or b) object dtype with bool objects. 
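The COW_WARNING_SETITEM_MSG above spells out the behaviour change that Copy-on-Write brings: mutating a Series extracted from a DataFrame column will no longer write through to the parent. A minimal sketch of that difference, assuming a pandas 2.x build where the "mode.copy_on_write" option is already available:

    import pandas as pd

    pd.set_option("mode.copy_on_write", True)   # opt in to the pandas 3.0 behaviour today

    df = pd.DataFrame({"col": [1, 2, 3]})
    ser = df["col"]
    ser[0] = 0                 # with CoW enabled, only `ser` is modified
    print(df.loc[0, "col"])    # 1 -> the parent DataFrame is left untouched

Without the option, the same assignment also updates df, which is exactly the situation the warning text above describes.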
+ """ + return self.values.dtype == np.dtype(bool) + + @final + def external_values(self): + return external_values(self.values) + + @final + @cache_readonly + def fill_value(self): + # Used in reindex_indexer + return na_value_for_dtype(self.dtype, compat=False) + + @final + def _standardize_fill_value(self, value): + # if we are passed a scalar None, convert it here + if self.dtype != _dtype_obj and is_valid_na_for_dtype(value, self.dtype): + value = self.fill_value + return value + + @property + def mgr_locs(self) -> BlockPlacement: + return self._mgr_locs + + @mgr_locs.setter + def mgr_locs(self, new_mgr_locs: BlockPlacement) -> None: + self._mgr_locs = new_mgr_locs + + @final + def make_block( + self, + values, + placement: BlockPlacement | None = None, + refs: BlockValuesRefs | None = None, + ) -> Block: + """ + Create a new block, with type inference propagate any values that are + not specified + """ + if placement is None: + placement = self._mgr_locs + if self.is_extension: + values = ensure_block_shape(values, ndim=self.ndim) + + return new_block(values, placement=placement, ndim=self.ndim, refs=refs) + + @final + def make_block_same_class( + self, + values, + placement: BlockPlacement | None = None, + refs: BlockValuesRefs | None = None, + ) -> Self: + """Wrap given values in a block of same type as self.""" + # Pre-2.0 we called ensure_wrapped_if_datetimelike because fastparquet + # relied on it, as of 2.0 the caller is responsible for this. + if placement is None: + placement = self._mgr_locs + + # We assume maybe_coerce_values has already been called + return type(self)(values, placement=placement, ndim=self.ndim, refs=refs) + + @final + def __repr__(self) -> str: + # don't want to print out all of the items here + name = type(self).__name__ + if self.ndim == 1: + result = f"{name}: {len(self)} dtype: {self.dtype}" + else: + shape = " x ".join([str(s) for s in self.shape]) + result = f"{name}: {self.mgr_locs.indexer}, {shape}, dtype: {self.dtype}" + + return result + + @final + def __len__(self) -> int: + return len(self.values) + + @final + def slice_block_columns(self, slc: slice) -> Self: + """ + Perform __getitem__-like, return result as block. + """ + new_mgr_locs = self._mgr_locs[slc] + + new_values = self._slice(slc) + refs = self.refs + return type(self)(new_values, new_mgr_locs, self.ndim, refs=refs) + + @final + def take_block_columns(self, indices: npt.NDArray[np.intp]) -> Self: + """ + Perform __getitem__-like, return result as block. + + Only supports slices that preserve dimensionality. + """ + # Note: only called from is from internals.concat, and we can verify + # that never happens with 1-column blocks, i.e. never for ExtensionBlock. + + new_mgr_locs = self._mgr_locs[indices] + + new_values = self._slice(indices) + return type(self)(new_values, new_mgr_locs, self.ndim, refs=None) + + @final + def getitem_block_columns( + self, slicer: slice, new_mgr_locs: BlockPlacement, ref_inplace_op: bool = False + ) -> Self: + """ + Perform __getitem__-like, return result as block. + + Only supports slices that preserve dimensionality. 
+ """ + new_values = self._slice(slicer) + refs = self.refs if not ref_inplace_op or self.refs.has_reference() else None + return type(self)(new_values, new_mgr_locs, self.ndim, refs=refs) + + @final + def _can_hold_element(self, element: Any) -> bool: + """require the same dtype as ourselves""" + element = extract_array(element, extract_numpy=True) + return can_hold_element(self.values, element) + + @final + def should_store(self, value: ArrayLike) -> bool: + """ + Should we set self.values[indexer] = value inplace or do we need to cast? + + Parameters + ---------- + value : np.ndarray or ExtensionArray + + Returns + ------- + bool + """ + return value.dtype == self.dtype + + # --------------------------------------------------------------------- + # Apply/Reduce and Helpers + + @final + def apply(self, func, **kwargs) -> list[Block]: + """ + apply the function to my values; return a block if we are not + one + """ + result = func(self.values, **kwargs) + + result = maybe_coerce_values(result) + return self._split_op_result(result) + + @final + def reduce(self, func) -> list[Block]: + # We will apply the function and reshape the result into a single-row + # Block with the same mgr_locs; squeezing will be done at a higher level + assert self.ndim == 2 + + result = func(self.values) + + if self.values.ndim == 1: + res_values = result + else: + res_values = result.reshape(-1, 1) + + nb = self.make_block(res_values) + return [nb] + + @final + def _split_op_result(self, result: ArrayLike) -> list[Block]: + # See also: split_and_operate + if result.ndim > 1 and isinstance(result.dtype, ExtensionDtype): + # TODO(EA2D): unnecessary with 2D EAs + # if we get a 2D ExtensionArray, we need to split it into 1D pieces + nbs = [] + for i, loc in enumerate(self._mgr_locs): + if not is_1d_only_ea_dtype(result.dtype): + vals = result[i : i + 1] + else: + vals = result[i] + + bp = BlockPlacement(loc) + block = self.make_block(values=vals, placement=bp) + nbs.append(block) + return nbs + + nb = self.make_block(result) + + return [nb] + + @final + def _split(self) -> list[Block]: + """ + Split a block into a list of single-column blocks. + """ + assert self.ndim == 2 + + new_blocks = [] + for i, ref_loc in enumerate(self._mgr_locs): + vals = self.values[slice(i, i + 1)] + + bp = BlockPlacement(ref_loc) + nb = type(self)(vals, placement=bp, ndim=2, refs=self.refs) + new_blocks.append(nb) + return new_blocks + + @final + def split_and_operate(self, func, *args, **kwargs) -> list[Block]: + """ + Split the block and apply func column-by-column. 
+ + Parameters + ---------- + func : Block method + *args + **kwargs + + Returns + ------- + List[Block] + """ + assert self.ndim == 2 and self.shape[0] != 1 + + res_blocks = [] + for nb in self._split(): + rbs = func(nb, *args, **kwargs) + res_blocks.extend(rbs) + return res_blocks + + # --------------------------------------------------------------------- + # Up/Down-casting + + @final + def coerce_to_target_dtype(self, other, warn_on_upcast: bool = False) -> Block: + """ + coerce the current block to a dtype compat for other + we will return a block, possibly object, and not raise + + we can also safely try to coerce to the same dtype + and will receive the same block + """ + new_dtype = find_result_type(self.values.dtype, other) + if new_dtype == self.dtype: + # GH#52927 avoid RecursionError + raise AssertionError( + "Something has gone wrong, please report a bug at " + "https://github.com/pandas-dev/pandas/issues" + ) + + # In a future version of pandas, the default will be that + # setting `nan` into an integer series won't raise. + if ( + is_scalar(other) + and is_integer_dtype(self.values.dtype) + and isna(other) + and other is not NaT + and not ( + isinstance(other, (np.datetime64, np.timedelta64)) and np.isnat(other) + ) + ): + warn_on_upcast = False + elif ( + isinstance(other, np.ndarray) + and other.ndim == 1 + and is_integer_dtype(self.values.dtype) + and is_float_dtype(other.dtype) + and lib.has_only_ints_or_nan(other) + ): + warn_on_upcast = False + + if warn_on_upcast: + warnings.warn( + f"Setting an item of incompatible dtype is deprecated " + "and will raise an error in a future version of pandas. " + f"Value '{other}' has dtype incompatible with {self.values.dtype}, " + "please explicitly cast to a compatible dtype first.", + FutureWarning, + stacklevel=find_stack_level(), + ) + if self.values.dtype == new_dtype: + raise AssertionError( + f"Did not expect new dtype {new_dtype} to equal self.dtype " + f"{self.values.dtype}. Please report a bug at " + "https://github.com/pandas-dev/pandas/issues." + ) + return self.astype(new_dtype, copy=False) + + @final + def _maybe_downcast( + self, + blocks: list[Block], + downcast, + using_cow: bool, + caller: str, + ) -> list[Block]: + if downcast is False: + return blocks + + if self.dtype == _dtype_obj: + # TODO: does it matter that self.dtype might not match blocks[i].dtype? + # GH#44241 We downcast regardless of the argument; + # respecting 'downcast=None' may be worthwhile at some point, + # but ATM it breaks too much existing code. + # split and convert the blocks + + if caller == "fillna" and get_option("future.no_silent_downcasting"): + return blocks + + nbs = extend_blocks( + [blk.convert(using_cow=using_cow, copy=not using_cow) for blk in blocks] + ) + if caller == "fillna": + if len(nbs) != len(blocks) or not all( + x.dtype == y.dtype for x, y in zip(nbs, blocks) + ): + # GH#54261 + warnings.warn( + "Downcasting object dtype arrays on .fillna, .ffill, .bfill " + "is deprecated and will change in a future version. " + "Call result.infer_objects(copy=False) instead. 
" + "To opt-in to the future " + "behavior, set " + "`pd.set_option('future.no_silent_downcasting', True)`", + FutureWarning, + stacklevel=find_stack_level(), + ) + + return nbs + + elif downcast is None: + return blocks + elif caller == "where" and get_option("future.no_silent_downcasting") is True: + return blocks + else: + nbs = extend_blocks([b._downcast_2d(downcast, using_cow) for b in blocks]) + + # When _maybe_downcast is called with caller="where", it is either + # a) with downcast=False, which is a no-op (the desired future behavior) + # b) with downcast="infer", which is _not_ passed by the user. + # In the latter case the future behavior is to stop doing inference, + # so we issue a warning if and only if some inference occurred. + if caller == "where": + # GH#53656 + if len(blocks) != len(nbs) or any( + left.dtype != right.dtype for left, right in zip(blocks, nbs) + ): + # In this case _maybe_downcast was _not_ a no-op, so the behavior + # will change, so we issue a warning. + warnings.warn( + "Downcasting behavior in Series and DataFrame methods 'where', " + "'mask', and 'clip' is deprecated. In a future " + "version this will not infer object dtypes or cast all-round " + "floats to integers. Instead call " + "result.infer_objects(copy=False) for object inference, " + "or cast round floats explicitly. To opt-in to the future " + "behavior, set " + "`pd.set_option('future.no_silent_downcasting', True)`", + FutureWarning, + stacklevel=find_stack_level(), + ) + + return nbs + + @final + @maybe_split + def _downcast_2d(self, dtype, using_cow: bool = False) -> list[Block]: + """ + downcast specialized to 2D case post-validation. + + Refactored to allow use of maybe_split. + """ + new_values = maybe_downcast_to_dtype(self.values, dtype=dtype) + new_values = maybe_coerce_values(new_values) + refs = self.refs if new_values is self.values else None + return [self.make_block(new_values, refs=refs)] + + @final + def convert( + self, + *, + copy: bool = True, + using_cow: bool = False, + ) -> list[Block]: + """ + Attempt to coerce any object types to better types. Return a copy + of the block (if copy = True). 
+ """ + if not self.is_object: + if not copy and using_cow: + return [self.copy(deep=False)] + return [self.copy()] if copy else [self] + + if self.ndim != 1 and self.shape[0] != 1: + blocks = self.split_and_operate( + Block.convert, copy=copy, using_cow=using_cow + ) + if all(blk.dtype.kind == "O" for blk in blocks): + # Avoid fragmenting the block if convert is a no-op + if using_cow: + return [self.copy(deep=False)] + return [self.copy()] if copy else [self] + return blocks + + values = self.values + if values.ndim == 2: + # the check above ensures we only get here with values.shape[0] == 1, + # avoid doing .ravel as that might make a copy + values = values[0] + + res_values = lib.maybe_convert_objects( + values, # type: ignore[arg-type] + convert_non_numeric=True, + ) + refs = None + if copy and res_values is values: + res_values = values.copy() + elif res_values is values: + refs = self.refs + + res_values = ensure_block_shape(res_values, self.ndim) + res_values = maybe_coerce_values(res_values) + return [self.make_block(res_values, refs=refs)] + + def convert_dtypes( + self, + copy: bool, + using_cow: bool, + infer_objects: bool = True, + convert_string: bool = True, + convert_integer: bool = True, + convert_boolean: bool = True, + convert_floating: bool = True, + dtype_backend: DtypeBackend = "numpy_nullable", + ) -> list[Block]: + if infer_objects and self.is_object: + blks = self.convert(copy=False, using_cow=using_cow) + else: + blks = [self] + + if not any( + [convert_floating, convert_integer, convert_boolean, convert_string] + ): + return [b.copy(deep=copy) for b in blks] + + rbs = [] + for blk in blks: + # Determine dtype column by column + sub_blks = [blk] if blk.ndim == 1 or self.shape[0] == 1 else blk._split() + dtypes = [ + convert_dtypes( + b.values, + convert_string, + convert_integer, + convert_boolean, + convert_floating, + infer_objects, + dtype_backend, + ) + for b in sub_blks + ] + if all(dtype == self.dtype for dtype in dtypes): + # Avoid block splitting if no dtype changes + rbs.append(blk.copy(deep=copy)) + continue + + for dtype, b in zip(dtypes, sub_blks): + rbs.append(b.astype(dtype=dtype, copy=copy, squeeze=b.ndim != 1)) + return rbs + + # --------------------------------------------------------------------- + # Array-Like Methods + + @final + @cache_readonly + def dtype(self) -> DtypeObj: + return self.values.dtype + + @final + def astype( + self, + dtype: DtypeObj, + copy: bool = False, + errors: IgnoreRaise = "raise", + using_cow: bool = False, + squeeze: bool = False, + ) -> Block: + """ + Coerce to the new dtype. + + Parameters + ---------- + dtype : np.dtype or ExtensionDtype + copy : bool, default False + copy if indicated + errors : str, {'raise', 'ignore'}, default 'raise' + - ``raise`` : allow exceptions to be raised + - ``ignore`` : suppress exceptions. On error return original object + using_cow: bool, default False + Signaling if copy on write copy logic is used. 
+ squeeze : bool, default False + squeeze values to ndim=1 if only one column is given + + Returns + ------- + Block + """ + values = self.values + if squeeze and values.ndim == 2 and is_1d_only_ea_dtype(dtype): + if values.shape[0] != 1: + raise ValueError("Can not squeeze with more than one column.") + values = values[0, :] # type: ignore[call-overload] + + new_values = astype_array_safe(values, dtype, copy=copy, errors=errors) + + new_values = maybe_coerce_values(new_values) + + refs = None + if (using_cow or not copy) and astype_is_view(values.dtype, new_values.dtype): + refs = self.refs + + newb = self.make_block(new_values, refs=refs) + if newb.shape != self.shape: + raise TypeError( + f"cannot set astype for copy = [{copy}] for dtype " + f"({self.dtype.name} [{self.shape}]) to different shape " + f"({newb.dtype.name} [{newb.shape}])" + ) + return newb + + @final + def get_values_for_csv( + self, *, float_format, date_format, decimal, na_rep: str = "nan", quoting=None + ) -> Block: + """convert to our native types format""" + result = get_values_for_csv( + self.values, + na_rep=na_rep, + quoting=quoting, + float_format=float_format, + date_format=date_format, + decimal=decimal, + ) + return self.make_block(result) + + @final + def copy(self, deep: bool = True) -> Self: + """copy constructor""" + values = self.values + refs: BlockValuesRefs | None + if deep: + values = values.copy() + refs = None + else: + refs = self.refs + return type(self)(values, placement=self._mgr_locs, ndim=self.ndim, refs=refs) + + # --------------------------------------------------------------------- + # Copy-on-Write Helpers + + @final + def _maybe_copy(self, using_cow: bool, inplace: bool) -> Self: + if using_cow and inplace: + deep = self.refs.has_reference() + blk = self.copy(deep=deep) + else: + blk = self if inplace else self.copy() + return blk + + @final + def _get_refs_and_copy(self, using_cow: bool, inplace: bool): + refs = None + copy = not inplace + if inplace: + if using_cow and self.refs.has_reference(): + copy = True + else: + refs = self.refs + return copy, refs + + # --------------------------------------------------------------------- + # Replace + + @final + def replace( + self, + to_replace, + value, + inplace: bool = False, + # mask may be pre-computed if we're called from replace_list + mask: npt.NDArray[np.bool_] | None = None, + using_cow: bool = False, + already_warned=None, + ) -> list[Block]: + """ + replace the to_replace value with value, possible to create new + blocks here this is just a call to putmask. + """ + + # Note: the checks we do in NDFrame.replace ensure we never get + # here with listlike to_replace or value, as those cases + # go through replace_list + values = self.values + + if isinstance(values, Categorical): + # TODO: avoid special-casing + # GH49404 + blk = self._maybe_copy(using_cow, inplace) + values = cast(Categorical, blk.values) + values._replace(to_replace=to_replace, value=value, inplace=True) + return [blk] + + if not self._can_hold_element(to_replace): + # We cannot hold `to_replace`, so we know immediately that + # replacing it is a no-op. + # Note: If to_replace were a list, NDFrame.replace would call + # replace_list instead of replace. + if using_cow: + return [self.copy(deep=False)] + else: + return [self] if inplace else [self.copy()] + + if mask is None: + mask = missing.mask_missing(values, to_replace) + if not mask.any(): + # Note: we get here with test_replace_extension_other incorrectly + # bc _can_hold_element is incorrect. 
+ if using_cow: + return [self.copy(deep=False)] + else: + return [self] if inplace else [self.copy()] + + elif self._can_hold_element(value): + # TODO(CoW): Maybe split here as well into columns where mask has True + # and rest? + blk = self._maybe_copy(using_cow, inplace) + putmask_inplace(blk.values, mask, value) + if ( + inplace + and warn_copy_on_write() + and already_warned is not None + and not already_warned.warned_already + ): + if self.refs.has_reference(): + warnings.warn( + COW_WARNING_GENERAL_MSG, + FutureWarning, + stacklevel=find_stack_level(), + ) + already_warned.warned_already = True + + if not (self.is_object and value is None): + # if the user *explicitly* gave None, we keep None, otherwise + # may downcast to NaN + if get_option("future.no_silent_downcasting") is True: + blocks = [blk] + else: + blocks = blk.convert(copy=False, using_cow=using_cow) + if len(blocks) > 1 or blocks[0].dtype != blk.dtype: + warnings.warn( + # GH#54710 + "Downcasting behavior in `replace` is deprecated and " + "will be removed in a future version. To retain the old " + "behavior, explicitly call " + "`result.infer_objects(copy=False)`. " + "To opt-in to the future " + "behavior, set " + "`pd.set_option('future.no_silent_downcasting', True)`", + FutureWarning, + stacklevel=find_stack_level(), + ) + else: + blocks = [blk] + return blocks + + elif self.ndim == 1 or self.shape[0] == 1: + if value is None or value is NA: + blk = self.astype(np.dtype(object)) + else: + blk = self.coerce_to_target_dtype(value) + return blk.replace( + to_replace=to_replace, + value=value, + inplace=True, + mask=mask, + ) + + else: + # split so that we only upcast where necessary + blocks = [] + for i, nb in enumerate(self._split()): + blocks.extend( + type(self).replace( + nb, + to_replace=to_replace, + value=value, + inplace=True, + mask=mask[i : i + 1], + using_cow=using_cow, + ) + ) + return blocks + + @final + def _replace_regex( + self, + to_replace, + value, + inplace: bool = False, + mask=None, + using_cow: bool = False, + already_warned=None, + ) -> list[Block]: + """ + Replace elements by the given value. + + Parameters + ---------- + to_replace : object or pattern + Scalar to replace or regular expression to match. + value : object + Replacement object. + inplace : bool, default False + Perform inplace modification. + mask : array-like of bool, optional + True indicate corresponding element is ignored. + using_cow: bool, default False + Specifying if copy on write is enabled. + + Returns + ------- + List[Block] + """ + if not self._can_hold_element(to_replace): + # i.e. only if self.is_object is True, but could in principle include a + # String ExtensionBlock + if using_cow: + return [self.copy(deep=False)] + return [self] if inplace else [self.copy()] + + rx = re.compile(to_replace) + + block = self._maybe_copy(using_cow, inplace) + + replace_regex(block.values, rx, value, mask) + + if ( + inplace + and warn_copy_on_write() + and already_warned is not None + and not already_warned.warned_already + ): + if self.refs.has_reference(): + warnings.warn( + COW_WARNING_GENERAL_MSG, + FutureWarning, + stacklevel=find_stack_level(), + ) + already_warned.warned_already = True + + nbs = block.convert(copy=False, using_cow=using_cow) + opt = get_option("future.no_silent_downcasting") + if (len(nbs) > 1 or nbs[0].dtype != block.dtype) and not opt: + warnings.warn( + # GH#54710 + "Downcasting behavior in `replace` is deprecated and " + "will be removed in a future version. 
To retain the old " + "behavior, explicitly call `result.infer_objects(copy=False)`. " + "To opt-in to the future " + "behavior, set " + "`pd.set_option('future.no_silent_downcasting', True)`", + FutureWarning, + stacklevel=find_stack_level(), + ) + return nbs + + @final + def replace_list( + self, + src_list: Iterable[Any], + dest_list: Sequence[Any], + inplace: bool = False, + regex: bool = False, + using_cow: bool = False, + already_warned=None, + ) -> list[Block]: + """ + See BlockManager.replace_list docstring. + """ + values = self.values + + if isinstance(values, Categorical): + # TODO: avoid special-casing + # GH49404 + blk = self._maybe_copy(using_cow, inplace) + values = cast(Categorical, blk.values) + values._replace(to_replace=src_list, value=dest_list, inplace=True) + return [blk] + + # Exclude anything that we know we won't contain + pairs = [ + (x, y) for x, y in zip(src_list, dest_list) if self._can_hold_element(x) + ] + if not len(pairs): + if using_cow: + return [self.copy(deep=False)] + # shortcut, nothing to replace + return [self] if inplace else [self.copy()] + + src_len = len(pairs) - 1 + + if is_string_dtype(values.dtype): + # Calculate the mask once, prior to the call of comp + # in order to avoid repeating the same computations + na_mask = ~isna(values) + masks: Iterable[npt.NDArray[np.bool_]] = ( + extract_bool_array( + cast( + ArrayLike, + compare_or_regex_search( + values, s[0], regex=regex, mask=na_mask + ), + ) + ) + for s in pairs + ) + else: + # GH#38086 faster if we know we dont need to check for regex + masks = (missing.mask_missing(values, s[0]) for s in pairs) + # Materialize if inplace = True, since the masks can change + # as we replace + if inplace: + masks = list(masks) + + if using_cow: + # Don't set up refs here, otherwise we will think that we have + # references when we check again later + rb = [self] + else: + rb = [self if inplace else self.copy()] + + if ( + inplace + and warn_copy_on_write() + and already_warned is not None + and not already_warned.warned_already + ): + if self.refs.has_reference(): + warnings.warn( + COW_WARNING_GENERAL_MSG, + FutureWarning, + stacklevel=find_stack_level(), + ) + already_warned.warned_already = True + + opt = get_option("future.no_silent_downcasting") + for i, ((src, dest), mask) in enumerate(zip(pairs, masks)): + convert = i == src_len # only convert once at the end + new_rb: list[Block] = [] + + # GH-39338: _replace_coerce can split a block into + # single-column blocks, so track the index so we know + # where to index into the mask + for blk_num, blk in enumerate(rb): + if len(rb) == 1: + m = mask + else: + mib = mask + assert not isinstance(mib, bool) + m = mib[blk_num : blk_num + 1] + + # error: Argument "mask" to "_replace_coerce" of "Block" has + # incompatible type "Union[ExtensionArray, ndarray[Any, Any], bool]"; + # expected "ndarray[Any, dtype[bool_]]" + result = blk._replace_coerce( + to_replace=src, + value=dest, + mask=m, + inplace=inplace, + regex=regex, + using_cow=using_cow, + ) + + if using_cow and i != src_len: + # This is ugly, but we have to get rid of intermediate refs + # that did not go out of scope yet, otherwise we will trigger + # many unnecessary copies + for b in result: + ref = weakref.ref(b) + b.refs.referenced_blocks.pop( + b.refs.referenced_blocks.index(ref) + ) + + if ( + not opt + and convert + and blk.is_object + and not all(x is None for x in dest_list) + ): + # GH#44498 avoid unwanted cast-back + nbs = [] + for res_blk in result: + converted = res_blk.convert( + 
copy=True and not using_cow, using_cow=using_cow + ) + if len(converted) > 1 or converted[0].dtype != res_blk.dtype: + warnings.warn( + # GH#54710 + "Downcasting behavior in `replace` is deprecated " + "and will be removed in a future version. To " + "retain the old behavior, explicitly call " + "`result.infer_objects(copy=False)`. " + "To opt-in to the future " + "behavior, set " + "`pd.set_option('future.no_silent_downcasting', True)`", + FutureWarning, + stacklevel=find_stack_level(), + ) + nbs.extend(converted) + result = nbs + new_rb.extend(result) + rb = new_rb + return rb + + @final + def _replace_coerce( + self, + to_replace, + value, + mask: npt.NDArray[np.bool_], + inplace: bool = True, + regex: bool = False, + using_cow: bool = False, + ) -> list[Block]: + """ + Replace value corresponding to the given boolean array with another + value. + + Parameters + ---------- + to_replace : object or pattern + Scalar to replace or regular expression to match. + value : object + Replacement object. + mask : np.ndarray[bool] + True indicate corresponding element is ignored. + inplace : bool, default True + Perform inplace modification. + regex : bool, default False + If true, perform regular expression substitution. + + Returns + ------- + List[Block] + """ + if should_use_regex(regex, to_replace): + return self._replace_regex( + to_replace, + value, + inplace=inplace, + mask=mask, + ) + else: + if value is None: + # gh-45601, gh-45836, gh-46634 + if mask.any(): + has_ref = self.refs.has_reference() + nb = self.astype(np.dtype(object), copy=False, using_cow=using_cow) + if (nb is self or using_cow) and not inplace: + nb = nb.copy() + elif inplace and has_ref and nb.refs.has_reference() and using_cow: + # no copy in astype and we had refs before + nb = nb.copy() + putmask_inplace(nb.values, mask, value) + return [nb] + if using_cow: + return [self] + return [self] if inplace else [self.copy()] + return self.replace( + to_replace=to_replace, + value=value, + inplace=inplace, + mask=mask, + using_cow=using_cow, + ) + + # --------------------------------------------------------------------- + # 2D Methods - Shared by NumpyBlock and NDArrayBackedExtensionBlock + # but not ExtensionBlock + + def _maybe_squeeze_arg(self, arg: np.ndarray) -> np.ndarray: + """ + For compatibility with 1D-only ExtensionArrays. + """ + return arg + + def _unwrap_setitem_indexer(self, indexer): + """ + For compatibility with 1D-only ExtensionArrays. + """ + return indexer + + # NB: this cannot be made cache_readonly because in mgr.set_values we pin + # new .values that can have different shape GH#42631 + @property + def shape(self) -> Shape: + return self.values.shape + + def iget(self, i: int | tuple[int, int] | tuple[slice, int]) -> np.ndarray: + # In the case where we have a tuple[slice, int], the slice will always + # be slice(None) + # Note: only reached with self.ndim == 2 + # Invalid index type "Union[int, Tuple[int, int], Tuple[slice, int]]" + # for "Union[ndarray[Any, Any], ExtensionArray]"; expected type + # "Union[int, integer[Any]]" + return self.values[i] # type: ignore[index] + + def _slice( + self, slicer: slice | npt.NDArray[np.bool_] | npt.NDArray[np.intp] + ) -> ArrayLike: + """return a slice of my values""" + + return self.values[slicer] + + def set_inplace(self, locs, values: ArrayLike, copy: bool = False) -> None: + """ + Modify block values in-place with new item value. + + If copy=True, first copy the underlying values in place before modifying + (for Copy-on-Write). 
+ + Notes + ----- + `set_inplace` never creates a new array or new Block, whereas `setitem` + _may_ create a new array and always creates a new Block. + + Caller is responsible for checking values.dtype == self.dtype. + """ + if copy: + self.values = self.values.copy() + self.values[locs] = values + + @final + def take_nd( + self, + indexer: npt.NDArray[np.intp], + axis: AxisInt, + new_mgr_locs: BlockPlacement | None = None, + fill_value=lib.no_default, + ) -> Block: + """ + Take values according to indexer and return them as a block. + """ + values = self.values + + if fill_value is lib.no_default: + fill_value = self.fill_value + allow_fill = False + else: + allow_fill = True + + # Note: algos.take_nd has upcast logic similar to coerce_to_target_dtype + new_values = algos.take_nd( + values, indexer, axis=axis, allow_fill=allow_fill, fill_value=fill_value + ) + + # Called from three places in managers, all of which satisfy + # these assertions + if isinstance(self, ExtensionBlock): + # NB: in this case, the 'axis' kwarg will be ignored in the + # algos.take_nd call above. + assert not (self.ndim == 1 and new_mgr_locs is None) + assert not (axis == 0 and new_mgr_locs is None) + + if new_mgr_locs is None: + new_mgr_locs = self._mgr_locs + + if new_values.dtype != self.dtype: + return self.make_block(new_values, new_mgr_locs) + else: + return self.make_block_same_class(new_values, new_mgr_locs) + + def _unstack( + self, + unstacker, + fill_value, + new_placement: npt.NDArray[np.intp], + needs_masking: npt.NDArray[np.bool_], + ): + """ + Return a list of unstacked blocks of self + + Parameters + ---------- + unstacker : reshape._Unstacker + fill_value : int + Only used in ExtensionBlock._unstack + new_placement : np.ndarray[np.intp] + allow_fill : bool + needs_masking : np.ndarray[bool] + + Returns + ------- + blocks : list of Block + New blocks of unstacked values. + mask : array-like of bool + The mask of columns of `blocks` we should keep. + """ + new_values, mask = unstacker.get_new_values( + self.values.T, fill_value=fill_value + ) + + mask = mask.any(0) + # TODO: in all tests we have mask.all(); can we rely on that? + + # Note: these next two lines ensure that + # mask.sum() == sum(len(nb.mgr_locs) for nb in blocks) + # which the calling function needs in order to pass verify_integrity=False + # to the BlockManager constructor + new_values = new_values.T[mask] + new_placement = new_placement[mask] + + bp = BlockPlacement(new_placement) + blocks = [new_block_2d(new_values, placement=bp)] + return blocks, mask + + # --------------------------------------------------------------------- + + def setitem(self, indexer, value, using_cow: bool = False) -> Block: + """ + Attempt self.values[indexer] = value, possibly creating a new array. + + Parameters + ---------- + indexer : tuple, list-like, array-like, slice, int + The subset of self.values to set + value : object + The value being set + using_cow: bool, default False + Signaling if CoW is used. + + Returns + ------- + Block + + Notes + ----- + `indexer` is a direct slice/positional indexer. `value` must + be a compatible shape. 
+ """ + + value = self._standardize_fill_value(value) + + values = cast(np.ndarray, self.values) + if self.ndim == 2: + values = values.T + + # length checking + check_setitem_lengths(indexer, value, values) + + if self.dtype != _dtype_obj: + # GH48933: extract_array would convert a pd.Series value to np.ndarray + value = extract_array(value, extract_numpy=True) + try: + casted = np_can_hold_element(values.dtype, value) + except LossySetitemError: + # current dtype cannot store value, coerce to common dtype + nb = self.coerce_to_target_dtype(value, warn_on_upcast=True) + return nb.setitem(indexer, value) + else: + if self.dtype == _dtype_obj: + # TODO: avoid having to construct values[indexer] + vi = values[indexer] + if lib.is_list_like(vi): + # checking lib.is_scalar here fails on + # test_iloc_setitem_custom_object + casted = setitem_datetimelike_compat(values, len(vi), casted) + + self = self._maybe_copy(using_cow, inplace=True) + values = cast(np.ndarray, self.values.T) + if isinstance(casted, np.ndarray) and casted.ndim == 1 and len(casted) == 1: + # NumPy 1.25 deprecation: https://github.com/numpy/numpy/pull/10615 + casted = casted[0, ...] + try: + values[indexer] = casted + except (TypeError, ValueError) as err: + if is_list_like(casted): + raise ValueError( + "setting an array element with a sequence." + ) from err + raise + return self + + def putmask( + self, mask, new, using_cow: bool = False, already_warned=None + ) -> list[Block]: + """ + putmask the data to the block; it is possible that we may create a + new dtype of block + + Return the resulting block(s). + + Parameters + ---------- + mask : np.ndarray[bool], SparseArray[bool], or BooleanArray + new : a ndarray/object + using_cow: bool, default False + + Returns + ------- + List[Block] + """ + orig_mask = mask + values = cast(np.ndarray, self.values) + mask, noop = validate_putmask(values.T, mask) + assert not isinstance(new, (ABCIndex, ABCSeries, ABCDataFrame)) + + if new is lib.no_default: + new = self.fill_value + + new = self._standardize_fill_value(new) + new = extract_array(new, extract_numpy=True) + + if noop: + if using_cow: + return [self.copy(deep=False)] + return [self] + + if ( + warn_copy_on_write() + and already_warned is not None + and not already_warned.warned_already + ): + if self.refs.has_reference(): + warnings.warn( + COW_WARNING_GENERAL_MSG, + FutureWarning, + stacklevel=find_stack_level(), + ) + already_warned.warned_already = True + + try: + casted = np_can_hold_element(values.dtype, new) + + self = self._maybe_copy(using_cow, inplace=True) + values = cast(np.ndarray, self.values) + + putmask_without_repeat(values.T, mask, casted) + return [self] + except LossySetitemError: + if self.ndim == 1 or self.shape[0] == 1: + # no need to split columns + + if not is_list_like(new): + # using just new[indexer] can't save us the need to cast + return self.coerce_to_target_dtype( + new, warn_on_upcast=True + ).putmask(mask, new) + else: + indexer = mask.nonzero()[0] + nb = self.setitem(indexer, new[indexer], using_cow=using_cow) + return [nb] + + else: + is_array = isinstance(new, np.ndarray) + + res_blocks = [] + nbs = self._split() + for i, nb in enumerate(nbs): + n = new + if is_array: + # we have a different value per-column + n = new[:, i : i + 1] + + submask = orig_mask[:, i : i + 1] + rbs = nb.putmask(submask, n, using_cow=using_cow) + res_blocks.extend(rbs) + return res_blocks + + def where( + self, other, cond, _downcast: str | bool = "infer", using_cow: bool = False + ) -> list[Block]: + """ + 
evaluate the block; return result block(s) from the result + + Parameters + ---------- + other : a ndarray/object + cond : np.ndarray[bool], SparseArray[bool], or BooleanArray + _downcast : str or None, default "infer" + Private because we only specify it when calling from fillna. + + Returns + ------- + List[Block] + """ + assert cond.ndim == self.ndim + assert not isinstance(other, (ABCIndex, ABCSeries, ABCDataFrame)) + + transpose = self.ndim == 2 + + cond = extract_bool_array(cond) + + # EABlocks override where + values = cast(np.ndarray, self.values) + orig_other = other + if transpose: + values = values.T + + icond, noop = validate_putmask(values, ~cond) + if noop: + # GH-39595: Always return a copy; short-circuit up/downcasting + if using_cow: + return [self.copy(deep=False)] + return [self.copy()] + + if other is lib.no_default: + other = self.fill_value + + other = self._standardize_fill_value(other) + + try: + # try/except here is equivalent to a self._can_hold_element check, + # but this gets us back 'casted' which we will reuse below; + # without using 'casted', expressions.where may do unwanted upcasts. + casted = np_can_hold_element(values.dtype, other) + except (ValueError, TypeError, LossySetitemError): + # we cannot coerce, return a compat dtype + + if self.ndim == 1 or self.shape[0] == 1: + # no need to split columns + + block = self.coerce_to_target_dtype(other) + blocks = block.where(orig_other, cond, using_cow=using_cow) + return self._maybe_downcast( + blocks, downcast=_downcast, using_cow=using_cow, caller="where" + ) + + else: + # since _maybe_downcast would split blocks anyway, we + # can avoid some potential upcast/downcast by splitting + # on the front end. + is_array = isinstance(other, (np.ndarray, ExtensionArray)) + + res_blocks = [] + nbs = self._split() + for i, nb in enumerate(nbs): + oth = other + if is_array: + # we have a different value per-column + oth = other[:, i : i + 1] + + submask = cond[:, i : i + 1] + rbs = nb.where( + oth, submask, _downcast=_downcast, using_cow=using_cow + ) + res_blocks.extend(rbs) + return res_blocks + + else: + other = casted + alt = setitem_datetimelike_compat(values, icond.sum(), other) + if alt is not other: + if is_list_like(other) and len(other) < len(values): + # call np.where with other to get the appropriate ValueError + np.where(~icond, values, other) + raise NotImplementedError( + "This should not be reached; call to np.where above is " + "expected to raise ValueError. Please report a bug at " + "github.com/pandas-dev/pandas" + ) + result = values.copy() + np.putmask(result, icond, alt) + else: + # By the time we get here, we should have all Series/Index + # args extracted to ndarray + if ( + is_list_like(other) + and not isinstance(other, np.ndarray) + and len(other) == self.shape[-1] + ): + # If we don't do this broadcasting here, then expressions.where + # will broadcast a 1D other to be row-like instead of + # column-like. + other = np.array(other).reshape(values.shape) + # If lengths don't match (or len(other)==1), we will raise + # inside expressions.where, see test_series_where + + # Note: expressions.where may upcast. + result = expressions.where(~icond, values, other) + # The np_can_hold_element check _should_ ensure that we always + # have result.dtype == self.dtype here. 
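The LossySetitemError / `coerce_to_target_dtype` path above is what users observe as dtype upcasting: when `other` cannot be stored in the existing block, a common dtype is chosen before writing. A small public-API sketch of that behaviour (illustrative only):

    import pandas as pd

    s = pd.Series([1, 2, 3])           # int64-backed block
    out = s.where(s > 1, other=0.5)    # 0.5 cannot be held by int64
    print(out.dtype)                   # float64 -- the block was coerced to a common dtype first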
+ + if transpose: + result = result.T + + return [self.make_block(result)] + + def fillna( + self, + value, + limit: int | None = None, + inplace: bool = False, + downcast=None, + using_cow: bool = False, + already_warned=None, + ) -> list[Block]: + """ + fillna on the block with the value. If we fail, then convert to + block to hold objects instead and try again + """ + # Caller is responsible for validating limit; if int it is strictly positive + inplace = validate_bool_kwarg(inplace, "inplace") + + if not self._can_hold_na: + # can short-circuit the isna call + noop = True + else: + mask = isna(self.values) + mask, noop = validate_putmask(self.values, mask) + + if noop: + # we can't process the value, but nothing to do + if inplace: + if using_cow: + return [self.copy(deep=False)] + # Arbitrarily imposing the convention that we ignore downcast + # on no-op when inplace=True + return [self] + else: + # GH#45423 consistent downcasting on no-ops. + nb = self.copy(deep=not using_cow) + nbs = nb._maybe_downcast( + [nb], downcast=downcast, using_cow=using_cow, caller="fillna" + ) + return nbs + + if limit is not None: + mask[mask.cumsum(self.ndim - 1) > limit] = False + + if inplace: + nbs = self.putmask( + mask.T, value, using_cow=using_cow, already_warned=already_warned + ) + else: + # without _downcast, we would break + # test_fillna_dtype_conversion_equiv_replace + nbs = self.where(value, ~mask.T, _downcast=False) + + # Note: blk._maybe_downcast vs self._maybe_downcast(nbs) + # makes a difference bc blk may have object dtype, which has + # different behavior in _maybe_downcast. + return extend_blocks( + [ + blk._maybe_downcast( + [blk], downcast=downcast, using_cow=using_cow, caller="fillna" + ) + for blk in nbs + ] + ) + + def pad_or_backfill( + self, + *, + method: FillnaOptions, + axis: AxisInt = 0, + inplace: bool = False, + limit: int | None = None, + limit_area: Literal["inside", "outside"] | None = None, + downcast: Literal["infer"] | None = None, + using_cow: bool = False, + already_warned=None, + ) -> list[Block]: + if not self._can_hold_na: + # If there are no NAs, then interpolate is a no-op + if using_cow: + return [self.copy(deep=False)] + return [self] if inplace else [self.copy()] + + copy, refs = self._get_refs_and_copy(using_cow, inplace) + + # Dispatch to the NumpyExtensionArray method. 
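In `fillna` above, `mask[mask.cumsum(self.ndim - 1) > limit] = False` keeps only the first `limit` masked positions along each column. Seen through the public API (a sketch):

    import numpy as np
    import pandas as pd

    s = pd.Series([np.nan, np.nan, np.nan, 1.0])
    print(s.fillna(0.0, limit=2).tolist())   # [0.0, 0.0, nan, 1.0] -- only the first two NaNs are filled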
+ # We know self.array_values is a NumpyExtensionArray bc EABlock overrides + vals = cast(NumpyExtensionArray, self.array_values) + if axis == 1: + vals = vals.T + new_values = vals._pad_or_backfill( + method=method, + limit=limit, + limit_area=limit_area, + copy=copy, + ) + if ( + not copy + and warn_copy_on_write() + and already_warned is not None + and not already_warned.warned_already + ): + if self.refs.has_reference(): + warnings.warn( + COW_WARNING_GENERAL_MSG, + FutureWarning, + stacklevel=find_stack_level(), + ) + already_warned.warned_already = True + if axis == 1: + new_values = new_values.T + + data = extract_array(new_values, extract_numpy=True) + + nb = self.make_block_same_class(data, refs=refs) + return nb._maybe_downcast([nb], downcast, using_cow, caller="fillna") + + @final + def interpolate( + self, + *, + method: InterpolateOptions, + index: Index, + inplace: bool = False, + limit: int | None = None, + limit_direction: Literal["forward", "backward", "both"] = "forward", + limit_area: Literal["inside", "outside"] | None = None, + downcast: Literal["infer"] | None = None, + using_cow: bool = False, + already_warned=None, + **kwargs, + ) -> list[Block]: + inplace = validate_bool_kwarg(inplace, "inplace") + # error: Non-overlapping equality check [...] + if method == "asfreq": # type: ignore[comparison-overlap] + # clean_fill_method used to allow this + missing.clean_fill_method(method) + + if not self._can_hold_na: + # If there are no NAs, then interpolate is a no-op + if using_cow: + return [self.copy(deep=False)] + return [self] if inplace else [self.copy()] + + # TODO(3.0): this case will not be reachable once GH#53638 is enforced + if self.dtype == _dtype_obj: + # only deal with floats + # bc we already checked that can_hold_na, we don't have int dtype here + # test_interp_basic checks that we make a copy here + if using_cow: + return [self.copy(deep=False)] + return [self] if inplace else [self.copy()] + + copy, refs = self._get_refs_and_copy(using_cow, inplace) + + # Dispatch to the EA method. + new_values = self.array_values.interpolate( + method=method, + axis=self.ndim - 1, + index=index, + limit=limit, + limit_direction=limit_direction, + limit_area=limit_area, + copy=copy, + **kwargs, + ) + data = extract_array(new_values, extract_numpy=True) + + if ( + not copy + and warn_copy_on_write() + and already_warned is not None + and not already_warned.warned_already + ): + if self.refs.has_reference(): + warnings.warn( + COW_WARNING_GENERAL_MSG, + FutureWarning, + stacklevel=find_stack_level(), + ) + already_warned.warned_already = True + + nb = self.make_block_same_class(data, refs=refs) + return nb._maybe_downcast([nb], downcast, using_cow, caller="interpolate") + + @final + def diff(self, n: int) -> list[Block]: + """return block for the diff of the values""" + # only reached with ndim == 2 + # TODO(EA2D): transpose will be unnecessary with 2D EAs + new_values = algos.diff(self.values.T, n, axis=0).T + return [self.make_block(values=new_values)] + + def shift(self, periods: int, fill_value: Any = None) -> list[Block]: + """shift the block by periods, possibly upcast""" + # convert integer to float if necessary. need to do a lot more than + # that, handle boolean etc also + axis = self.ndim - 1 + + # Note: periods is never 0 here, as that is handled at the top of + # NDFrame.shift. If that ever changes, we can do a check for periods=0 + # and possibly avoid coercing. 
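# The interpolate dispatch above, seen from the user side (values assumed):
import numpy as np
import pandas as pd

s = pd.Series([0.0, np.nan, np.nan, 3.0])
out = s.interpolate(method="linear", limit_direction="forward")
assert out.tolist() == [0.0, 1.0, 2.0, 3.0]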
+ + if not lib.is_scalar(fill_value) and self.dtype != _dtype_obj: + # with object dtype there is nothing to promote, and the user can + # pass pretty much any weird fill_value they like + # see test_shift_object_non_scalar_fill + raise ValueError("fill_value must be a scalar") + + fill_value = self._standardize_fill_value(fill_value) + + try: + # error: Argument 1 to "np_can_hold_element" has incompatible type + # "Union[dtype[Any], ExtensionDtype]"; expected "dtype[Any]" + casted = np_can_hold_element( + self.dtype, fill_value # type: ignore[arg-type] + ) + except LossySetitemError: + nb = self.coerce_to_target_dtype(fill_value) + return nb.shift(periods, fill_value=fill_value) + + else: + values = cast(np.ndarray, self.values) + new_values = shift(values, periods, axis, casted) + return [self.make_block_same_class(new_values)] + + @final + def quantile( + self, + qs: Index, # with dtype float64 + interpolation: QuantileInterpolation = "linear", + ) -> Block: + """ + compute the quantiles of the + + Parameters + ---------- + qs : Index + The quantiles to be computed in float64. + interpolation : str, default 'linear' + Type of interpolation. + + Returns + ------- + Block + """ + # We should always have ndim == 2 because Series dispatches to DataFrame + assert self.ndim == 2 + assert is_list_like(qs) # caller is responsible for this + + result = quantile_compat(self.values, np.asarray(qs._values), interpolation) + # ensure_block_shape needed for cases where we start with EA and result + # is ndarray, e.g. IntegerArray, SparseArray + result = ensure_block_shape(result, ndim=2) + return new_block_2d(result, placement=self._mgr_locs) + + @final + def round(self, decimals: int, using_cow: bool = False) -> Self: + """ + Rounds the values. + If the block is not of an integer or float dtype, nothing happens. + This is consistent with DataFrame.round behavivor. + (Note: Series.round would raise) + + Parameters + ---------- + decimals: int, + Number of decimal places to round to. + Caller is responsible for validating this + using_cow: bool, + Whether Copy on Write is enabled right now + """ + if not self.is_numeric or self.is_bool: + return self.copy(deep=not using_cow) + refs = None + # TODO: round only defined on BaseMaskedArray + # Series also does this, so would need to fix both places + # error: Item "ExtensionArray" of "Union[ndarray[Any, Any], ExtensionArray]" + # has no attribute "round" + values = self.values.round(decimals) # type: ignore[union-attr] + if values is self.values: + if not using_cow: + # Normally would need to do this before, but + # numpy only returns same array when round operation + # is no-op + # https://github.com/numpy/numpy/blob/486878b37fc7439a3b2b87747f50db9b62fea8eb/numpy/core/src/multiarray/calculation.c#L625-L636 + values = values.copy() + else: + refs = self.refs + return self.make_block_same_class(values, refs=refs) + + # --------------------------------------------------------------------- + # Abstract Methods Overridden By EABackedBlock and NumpyBlock + + def delete(self, loc) -> list[Block]: + """Deletes the locs from the block. + + We split the block to avoid copying the underlying data. We create new + blocks for every connected segment of the initial block that is not deleted. + The new blocks point to the initial array. 
+ """ + if not is_list_like(loc): + loc = [loc] + + if self.ndim == 1: + values = cast(np.ndarray, self.values) + values = np.delete(values, loc) + mgr_locs = self._mgr_locs.delete(loc) + return [type(self)(values, placement=mgr_locs, ndim=self.ndim)] + + if np.max(loc) >= self.values.shape[0]: + raise IndexError + + # Add one out-of-bounds indexer as maximum to collect + # all columns after our last indexer if any + loc = np.concatenate([loc, [self.values.shape[0]]]) + mgr_locs_arr = self._mgr_locs.as_array + new_blocks: list[Block] = [] + + previous_loc = -1 + # TODO(CoW): This is tricky, if parent block goes out of scope + # all split blocks are referencing each other even though they + # don't share data + refs = self.refs if self.refs.has_reference() else None + for idx in loc: + if idx == previous_loc + 1: + # There is no column between current and last idx + pass + else: + # No overload variant of "__getitem__" of "ExtensionArray" matches + # argument type "Tuple[slice, slice]" + values = self.values[previous_loc + 1 : idx, :] # type: ignore[call-overload] + locs = mgr_locs_arr[previous_loc + 1 : idx] + nb = type(self)( + values, placement=BlockPlacement(locs), ndim=self.ndim, refs=refs + ) + new_blocks.append(nb) + + previous_loc = idx + + return new_blocks + + @property + def is_view(self) -> bool: + """return a boolean if I am possibly a view""" + raise AbstractMethodError(self) + + @property + def array_values(self) -> ExtensionArray: + """ + The array that Series.array returns. Always an ExtensionArray. + """ + raise AbstractMethodError(self) + + def get_values(self, dtype: DtypeObj | None = None) -> np.ndarray: + """ + return an internal format, currently just the ndarray + this is often overridden to handle to_dense like operations + """ + raise AbstractMethodError(self) + + +class EABackedBlock(Block): + """ + Mixin for Block subclasses backed by ExtensionArray. + """ + + values: ExtensionArray + + @final + def shift(self, periods: int, fill_value: Any = None) -> list[Block]: + """ + Shift the block by `periods`. + + Dispatches to underlying ExtensionArray and re-boxes in an + ExtensionBlock. + """ + # Transpose since EA.shift is always along axis=0, while we want to shift + # along rows. + new_values = self.values.T.shift(periods=periods, fill_value=fill_value).T + return [self.make_block_same_class(new_values)] + + @final + def setitem(self, indexer, value, using_cow: bool = False): + """ + Attempt self.values[indexer] = value, possibly creating a new array. + + This differs from Block.setitem by not allowing setitem to change + the dtype of the Block. + + Parameters + ---------- + indexer : tuple, list-like, array-like, slice, int + The subset of self.values to set + value : object + The value being set + using_cow: bool, default False + Signaling if CoW is used. + + Returns + ------- + Block + + Notes + ----- + `indexer` is a direct slice/positional indexer. `value` must + be a compatible shape. 
+ """ + orig_indexer = indexer + orig_value = value + + indexer = self._unwrap_setitem_indexer(indexer) + value = self._maybe_squeeze_arg(value) + + values = self.values + if values.ndim == 2: + # TODO(GH#45419): string[pyarrow] tests break if we transpose + # unconditionally + values = values.T + check_setitem_lengths(indexer, value, values) + + try: + values[indexer] = value + except (ValueError, TypeError): + if isinstance(self.dtype, IntervalDtype): + # see TestSetitemFloatIntervalWithIntIntervalValues + nb = self.coerce_to_target_dtype(orig_value, warn_on_upcast=True) + return nb.setitem(orig_indexer, orig_value) + + elif isinstance(self, NDArrayBackedExtensionBlock): + nb = self.coerce_to_target_dtype(orig_value, warn_on_upcast=True) + return nb.setitem(orig_indexer, orig_value) + + else: + raise + + else: + return self + + @final + def where( + self, other, cond, _downcast: str | bool = "infer", using_cow: bool = False + ) -> list[Block]: + # _downcast private bc we only specify it when calling from fillna + arr = self.values.T + + cond = extract_bool_array(cond) + + orig_other = other + orig_cond = cond + other = self._maybe_squeeze_arg(other) + cond = self._maybe_squeeze_arg(cond) + + if other is lib.no_default: + other = self.fill_value + + icond, noop = validate_putmask(arr, ~cond) + if noop: + # GH#44181, GH#45135 + # Avoid a) raising for Interval/PeriodDtype and b) unnecessary object upcast + if using_cow: + return [self.copy(deep=False)] + return [self.copy()] + + try: + res_values = arr._where(cond, other).T + except (ValueError, TypeError): + if self.ndim == 1 or self.shape[0] == 1: + if isinstance(self.dtype, IntervalDtype): + # TestSetitemFloatIntervalWithIntIntervalValues + blk = self.coerce_to_target_dtype(orig_other) + nbs = blk.where(orig_other, orig_cond, using_cow=using_cow) + return self._maybe_downcast( + nbs, downcast=_downcast, using_cow=using_cow, caller="where" + ) + + elif isinstance(self, NDArrayBackedExtensionBlock): + # NB: not (yet) the same as + # isinstance(values, NDArrayBackedExtensionArray) + blk = self.coerce_to_target_dtype(orig_other) + nbs = blk.where(orig_other, orig_cond, using_cow=using_cow) + return self._maybe_downcast( + nbs, downcast=_downcast, using_cow=using_cow, caller="where" + ) + + else: + raise + + else: + # Same pattern we use in Block.putmask + is_array = isinstance(orig_other, (np.ndarray, ExtensionArray)) + + res_blocks = [] + nbs = self._split() + for i, nb in enumerate(nbs): + n = orig_other + if is_array: + # we have a different value per-column + n = orig_other[:, i : i + 1] + + submask = orig_cond[:, i : i + 1] + rbs = nb.where(n, submask, using_cow=using_cow) + res_blocks.extend(rbs) + return res_blocks + + nb = self.make_block_same_class(res_values) + return [nb] + + @final + def putmask( + self, mask, new, using_cow: bool = False, already_warned=None + ) -> list[Block]: + """ + See Block.putmask.__doc__ + """ + mask = extract_bool_array(mask) + if new is lib.no_default: + new = self.fill_value + + orig_new = new + orig_mask = mask + new = self._maybe_squeeze_arg(new) + mask = self._maybe_squeeze_arg(mask) + + if not mask.any(): + if using_cow: + return [self.copy(deep=False)] + return [self] + + if ( + warn_copy_on_write() + and already_warned is not None + and not already_warned.warned_already + ): + if self.refs.has_reference(): + warnings.warn( + COW_WARNING_GENERAL_MSG, + FutureWarning, + stacklevel=find_stack_level(), + ) + already_warned.warned_already = True + + self = self._maybe_copy(using_cow, inplace=True) + 
values = self.values + if values.ndim == 2: + values = values.T + + try: + # Caller is responsible for ensuring matching lengths + values._putmask(mask, new) + except (TypeError, ValueError): + if self.ndim == 1 or self.shape[0] == 1: + if isinstance(self.dtype, IntervalDtype): + # Discussion about what we want to support in the general + # case GH#39584 + blk = self.coerce_to_target_dtype(orig_new, warn_on_upcast=True) + return blk.putmask(orig_mask, orig_new) + + elif isinstance(self, NDArrayBackedExtensionBlock): + # NB: not (yet) the same as + # isinstance(values, NDArrayBackedExtensionArray) + blk = self.coerce_to_target_dtype(orig_new, warn_on_upcast=True) + return blk.putmask(orig_mask, orig_new) + + else: + raise + + else: + # Same pattern we use in Block.putmask + is_array = isinstance(orig_new, (np.ndarray, ExtensionArray)) + + res_blocks = [] + nbs = self._split() + for i, nb in enumerate(nbs): + n = orig_new + if is_array: + # we have a different value per-column + n = orig_new[:, i : i + 1] + + submask = orig_mask[:, i : i + 1] + rbs = nb.putmask(submask, n) + res_blocks.extend(rbs) + return res_blocks + + return [self] + + @final + def delete(self, loc) -> list[Block]: + # This will be unnecessary if/when __array_function__ is implemented + if self.ndim == 1: + values = self.values.delete(loc) + mgr_locs = self._mgr_locs.delete(loc) + return [type(self)(values, placement=mgr_locs, ndim=self.ndim)] + elif self.values.ndim == 1: + # We get here through to_stata + return [] + return super().delete(loc) + + @final + @cache_readonly + def array_values(self) -> ExtensionArray: + return self.values + + @final + def get_values(self, dtype: DtypeObj | None = None) -> np.ndarray: + """ + return object dtype as boxed values, such as Timestamps/Timedelta + """ + values: ArrayLike = self.values + if dtype == _dtype_obj: + values = values.astype(object) + # TODO(EA2D): reshape not needed with 2D EAs + return np.asarray(values).reshape(self.shape) + + @final + def pad_or_backfill( + self, + *, + method: FillnaOptions, + axis: AxisInt = 0, + inplace: bool = False, + limit: int | None = None, + limit_area: Literal["inside", "outside"] | None = None, + downcast: Literal["infer"] | None = None, + using_cow: bool = False, + already_warned=None, + ) -> list[Block]: + values = self.values + + kwargs: dict[str, Any] = {"method": method, "limit": limit} + if "limit_area" in inspect.signature(values._pad_or_backfill).parameters: + kwargs["limit_area"] = limit_area + elif limit_area is not None: + raise NotImplementedError( + f"{type(values).__name__} does not implement limit_area " + "(added in pandas 2.2). 3rd-party ExtnsionArray authors " + "need to add this argument to _pad_or_backfill." + ) + + if values.ndim == 2 and axis == 1: + # NDArrayBackedExtensionArray.fillna assumes axis=0 + new_values = values.T._pad_or_backfill(**kwargs).T + else: + new_values = values._pad_or_backfill(**kwargs) + return [self.make_block_same_class(new_values)] + + +class ExtensionBlock(EABackedBlock): + """ + Block for holding extension types. + + Notes + ----- + This holds all 3rd-party extension array types. It's also the immediate + parent class for our internal extension types' blocks. + + ExtensionArrays are limited to 1-D. 
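# Sketch of the coercion fallback in setitem/where/putmask above, using the
# IntervalDtype case the comments reference (data assumed): an endpoint the
# int64-backed IntervalArray cannot hold makes the block re-cast itself and
# retry, so the Series ends up with float64 interval endpoints (recent pandas
# versions may also emit a FutureWarning about the upcast).
import pandas as pd

ser = pd.Series(pd.interval_range(0, 3))          # interval[int64, right]
ser.iloc[0] = pd.Interval(0.5, 1.5)               # float endpoints
assert str(ser.dtype) == "interval[float64, right]"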
+ """ + + values: ExtensionArray + + def fillna( + self, + value, + limit: int | None = None, + inplace: bool = False, + downcast=None, + using_cow: bool = False, + already_warned=None, + ) -> list[Block]: + if isinstance(self.dtype, IntervalDtype): + # Block.fillna handles coercion (test_fillna_interval) + return super().fillna( + value=value, + limit=limit, + inplace=inplace, + downcast=downcast, + using_cow=using_cow, + already_warned=already_warned, + ) + if using_cow and self._can_hold_na and not self.values._hasna: + refs = self.refs + new_values = self.values + else: + copy, refs = self._get_refs_and_copy(using_cow, inplace) + + try: + new_values = self.values.fillna( + value=value, method=None, limit=limit, copy=copy + ) + except TypeError: + # 3rd party EA that has not implemented copy keyword yet + refs = None + new_values = self.values.fillna(value=value, method=None, limit=limit) + # issue the warning *after* retrying, in case the TypeError + # was caused by an invalid fill_value + warnings.warn( + # GH#53278 + "ExtensionArray.fillna added a 'copy' keyword in pandas " + "2.1.0. In a future version, ExtensionArray subclasses will " + "need to implement this keyword or an exception will be " + "raised. In the interim, the keyword is ignored by " + f"{type(self.values).__name__}.", + DeprecationWarning, + stacklevel=find_stack_level(), + ) + else: + if ( + not copy + and warn_copy_on_write() + and already_warned is not None + and not already_warned.warned_already + ): + if self.refs.has_reference(): + warnings.warn( + COW_WARNING_GENERAL_MSG, + FutureWarning, + stacklevel=find_stack_level(), + ) + already_warned.warned_already = True + + nb = self.make_block_same_class(new_values, refs=refs) + return nb._maybe_downcast([nb], downcast, using_cow=using_cow, caller="fillna") + + @cache_readonly + def shape(self) -> Shape: + # TODO(EA2D): override unnecessary with 2D EAs + if self.ndim == 1: + return (len(self.values),) + return len(self._mgr_locs), len(self.values) + + def iget(self, i: int | tuple[int, int] | tuple[slice, int]): + # In the case where we have a tuple[slice, int], the slice will always + # be slice(None) + # We _could_ make the annotation more specific, but mypy would + # complain about override mismatch: + # Literal[0] | tuple[Literal[0], int] | tuple[slice, int] + + # Note: only reached with self.ndim == 2 + + if isinstance(i, tuple): + # TODO(EA2D): unnecessary with 2D EAs + col, loc = i + if not com.is_null_slice(col) and col != 0: + raise IndexError(f"{self} only contains one item") + if isinstance(col, slice): + # the is_null_slice check above assures that col is slice(None) + # so what we want is a view on all our columns and row loc + if loc < 0: + loc += len(self.values) + # Note: loc:loc+1 vs [[loc]] makes a difference when called + # from fast_xs because we want to get a view back. + return self.values[loc : loc + 1] + return self.values[loc] + else: + if i != 0: + raise IndexError(f"{self} only contains one item") + return self.values + + def set_inplace(self, locs, values: ArrayLike, copy: bool = False) -> None: + # When an ndarray, we should have locs.tolist() == [0] + # When a BlockPlacement we should have list(locs) == [0] + if copy: + self.values = self.values.copy() + self.values[:] = values + + def _maybe_squeeze_arg(self, arg): + """ + If necessary, squeeze a (N, 1) ndarray to (N,) + """ + # e.g. 
if we are passed a 2D mask for putmask + if ( + isinstance(arg, (np.ndarray, ExtensionArray)) + and arg.ndim == self.values.ndim + 1 + ): + # TODO(EA2D): unnecessary with 2D EAs + assert arg.shape[1] == 1 + # error: No overload variant of "__getitem__" of "ExtensionArray" + # matches argument type "Tuple[slice, int]" + arg = arg[:, 0] # type: ignore[call-overload] + elif isinstance(arg, ABCDataFrame): + # 2022-01-06 only reached for setitem + # TODO: should we avoid getting here with DataFrame? + assert arg.shape[1] == 1 + arg = arg._ixs(0, axis=1)._values + + return arg + + def _unwrap_setitem_indexer(self, indexer): + """ + Adapt a 2D-indexer to our 1D values. + + This is intended for 'setitem', not 'iget' or '_slice'. + """ + # TODO: ATM this doesn't work for iget/_slice, can we change that? + + if isinstance(indexer, tuple) and len(indexer) == 2: + # TODO(EA2D): not needed with 2D EAs + # Should never have length > 2. Caller is responsible for checking. + # Length 1 is reached vis setitem_single_block and setitem_single_column + # each of which pass indexer=(pi,) + if all(isinstance(x, np.ndarray) and x.ndim == 2 for x in indexer): + # GH#44703 went through indexing.maybe_convert_ix + first, second = indexer + if not ( + second.size == 1 and (second == 0).all() and first.shape[1] == 1 + ): + raise NotImplementedError( + "This should not be reached. Please report a bug at " + "github.com/pandas-dev/pandas/" + ) + indexer = first[:, 0] + + elif lib.is_integer(indexer[1]) and indexer[1] == 0: + # reached via setitem_single_block passing the whole indexer + indexer = indexer[0] + + elif com.is_null_slice(indexer[1]): + indexer = indexer[0] + + elif is_list_like(indexer[1]) and indexer[1][0] == 0: + indexer = indexer[0] + + else: + raise NotImplementedError( + "This should not be reached. Please report a bug at " + "github.com/pandas-dev/pandas/" + ) + return indexer + + @property + def is_view(self) -> bool: + """Extension arrays are never treated as views.""" + return False + + # error: Cannot override writeable attribute with read-only property + @cache_readonly + def is_numeric(self) -> bool: # type: ignore[override] + return self.values.dtype._is_numeric + + def _slice( + self, slicer: slice | npt.NDArray[np.bool_] | npt.NDArray[np.intp] + ) -> ExtensionArray: + """ + Return a slice of my values. + + Parameters + ---------- + slicer : slice, ndarray[int], or ndarray[bool] + Valid (non-reducing) indexer for self.values. + + Returns + ------- + ExtensionArray + """ + # Notes: ndarray[bool] is only reachable when via get_rows_with_mask, which + # is only for Series, i.e. self.ndim == 1. + + # return same dims as we currently have + if self.ndim == 2: + # reached via getitem_block via _slice_take_blocks_ax0 + # TODO(EA2D): won't be necessary with 2D EAs + + if not isinstance(slicer, slice): + raise AssertionError( + "invalid slicing for a 1-ndim ExtensionArray", slicer + ) + # GH#32959 only full-slicers along fake-dim0 are valid + # TODO(EA2D): won't be necessary with 2D EAs + # range(1) instead of self._mgr_locs to avoid exception on [::-1] + # see test_iloc_getitem_slice_negative_step_ea_block + new_locs = range(1)[slicer] + if not len(new_locs): + raise AssertionError( + "invalid slicing for a 1-ndim ExtensionArray", slicer + ) + slicer = slice(None) + + return self.values[slicer] + + @final + def slice_block_rows(self, slicer: slice) -> Self: + """ + Perform __getitem__-like specialized to slicing along index. 
+ """ + # GH#42787 in principle this is equivalent to values[..., slicer], but we don't + # require subclasses of ExtensionArray to support that form (for now). + new_values = self.values[slicer] + return type(self)(new_values, self._mgr_locs, ndim=self.ndim, refs=self.refs) + + def _unstack( + self, + unstacker, + fill_value, + new_placement: npt.NDArray[np.intp], + needs_masking: npt.NDArray[np.bool_], + ): + # ExtensionArray-safe unstack. + # We override Block._unstack, which unstacks directly on the + # values of the array. For EA-backed blocks, this would require + # converting to a 2-D ndarray of objects. + # Instead, we unstack an ndarray of integer positions, followed by + # a `take` on the actual values. + + # Caller is responsible for ensuring self.shape[-1] == len(unstacker.index) + new_values, mask = unstacker.arange_result + + # Note: these next two lines ensure that + # mask.sum() == sum(len(nb.mgr_locs) for nb in blocks) + # which the calling function needs in order to pass verify_integrity=False + # to the BlockManager constructor + new_values = new_values.T[mask] + new_placement = new_placement[mask] + + # needs_masking[i] calculated once in BlockManager.unstack tells + # us if there are any -1s in the relevant indices. When False, + # that allows us to go through a faster path in 'take', among + # other things avoiding e.g. Categorical._validate_scalar. + blocks = [ + # TODO: could cast to object depending on fill_value? + type(self)( + self.values.take( + indices, allow_fill=needs_masking[i], fill_value=fill_value + ), + BlockPlacement(place), + ndim=2, + ) + for i, (indices, place) in enumerate(zip(new_values, new_placement)) + ] + return blocks, mask + + +class NumpyBlock(Block): + values: np.ndarray + __slots__ = () + + @property + def is_view(self) -> bool: + """return a boolean if I am possibly a view""" + return self.values.base is not None + + @property + def array_values(self) -> ExtensionArray: + return NumpyExtensionArray(self.values) + + def get_values(self, dtype: DtypeObj | None = None) -> np.ndarray: + if dtype == _dtype_obj: + return self.values.astype(_dtype_obj) + return self.values + + @cache_readonly + def is_numeric(self) -> bool: # type: ignore[override] + dtype = self.values.dtype + kind = dtype.kind + + return kind in "fciub" + + +class NumericBlock(NumpyBlock): + # this Block type is kept for backwards-compatibility + # TODO(3.0): delete and remove deprecation in __init__.py. + __slots__ = () + + +class ObjectBlock(NumpyBlock): + # this Block type is kept for backwards-compatibility + # TODO(3.0): delete and remove deprecation in __init__.py. + __slots__ = () + + +class NDArrayBackedExtensionBlock(EABackedBlock): + """ + Block backed by an NDArrayBackedExtensionArray + """ + + values: NDArrayBackedExtensionArray + + @property + def is_view(self) -> bool: + """return a boolean if I am possibly a view""" + # check the ndarray values of the DatetimeIndex values + return self.values._ndarray.base is not None + + +class DatetimeLikeBlock(NDArrayBackedExtensionBlock): + """Block for datetime64[ns], timedelta64[ns].""" + + __slots__ = () + is_numeric = False + values: DatetimeArray | TimedeltaArray + + +class DatetimeTZBlock(DatetimeLikeBlock): + """implement a datetime64 block with a tz attribute""" + + values: DatetimeArray + + __slots__ = () + + +# ----------------------------------------------------------------- +# Constructor Helpers + + +def maybe_coerce_values(values: ArrayLike) -> ArrayLike: + """ + Input validation for values passed to __init__. 
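# Sketch of which Block subclass from the hierarchy above ends up holding each
# dtype; the frame is assumed and ``_mgr``/``blocks`` are internal attributes.
import numpy as np
import pandas as pd

df = pd.DataFrame(
    {
        "f": np.arange(3, dtype="float64"),                      # NumpyBlock
        "t": pd.date_range("2020-01-01", periods=3, tz="UTC"),   # DatetimeTZBlock
        "c": pd.Categorical(["a", "b", "a"]),                    # ExtensionBlock
    }
)
assert {type(blk).__name__ for blk in df._mgr.blocks} == {
    "NumpyBlock",
    "DatetimeTZBlock",
    "ExtensionBlock",
}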
Ensure that + any datetime64/timedelta64 dtypes are in nanoseconds. Ensure + that we do not have string dtypes. + + Parameters + ---------- + values : np.ndarray or ExtensionArray + + Returns + ------- + values : np.ndarray or ExtensionArray + """ + # Caller is responsible for ensuring NumpyExtensionArray is already extracted. + + if isinstance(values, np.ndarray): + values = ensure_wrapped_if_datetimelike(values) + + if issubclass(values.dtype.type, str): + values = np.array(values, dtype=object) + + if isinstance(values, (DatetimeArray, TimedeltaArray)) and values.freq is not None: + # freq is only stored in DatetimeIndex/TimedeltaIndex, not in Series/DataFrame + values = values._with_freq(None) + + return values + + +def get_block_type(dtype: DtypeObj) -> type[Block]: + """ + Find the appropriate Block subclass to use for the given values and dtype. + + Parameters + ---------- + dtype : numpy or pandas dtype + + Returns + ------- + cls : class, subclass of Block + """ + if isinstance(dtype, DatetimeTZDtype): + return DatetimeTZBlock + elif isinstance(dtype, PeriodDtype): + return NDArrayBackedExtensionBlock + elif isinstance(dtype, ExtensionDtype): + # Note: need to be sure NumpyExtensionArray is unwrapped before we get here + return ExtensionBlock + + # We use kind checks because it is much more performant + # than is_foo_dtype + kind = dtype.kind + if kind in "Mm": + return DatetimeLikeBlock + + return NumpyBlock + + +def new_block_2d( + values: ArrayLike, placement: BlockPlacement, refs: BlockValuesRefs | None = None +): + # new_block specialized to case with + # ndim=2 + # isinstance(placement, BlockPlacement) + # check_ndim/ensure_block_shape already checked + klass = get_block_type(values.dtype) + + values = maybe_coerce_values(values) + return klass(values, ndim=2, placement=placement, refs=refs) + + +def new_block( + values, + placement: BlockPlacement, + *, + ndim: int, + refs: BlockValuesRefs | None = None, +) -> Block: + # caller is responsible for ensuring: + # - values is NOT a NumpyExtensionArray + # - check_ndim/ensure_block_shape already checked + # - maybe_coerce_values already called/unnecessary + klass = get_block_type(values.dtype) + return klass(values, ndim=ndim, placement=placement, refs=refs) + + +def check_ndim(values, placement: BlockPlacement, ndim: int) -> None: + """ + ndim inference and validation. + + Validates that values.ndim and ndim are consistent. + Validates that len(values) and len(placement) are consistent. + + Parameters + ---------- + values : array-like + placement : BlockPlacement + ndim : int + + Raises + ------ + ValueError : the number of dimensions do not match + """ + + if values.ndim > ndim: + # Check for both np.ndarray and ExtensionArray + raise ValueError( + "Wrong number of dimensions. " + f"values.ndim > ndim [{values.ndim} > {ndim}]" + ) + + if not is_1d_only_ea_dtype(values.dtype): + # TODO(EA2D): special case not needed with 2D EAs + if values.ndim != ndim: + raise ValueError( + "Wrong number of dimensions. 
" + f"values.ndim != ndim [{values.ndim} != {ndim}]" + ) + if len(placement) != len(values): + raise ValueError( + f"Wrong number of items passed {len(values)}, " + f"placement implies {len(placement)}" + ) + elif ndim == 2 and len(placement) != 1: + # TODO(EA2D): special case unnecessary with 2D EAs + raise ValueError("need to split") + + +def extract_pandas_array( + values: ArrayLike, dtype: DtypeObj | None, ndim: int +) -> tuple[ArrayLike, DtypeObj | None]: + """ + Ensure that we don't allow NumpyExtensionArray / NumpyEADtype in internals. + """ + # For now, blocks should be backed by ndarrays when possible. + if isinstance(values, ABCNumpyExtensionArray): + values = values.to_numpy() + if ndim and ndim > 1: + # TODO(EA2D): special case not needed with 2D EAs + values = np.atleast_2d(values) + + if isinstance(dtype, NumpyEADtype): + dtype = dtype.numpy_dtype + + return values, dtype + + +# ----------------------------------------------------------------- + + +def extend_blocks(result, blocks=None) -> list[Block]: + """return a new extended blocks, given the result""" + if blocks is None: + blocks = [] + if isinstance(result, list): + for r in result: + if isinstance(r, list): + blocks.extend(r) + else: + blocks.append(r) + else: + assert isinstance(result, Block), type(result) + blocks.append(result) + return blocks + + +def ensure_block_shape(values: ArrayLike, ndim: int = 1) -> ArrayLike: + """ + Reshape if possible to have values.ndim == ndim. + """ + + if values.ndim < ndim: + if not is_1d_only_ea_dtype(values.dtype): + # TODO(EA2D): https://github.com/pandas-dev/pandas/issues/23023 + # block.shape is incorrect for "2D" ExtensionArrays + # We can't, and don't need to, reshape. + values = cast("np.ndarray | DatetimeArray | TimedeltaArray", values) + values = values.reshape(1, -1) + + return values + + +def external_values(values: ArrayLike) -> ArrayLike: + """ + The array that Series.values returns (public attribute). + + This has some historical constraints, and is overridden in block + subclasses to return the correct array (e.g. period returns + object ndarray and datetimetz a datetime64[ns] ndarray instead of + proper extension array). + """ + if isinstance(values, (PeriodArray, IntervalArray)): + return values.astype(object) + elif isinstance(values, (DatetimeArray, TimedeltaArray)): + # NB: for datetime64tz this is different from np.asarray(values), since + # that returns an object-dtype ndarray of Timestamps. 
+ # Avoid raising in .astype in casting from dt64tz to dt64 + values = values._ndarray + + if isinstance(values, np.ndarray) and using_copy_on_write(): + values = values.view() + values.flags.writeable = False + + # TODO(CoW) we should also mark our ExtensionArrays as read-only + + return values diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/concat.py b/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/concat.py new file mode 100644 index 0000000000000000000000000000000000000000..b2d463a8c6c26f62ded5a06283f29275612c9b40 --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/concat.py @@ -0,0 +1,598 @@ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + cast, +) +import warnings + +import numpy as np + +from pandas._libs import ( + NaT, + algos as libalgos, + internals as libinternals, + lib, +) +from pandas._libs.missing import NA +from pandas.util._decorators import cache_readonly +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.cast import ( + ensure_dtype_can_hold_na, + find_common_type, +) +from pandas.core.dtypes.common import ( + is_1d_only_ea_dtype, + is_scalar, + needs_i8_conversion, +) +from pandas.core.dtypes.concat import concat_compat +from pandas.core.dtypes.dtypes import ( + ExtensionDtype, + SparseDtype, +) +from pandas.core.dtypes.missing import ( + is_valid_na_for_dtype, + isna, + isna_all, +) + +from pandas.core.construction import ensure_wrapped_if_datetimelike +from pandas.core.internals.array_manager import ArrayManager +from pandas.core.internals.blocks import ( + ensure_block_shape, + new_block_2d, +) +from pandas.core.internals.managers import ( + BlockManager, + make_na_array, +) + +if TYPE_CHECKING: + from collections.abc import Sequence + + from pandas._typing import ( + ArrayLike, + AxisInt, + DtypeObj, + Manager2D, + Shape, + ) + + from pandas import Index + from pandas.core.internals.blocks import ( + Block, + BlockPlacement, + ) + + +def _concatenate_array_managers( + mgrs: list[ArrayManager], axes: list[Index], concat_axis: AxisInt +) -> Manager2D: + """ + Concatenate array managers into one. + + Parameters + ---------- + mgrs_indexers : list of (ArrayManager, {axis: indexer,...}) tuples + axes : list of Index + concat_axis : int + + Returns + ------- + ArrayManager + """ + if concat_axis == 1: + return mgrs[0].concat_vertical(mgrs, axes) + else: + # concatting along the columns -> combine reindexed arrays in a single manager + assert concat_axis == 0 + return mgrs[0].concat_horizontal(mgrs, axes) + + +def concatenate_managers( + mgrs_indexers, axes: list[Index], concat_axis: AxisInt, copy: bool +) -> Manager2D: + """ + Concatenate block managers into one. 
+ + Parameters + ---------- + mgrs_indexers : list of (BlockManager, {axis: indexer,...}) tuples + axes : list of Index + concat_axis : int + copy : bool + + Returns + ------- + BlockManager + """ + + needs_copy = copy and concat_axis == 0 + + # TODO(ArrayManager) this assumes that all managers are of the same type + if isinstance(mgrs_indexers[0][0], ArrayManager): + mgrs = _maybe_reindex_columns_na_proxy(axes, mgrs_indexers, needs_copy) + # error: Argument 1 to "_concatenate_array_managers" has incompatible + # type "List[BlockManager]"; expected "List[Union[ArrayManager, + # SingleArrayManager, BlockManager, SingleBlockManager]]" + return _concatenate_array_managers( + mgrs, axes, concat_axis # type: ignore[arg-type] + ) + + # Assertions disabled for performance + # for tup in mgrs_indexers: + # # caller is responsible for ensuring this + # indexers = tup[1] + # assert concat_axis not in indexers + + if concat_axis == 0: + mgrs = _maybe_reindex_columns_na_proxy(axes, mgrs_indexers, needs_copy) + return mgrs[0].concat_horizontal(mgrs, axes) + + if len(mgrs_indexers) > 0 and mgrs_indexers[0][0].nblocks > 0: + first_dtype = mgrs_indexers[0][0].blocks[0].dtype + if first_dtype in [np.float64, np.float32]: + # TODO: support more dtypes here. This will be simpler once + # JoinUnit.is_na behavior is deprecated. + if ( + all(_is_homogeneous_mgr(mgr, first_dtype) for mgr, _ in mgrs_indexers) + and len(mgrs_indexers) > 1 + ): + # Fastpath! + # Length restriction is just to avoid having to worry about 'copy' + shape = tuple(len(x) for x in axes) + nb = _concat_homogeneous_fastpath(mgrs_indexers, shape, first_dtype) + return BlockManager((nb,), axes) + + mgrs = _maybe_reindex_columns_na_proxy(axes, mgrs_indexers, needs_copy) + + if len(mgrs) == 1: + mgr = mgrs[0] + out = mgr.copy(deep=False) + out.axes = axes + return out + + concat_plan = _get_combined_plan(mgrs) + + blocks = [] + values: ArrayLike + + for placement, join_units in concat_plan: + unit = join_units[0] + blk = unit.block + + if _is_uniform_join_units(join_units): + vals = [ju.block.values for ju in join_units] + + if not blk.is_extension: + # _is_uniform_join_units ensures a single dtype, so + # we can use np.concatenate, which is more performant + # than concat_compat + # error: Argument 1 to "concatenate" has incompatible type + # "List[Union[ndarray[Any, Any], ExtensionArray]]"; + # expected "Union[_SupportsArray[dtype[Any]], + # _NestedSequence[_SupportsArray[dtype[Any]]]]" + values = np.concatenate(vals, axis=1) # type: ignore[arg-type] + elif is_1d_only_ea_dtype(blk.dtype): + # TODO(EA2D): special-casing not needed with 2D EAs + values = concat_compat(vals, axis=0, ea_compat_axis=True) + values = ensure_block_shape(values, ndim=2) + else: + values = concat_compat(vals, axis=1) + + values = ensure_wrapped_if_datetimelike(values) + + fastpath = blk.values.dtype == values.dtype + else: + values = _concatenate_join_units(join_units, copy=copy) + fastpath = False + + if fastpath: + b = blk.make_block_same_class(values, placement=placement) + else: + b = new_block_2d(values, placement=placement) + + blocks.append(b) + + return BlockManager(tuple(blocks), axes) + + +def _maybe_reindex_columns_na_proxy( + axes: list[Index], + mgrs_indexers: list[tuple[BlockManager, dict[int, np.ndarray]]], + needs_copy: bool, +) -> list[BlockManager]: + """ + Reindex along columns so that all of the BlockManagers being concatenated + have matching columns. 
+ + Columns added in this reindexing have dtype=np.void, indicating they + should be ignored when choosing a column's final dtype. + """ + new_mgrs = [] + + for mgr, indexers in mgrs_indexers: + # For axis=0 (i.e. columns) we use_na_proxy and only_slice, so this + # is a cheap reindexing. + for i, indexer in indexers.items(): + mgr = mgr.reindex_indexer( + axes[i], + indexers[i], + axis=i, + copy=False, + only_slice=True, # only relevant for i==0 + allow_dups=True, + use_na_proxy=True, # only relevant for i==0 + ) + if needs_copy and not indexers: + mgr = mgr.copy() + + new_mgrs.append(mgr) + return new_mgrs + + +def _is_homogeneous_mgr(mgr: BlockManager, first_dtype: DtypeObj) -> bool: + """ + Check if this Manager can be treated as a single ndarray. + """ + if mgr.nblocks != 1: + return False + blk = mgr.blocks[0] + if not (blk.mgr_locs.is_slice_like and blk.mgr_locs.as_slice.step == 1): + return False + + return blk.dtype == first_dtype + + +def _concat_homogeneous_fastpath( + mgrs_indexers, shape: Shape, first_dtype: np.dtype +) -> Block: + """ + With single-Block managers with homogeneous dtypes (that can already hold nan), + we avoid [...] + """ + # assumes + # all(_is_homogeneous_mgr(mgr, first_dtype) for mgr, _ in in mgrs_indexers) + + if all(not indexers for _, indexers in mgrs_indexers): + # https://github.com/pandas-dev/pandas/pull/52685#issuecomment-1523287739 + arrs = [mgr.blocks[0].values.T for mgr, _ in mgrs_indexers] + arr = np.concatenate(arrs).T + bp = libinternals.BlockPlacement(slice(shape[0])) + nb = new_block_2d(arr, bp) + return nb + + arr = np.empty(shape, dtype=first_dtype) + + if first_dtype == np.float64: + take_func = libalgos.take_2d_axis0_float64_float64 + else: + take_func = libalgos.take_2d_axis0_float32_float32 + + start = 0 + for mgr, indexers in mgrs_indexers: + mgr_len = mgr.shape[1] + end = start + mgr_len + + if 0 in indexers: + take_func( + mgr.blocks[0].values, + indexers[0], + arr[:, start:end], + ) + else: + # No reindexing necessary, we can copy values directly + arr[:, start:end] = mgr.blocks[0].values + + start += mgr_len + + bp = libinternals.BlockPlacement(slice(shape[0])) + nb = new_block_2d(arr, bp) + return nb + + +def _get_combined_plan( + mgrs: list[BlockManager], +) -> list[tuple[BlockPlacement, list[JoinUnit]]]: + plan = [] + + max_len = mgrs[0].shape[0] + + blknos_list = [mgr.blknos for mgr in mgrs] + pairs = libinternals.get_concat_blkno_indexers(blknos_list) + for ind, (blknos, bp) in enumerate(pairs): + # assert bp.is_slice_like + # assert len(bp) > 0 + + units_for_bp = [] + for k, mgr in enumerate(mgrs): + blkno = blknos[k] + + nb = _get_block_for_concat_plan(mgr, bp, blkno, max_len=max_len) + unit = JoinUnit(nb) + units_for_bp.append(unit) + + plan.append((bp, units_for_bp)) + + return plan + + +def _get_block_for_concat_plan( + mgr: BlockManager, bp: BlockPlacement, blkno: int, *, max_len: int +) -> Block: + blk = mgr.blocks[blkno] + # Assertions disabled for performance: + # assert bp.is_slice_like + # assert blkno != -1 + # assert (mgr.blknos[bp] == blkno).all() + + if len(bp) == len(blk.mgr_locs) and ( + blk.mgr_locs.is_slice_like and blk.mgr_locs.as_slice.step == 1 + ): + nb = blk + else: + ax0_blk_indexer = mgr.blklocs[bp.indexer] + + slc = lib.maybe_indices_to_slice(ax0_blk_indexer, max_len) + # TODO: in all extant test cases 2023-04-08 we have a slice here. + # Will this always be the case? 
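# User-level sketch of the homogeneous float fastpath above (frames assumed):
# two single-block float64 managers concatenated row-wise can skip the generic
# JoinUnit machinery, and the result stays float64.
import numpy as np
import pandas as pd

a = pd.DataFrame(np.ones((3, 2)), columns=["x", "y"])
b = pd.DataFrame(np.zeros((4, 2)), columns=["x", "y"])
out = pd.concat([a, b], ignore_index=True)
assert out.shape == (7, 2) and (out.dtypes == np.float64).all()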
+ if isinstance(slc, slice): + nb = blk.slice_block_columns(slc) + else: + nb = blk.take_block_columns(slc) + + # assert nb.shape == (len(bp), mgr.shape[1]) + return nb + + +class JoinUnit: + def __init__(self, block: Block) -> None: + self.block = block + + def __repr__(self) -> str: + return f"{type(self).__name__}({repr(self.block)})" + + def _is_valid_na_for(self, dtype: DtypeObj) -> bool: + """ + Check that we are all-NA of a type/dtype that is compatible with this dtype. + Augments `self.is_na` with an additional check of the type of NA values. + """ + if not self.is_na: + return False + + blk = self.block + if blk.dtype.kind == "V": + return True + + if blk.dtype == object: + values = blk.values + return all(is_valid_na_for_dtype(x, dtype) for x in values.ravel(order="K")) + + na_value = blk.fill_value + if na_value is NaT and blk.dtype != dtype: + # e.g. we are dt64 and other is td64 + # fill_values match but we should not cast blk.values to dtype + # TODO: this will need updating if we ever have non-nano dt64/td64 + return False + + if na_value is NA and needs_i8_conversion(dtype): + # FIXME: kludge; test_append_empty_frame_with_timedelta64ns_nat + # e.g. blk.dtype == "Int64" and dtype is td64, we dont want + # to consider these as matching + return False + + # TODO: better to use can_hold_element? + return is_valid_na_for_dtype(na_value, dtype) + + @cache_readonly + def is_na(self) -> bool: + blk = self.block + if blk.dtype.kind == "V": + return True + + if not blk._can_hold_na: + return False + + values = blk.values + if values.size == 0: + # GH#39122 this case will return False once deprecation is enforced + return True + + if isinstance(values.dtype, SparseDtype): + return False + + if values.ndim == 1: + # TODO(EA2D): no need for special case with 2D EAs + val = values[0] + if not is_scalar(val) or not isna(val): + # ideally isna_all would do this short-circuiting + return False + return isna_all(values) + else: + val = values[0][0] + if not is_scalar(val) or not isna(val): + # ideally isna_all would do this short-circuiting + return False + return all(isna_all(row) for row in values) + + @cache_readonly + def is_na_after_size_and_isna_all_deprecation(self) -> bool: + """ + Will self.is_na be True after values.size == 0 deprecation and isna_all + deprecation are enforced? + """ + blk = self.block + if blk.dtype.kind == "V": + return True + return False + + def get_reindexed_values(self, empty_dtype: DtypeObj, upcasted_na) -> ArrayLike: + values: ArrayLike + + if upcasted_na is None and self.block.dtype.kind != "V": + # No upcasting is necessary + return self.block.values + else: + fill_value = upcasted_na + + if self._is_valid_na_for(empty_dtype): + # note: always holds when self.block.dtype.kind == "V" + blk_dtype = self.block.dtype + + if blk_dtype == np.dtype("object"): + # we want to avoid filling with np.nan if we are + # using None; we already know that we are all + # nulls + values = cast(np.ndarray, self.block.values) + if values.size and values[0, 0] is None: + fill_value = None + + return make_na_array(empty_dtype, self.block.shape, fill_value) + + return self.block.values + + +def _concatenate_join_units(join_units: list[JoinUnit], copy: bool) -> ArrayLike: + """ + Concatenate values from several join units along axis=1. 
+ """ + empty_dtype, empty_dtype_future = _get_empty_dtype(join_units) + + has_none_blocks = any(unit.block.dtype.kind == "V" for unit in join_units) + upcasted_na = _dtype_to_na_value(empty_dtype, has_none_blocks) + + to_concat = [ + ju.get_reindexed_values(empty_dtype=empty_dtype, upcasted_na=upcasted_na) + for ju in join_units + ] + + if any(is_1d_only_ea_dtype(t.dtype) for t in to_concat): + # TODO(EA2D): special case not needed if all EAs used HybridBlocks + + # error: No overload variant of "__getitem__" of "ExtensionArray" matches + # argument type "Tuple[int, slice]" + to_concat = [ + t + if is_1d_only_ea_dtype(t.dtype) + else t[0, :] # type: ignore[call-overload] + for t in to_concat + ] + concat_values = concat_compat(to_concat, axis=0, ea_compat_axis=True) + concat_values = ensure_block_shape(concat_values, 2) + + else: + concat_values = concat_compat(to_concat, axis=1) + + if empty_dtype != empty_dtype_future: + if empty_dtype == concat_values.dtype: + # GH#39122, GH#40893 + warnings.warn( + "The behavior of DataFrame concatenation with empty or all-NA " + "entries is deprecated. In a future version, this will no longer " + "exclude empty or all-NA columns when determining the result dtypes. " + "To retain the old behavior, exclude the relevant entries before " + "the concat operation.", + FutureWarning, + stacklevel=find_stack_level(), + ) + return concat_values + + +def _dtype_to_na_value(dtype: DtypeObj, has_none_blocks: bool): + """ + Find the NA value to go with this dtype. + """ + if isinstance(dtype, ExtensionDtype): + return dtype.na_value + elif dtype.kind in "mM": + return dtype.type("NaT") + elif dtype.kind in "fc": + return dtype.type("NaN") + elif dtype.kind == "b": + # different from missing.na_value_for_dtype + return None + elif dtype.kind in "iu": + if not has_none_blocks: + # different from missing.na_value_for_dtype + return None + return np.nan + elif dtype.kind == "O": + return np.nan + raise NotImplementedError + + +def _get_empty_dtype(join_units: Sequence[JoinUnit]) -> tuple[DtypeObj, DtypeObj]: + """ + Return dtype and N/A values to use when concatenating specified units. + + Returned N/A value may be None which means there was no casting involved. + + Returns + ------- + dtype + """ + if lib.dtypes_all_equal([ju.block.dtype for ju in join_units]): + empty_dtype = join_units[0].block.dtype + return empty_dtype, empty_dtype + + has_none_blocks = any(unit.block.dtype.kind == "V" for unit in join_units) + + dtypes = [unit.block.dtype for unit in join_units if not unit.is_na] + if not len(dtypes): + dtypes = [ + unit.block.dtype for unit in join_units if unit.block.dtype.kind != "V" + ] + + dtype = find_common_type(dtypes) + if has_none_blocks: + dtype = ensure_dtype_can_hold_na(dtype) + + dtype_future = dtype + if len(dtypes) != len(join_units): + dtypes_future = [ + unit.block.dtype + for unit in join_units + if not unit.is_na_after_size_and_isna_all_deprecation + ] + if not len(dtypes_future): + dtypes_future = [ + unit.block.dtype for unit in join_units if unit.block.dtype.kind != "V" + ] + + if len(dtypes) != len(dtypes_future): + dtype_future = find_common_type(dtypes_future) + if has_none_blocks: + dtype_future = ensure_dtype_can_hold_na(dtype_future) + + return dtype, dtype_future + + +def _is_uniform_join_units(join_units: list[JoinUnit]) -> bool: + """ + Check if the join units consist of blocks of uniform type that can + be concatenated using Block.concat_same_type instead of the generic + _concatenate_join_units (which uses `concat_compat`). 
+ + """ + first = join_units[0].block + if first.dtype.kind == "V": + return False + return ( + # exclude cases where a) ju.block is None or b) we have e.g. Int64+int64 + all(type(ju.block) is type(first) for ju in join_units) + and + # e.g. DatetimeLikeBlock can be dt64 or td64, but these are not uniform + all( + ju.block.dtype == first.dtype + # GH#42092 we only want the dtype_equal check for non-numeric blocks + # (for now, may change but that would need a deprecation) + or ju.block.dtype.kind in "iub" + for ju in join_units + ) + and + # no blocks that would get missing values (can lead to type upcasts) + # unless we're an extension dtype. + all(not ju.is_na or ju.block.is_extension for ju in join_units) + ) diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/construction.py b/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/construction.py new file mode 100644 index 0000000000000000000000000000000000000000..609d2c9a7a285ec23569f9fa06067f0a5b0a00cc --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/construction.py @@ -0,0 +1,1072 @@ +""" +Functions for preparing various inputs passed to the DataFrame or Series +constructors before passing them to a BlockManager. +""" +from __future__ import annotations + +from collections import abc +from typing import ( + TYPE_CHECKING, + Any, +) + +import numpy as np +from numpy import ma + +from pandas._config import using_pyarrow_string_dtype + +from pandas._libs import lib + +from pandas.core.dtypes.astype import astype_is_view +from pandas.core.dtypes.cast import ( + construct_1d_arraylike_from_scalar, + dict_compat, + maybe_cast_to_datetime, + maybe_convert_platform, + maybe_infer_to_datetimelike, +) +from pandas.core.dtypes.common import ( + is_1d_only_ea_dtype, + is_integer_dtype, + is_list_like, + is_named_tuple, + is_object_dtype, +) +from pandas.core.dtypes.dtypes import ExtensionDtype +from pandas.core.dtypes.generic import ( + ABCDataFrame, + ABCSeries, +) + +from pandas.core import ( + algorithms, + common as com, +) +from pandas.core.arrays import ExtensionArray +from pandas.core.arrays.string_ import StringDtype +from pandas.core.construction import ( + array as pd_array, + ensure_wrapped_if_datetimelike, + extract_array, + range_to_ndarray, + sanitize_array, +) +from pandas.core.indexes.api import ( + DatetimeIndex, + Index, + TimedeltaIndex, + default_index, + ensure_index, + get_objs_combined_axis, + union_indexes, +) +from pandas.core.internals.array_manager import ( + ArrayManager, + SingleArrayManager, +) +from pandas.core.internals.blocks import ( + BlockPlacement, + ensure_block_shape, + new_block, + new_block_2d, +) +from pandas.core.internals.managers import ( + BlockManager, + SingleBlockManager, + create_block_manager_from_blocks, + create_block_manager_from_column_arrays, +) + +if TYPE_CHECKING: + from collections.abc import ( + Hashable, + Sequence, + ) + + from pandas._typing import ( + ArrayLike, + DtypeObj, + Manager, + npt, + ) +# --------------------------------------------------------------------- +# BlockManager Interface + + +def arrays_to_mgr( + arrays, + columns: Index, + index, + *, + dtype: DtypeObj | None = None, + verify_integrity: bool = True, + typ: str | None = None, + consolidate: bool = True, +) -> Manager: + """ + Segregate Series based on type and coerce into matrices. + + Needs to handle a lot of exceptional cases. 
+ """ + if verify_integrity: + # figure out the index, if necessary + if index is None: + index = _extract_index(arrays) + else: + index = ensure_index(index) + + # don't force copy because getting jammed in an ndarray anyway + arrays, refs = _homogenize(arrays, index, dtype) + # _homogenize ensures + # - all(len(x) == len(index) for x in arrays) + # - all(x.ndim == 1 for x in arrays) + # - all(isinstance(x, (np.ndarray, ExtensionArray)) for x in arrays) + # - all(type(x) is not NumpyExtensionArray for x in arrays) + + else: + index = ensure_index(index) + arrays = [extract_array(x, extract_numpy=True) for x in arrays] + # with _from_arrays, the passed arrays should never be Series objects + refs = [None] * len(arrays) + + # Reached via DataFrame._from_arrays; we do minimal validation here + for arr in arrays: + if ( + not isinstance(arr, (np.ndarray, ExtensionArray)) + or arr.ndim != 1 + or len(arr) != len(index) + ): + raise ValueError( + "Arrays must be 1-dimensional np.ndarray or ExtensionArray " + "with length matching len(index)" + ) + + columns = ensure_index(columns) + if len(columns) != len(arrays): + raise ValueError("len(arrays) must match len(columns)") + + # from BlockManager perspective + axes = [columns, index] + + if typ == "block": + return create_block_manager_from_column_arrays( + arrays, axes, consolidate=consolidate, refs=refs + ) + elif typ == "array": + return ArrayManager(arrays, [index, columns]) + else: + raise ValueError(f"'typ' needs to be one of {{'block', 'array'}}, got '{typ}'") + + +def rec_array_to_mgr( + data: np.rec.recarray | np.ndarray, + index, + columns, + dtype: DtypeObj | None, + copy: bool, + typ: str, +) -> Manager: + """ + Extract from a masked rec array and create the manager. + """ + # essentially process a record array then fill it + fdata = ma.getdata(data) + if index is None: + index = default_index(len(fdata)) + else: + index = ensure_index(index) + + if columns is not None: + columns = ensure_index(columns) + arrays, arr_columns = to_arrays(fdata, columns) + + # create the manager + + arrays, arr_columns = reorder_arrays(arrays, arr_columns, columns, len(index)) + if columns is None: + columns = arr_columns + + mgr = arrays_to_mgr(arrays, columns, index, dtype=dtype, typ=typ) + + if copy: + mgr = mgr.copy() + return mgr + + +def mgr_to_mgr(mgr, typ: str, copy: bool = True) -> Manager: + """ + Convert to specific type of Manager. Does not copy if the type is already + correct. Does not guarantee a copy otherwise. `copy` keyword only controls + whether conversion from Block->ArrayManager copies the 1D arrays. 
+ """ + new_mgr: Manager + + if typ == "block": + if isinstance(mgr, BlockManager): + new_mgr = mgr + else: + if mgr.ndim == 2: + new_mgr = arrays_to_mgr( + mgr.arrays, mgr.axes[0], mgr.axes[1], typ="block" + ) + else: + new_mgr = SingleBlockManager.from_array(mgr.arrays[0], mgr.index) + elif typ == "array": + if isinstance(mgr, ArrayManager): + new_mgr = mgr + else: + if mgr.ndim == 2: + arrays = [mgr.iget_values(i) for i in range(len(mgr.axes[0]))] + if copy: + arrays = [arr.copy() for arr in arrays] + new_mgr = ArrayManager(arrays, [mgr.axes[1], mgr.axes[0]]) + else: + array = mgr.internal_values() + if copy: + array = array.copy() + new_mgr = SingleArrayManager([array], [mgr.index]) + else: + raise ValueError(f"'typ' needs to be one of {{'block', 'array'}}, got '{typ}'") + return new_mgr + + +# --------------------------------------------------------------------- +# DataFrame Constructor Interface + + +def ndarray_to_mgr( + values, index, columns, dtype: DtypeObj | None, copy: bool, typ: str +) -> Manager: + # used in DataFrame.__init__ + # input must be a ndarray, list, Series, Index, ExtensionArray + + if isinstance(values, ABCSeries): + if columns is None: + if values.name is not None: + columns = Index([values.name]) + if index is None: + index = values.index + else: + values = values.reindex(index) + + # zero len case (GH #2234) + if not len(values) and columns is not None and len(columns): + values = np.empty((0, 1), dtype=object) + + # if the array preparation does a copy -> avoid this for ArrayManager, + # since the copy is done on conversion to 1D arrays + copy_on_sanitize = False if typ == "array" else copy + + vdtype = getattr(values, "dtype", None) + refs = None + if is_1d_only_ea_dtype(vdtype) or is_1d_only_ea_dtype(dtype): + # GH#19157 + + if isinstance(values, (np.ndarray, ExtensionArray)) and values.ndim > 1: + # GH#12513 a EA dtype passed with a 2D array, split into + # multiple EAs that view the values + # error: No overload variant of "__getitem__" of "ExtensionArray" + # matches argument type "Tuple[slice, int]" + values = [ + values[:, n] # type: ignore[call-overload] + for n in range(values.shape[1]) + ] + else: + values = [values] + + if columns is None: + columns = Index(range(len(values))) + else: + columns = ensure_index(columns) + + return arrays_to_mgr(values, columns, index, dtype=dtype, typ=typ) + + elif isinstance(vdtype, ExtensionDtype): + # i.e. 
Datetime64TZ, PeriodDtype; cases with is_1d_only_ea_dtype(vdtype) + # are already caught above + values = extract_array(values, extract_numpy=True) + if copy: + values = values.copy() + if values.ndim == 1: + values = values.reshape(-1, 1) + + elif isinstance(values, (ABCSeries, Index)): + if not copy_on_sanitize and ( + dtype is None or astype_is_view(values.dtype, dtype) + ): + refs = values._references + + if copy_on_sanitize: + values = values._values.copy() + else: + values = values._values + + values = _ensure_2d(values) + + elif isinstance(values, (np.ndarray, ExtensionArray)): + # drop subclass info + _copy = ( + copy_on_sanitize + if (dtype is None or astype_is_view(values.dtype, dtype)) + else False + ) + values = np.array(values, copy=_copy) + values = _ensure_2d(values) + + else: + # by definition an array here + # the dtypes will be coerced to a single dtype + values = _prep_ndarraylike(values, copy=copy_on_sanitize) + + if dtype is not None and values.dtype != dtype: + # GH#40110 see similar check inside sanitize_array + values = sanitize_array( + values, + None, + dtype=dtype, + copy=copy_on_sanitize, + allow_2d=True, + ) + + # _prep_ndarraylike ensures that values.ndim == 2 at this point + index, columns = _get_axes( + values.shape[0], values.shape[1], index=index, columns=columns + ) + + _check_values_indices_shape_match(values, index, columns) + + if typ == "array": + if issubclass(values.dtype.type, str): + values = np.array(values, dtype=object) + + if dtype is None and is_object_dtype(values.dtype): + arrays = [ + ensure_wrapped_if_datetimelike( + maybe_infer_to_datetimelike(values[:, i]) + ) + for i in range(values.shape[1]) + ] + else: + if lib.is_np_dtype(values.dtype, "mM"): + values = ensure_wrapped_if_datetimelike(values) + arrays = [values[:, i] for i in range(values.shape[1])] + + if copy: + arrays = [arr.copy() for arr in arrays] + + return ArrayManager(arrays, [index, columns], verify_integrity=False) + + values = values.T + + # if we don't have a dtype specified, then try to convert objects + # on the entire block; this is to convert if we have datetimelike's + # embedded in an object type + if dtype is None and is_object_dtype(values.dtype): + obj_columns = list(values) + maybe_datetime = [maybe_infer_to_datetimelike(x) for x in obj_columns] + # don't convert (and copy) the objects if no type inference occurs + if any(x is not y for x, y in zip(obj_columns, maybe_datetime)): + dvals_list = [ensure_block_shape(dval, 2) for dval in maybe_datetime] + block_values = [ + new_block_2d(dvals_list[n], placement=BlockPlacement(n)) + for n in range(len(dvals_list)) + ] + else: + bp = BlockPlacement(slice(len(columns))) + nb = new_block_2d(values, placement=bp, refs=refs) + block_values = [nb] + elif dtype is None and values.dtype.kind == "U" and using_pyarrow_string_dtype(): + dtype = StringDtype(storage="pyarrow_numpy") + + obj_columns = list(values) + block_values = [ + new_block( + dtype.construct_array_type()._from_sequence(data, dtype=dtype), + BlockPlacement(slice(i, i + 1)), + ndim=2, + ) + for i, data in enumerate(obj_columns) + ] + + else: + bp = BlockPlacement(slice(len(columns))) + nb = new_block_2d(values, placement=bp, refs=refs) + block_values = [nb] + + if len(columns) == 0: + # TODO: check len(values) == 0? 
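+ # with no columns there are no arrays to wrap, so the BlockManager
+ # below is created from an empty list of blocks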
+ block_values = [] + + return create_block_manager_from_blocks( + block_values, [columns, index], verify_integrity=False + ) + + +def _check_values_indices_shape_match( + values: np.ndarray, index: Index, columns: Index +) -> None: + """ + Check that the shape implied by our axes matches the actual shape of the + data. + """ + if values.shape[1] != len(columns) or values.shape[0] != len(index): + # Could let this raise in Block constructor, but we get a more + # helpful exception message this way. + if values.shape[0] == 0 < len(index): + raise ValueError("Empty data passed with indices specified.") + + passed = values.shape + implied = (len(index), len(columns)) + raise ValueError(f"Shape of passed values is {passed}, indices imply {implied}") + + +def dict_to_mgr( + data: dict, + index, + columns, + *, + dtype: DtypeObj | None = None, + typ: str = "block", + copy: bool = True, +) -> Manager: + """ + Segregate Series based on type and coerce into matrices. + Needs to handle a lot of exceptional cases. + + Used in DataFrame.__init__ + """ + arrays: Sequence[Any] | Series + + if columns is not None: + from pandas.core.series import Series + + arrays = Series(data, index=columns, dtype=object) + missing = arrays.isna() + if index is None: + # GH10856 + # raise ValueError if only scalars in dict + index = _extract_index(arrays[~missing]) + else: + index = ensure_index(index) + + # no obvious "empty" int column + if missing.any() and not is_integer_dtype(dtype): + nan_dtype: DtypeObj + + if dtype is not None: + # calling sanitize_array ensures we don't mix-and-match + # NA dtypes + midxs = missing.values.nonzero()[0] + for i in midxs: + arr = sanitize_array(arrays.iat[i], index, dtype=dtype) + arrays.iat[i] = arr + else: + # GH#1783 + nan_dtype = np.dtype("object") + val = construct_1d_arraylike_from_scalar(np.nan, len(index), nan_dtype) + nmissing = missing.sum() + if copy: + rhs = [val] * nmissing + else: + # GH#45369 + rhs = [val.copy() for _ in range(nmissing)] + arrays.loc[missing] = rhs + + arrays = list(arrays) + columns = ensure_index(columns) + + else: + keys = list(data.keys()) + columns = Index(keys) if keys else default_index(0) + arrays = [com.maybe_iterable_to_list(data[k]) for k in keys] + + if copy: + if typ == "block": + # We only need to copy arrays that will not get consolidated, i.e. + # only EA arrays + arrays = [ + x.copy() + if isinstance(x, ExtensionArray) + else x.copy(deep=True) + if ( + isinstance(x, Index) + or isinstance(x, ABCSeries) + and is_1d_only_ea_dtype(x.dtype) + ) + else x + for x in arrays + ] + else: + # dtype check to exclude e.g. range objects, scalars + arrays = [x.copy() if hasattr(x, "dtype") else x for x in arrays] + + return arrays_to_mgr(arrays, columns, index, dtype=dtype, typ=typ, consolidate=copy) + + +def nested_data_to_arrays( + data: Sequence, + columns: Index | None, + index: Index | None, + dtype: DtypeObj | None, +) -> tuple[list[ArrayLike], Index, Index]: + """ + Convert a single sequence of arrays to multiple arrays. 
+ """ + # By the time we get here we have already checked treat_as_nested(data) + + if is_named_tuple(data[0]) and columns is None: + columns = ensure_index(data[0]._fields) + + arrays, columns = to_arrays(data, columns, dtype=dtype) + columns = ensure_index(columns) + + if index is None: + if isinstance(data[0], ABCSeries): + index = _get_names_from_index(data) + else: + index = default_index(len(data)) + + return arrays, columns, index + + +def treat_as_nested(data) -> bool: + """ + Check if we should use nested_data_to_arrays. + """ + return ( + len(data) > 0 + and is_list_like(data[0]) + and getattr(data[0], "ndim", 1) == 1 + and not (isinstance(data, ExtensionArray) and data.ndim == 2) + ) + + +# --------------------------------------------------------------------- + + +def _prep_ndarraylike(values, copy: bool = True) -> np.ndarray: + # values is specifically _not_ ndarray, EA, Index, or Series + # We only get here with `not treat_as_nested(values)` + + if len(values) == 0: + # TODO: check for length-zero range, in which case return int64 dtype? + # TODO: reuse anything in try_cast? + return np.empty((0, 0), dtype=object) + elif isinstance(values, range): + arr = range_to_ndarray(values) + return arr[..., np.newaxis] + + def convert(v): + if not is_list_like(v) or isinstance(v, ABCDataFrame): + return v + + v = extract_array(v, extract_numpy=True) + res = maybe_convert_platform(v) + # We don't do maybe_infer_to_datetimelike here bc we will end up doing + # it column-by-column in ndarray_to_mgr + return res + + # we could have a 1-dim or 2-dim list here + # this is equiv of np.asarray, but does object conversion + # and platform dtype preservation + # does not convert e.g. [1, "a", True] to ["1", "a", "True"] like + # np.asarray would + if is_list_like(values[0]): + values = np.array([convert(v) for v in values]) + elif isinstance(values[0], np.ndarray) and values[0].ndim == 0: + # GH#21861 see test_constructor_list_of_lists + values = np.array([convert(v) for v in values]) + else: + values = convert(values) + + return _ensure_2d(values) + + +def _ensure_2d(values: np.ndarray) -> np.ndarray: + """ + Reshape 1D values, raise on anything else other than 2D. + """ + if values.ndim == 1: + values = values.reshape((values.shape[0], 1)) + elif values.ndim != 2: + raise ValueError(f"Must pass 2-d input. shape={values.shape}") + return values + + +def _homogenize( + data, index: Index, dtype: DtypeObj | None +) -> tuple[list[ArrayLike], list[Any]]: + oindex = None + homogenized = [] + # if the original array-like in `data` is a Series, keep track of this Series' refs + refs: list[Any] = [] + + for val in data: + if isinstance(val, (ABCSeries, Index)): + if dtype is not None: + val = val.astype(dtype, copy=False) + if isinstance(val, ABCSeries) and val.index is not index: + # Forces alignment. 
No need to copy data since we + # are putting it into an ndarray later + val = val.reindex(index, copy=False) + refs.append(val._references) + val = val._values + else: + if isinstance(val, dict): + # GH#41785 this _should_ be equivalent to (but faster than) + # val = Series(val, index=index)._values + if oindex is None: + oindex = index.astype("O") + + if isinstance(index, (DatetimeIndex, TimedeltaIndex)): + # see test_constructor_dict_datetime64_index + val = dict_compat(val) + else: + # see test_constructor_subclass_dict + val = dict(val) + val = lib.fast_multiget(val, oindex._values, default=np.nan) + + val = sanitize_array(val, index, dtype=dtype, copy=False) + com.require_length_match(val, index) + refs.append(None) + + homogenized.append(val) + + return homogenized, refs + + +def _extract_index(data) -> Index: + """ + Try to infer an Index from the passed data, raise ValueError on failure. + """ + index: Index + if len(data) == 0: + return default_index(0) + + raw_lengths = [] + indexes: list[list[Hashable] | Index] = [] + + have_raw_arrays = False + have_series = False + have_dicts = False + + for val in data: + if isinstance(val, ABCSeries): + have_series = True + indexes.append(val.index) + elif isinstance(val, dict): + have_dicts = True + indexes.append(list(val.keys())) + elif is_list_like(val) and getattr(val, "ndim", 1) == 1: + have_raw_arrays = True + raw_lengths.append(len(val)) + elif isinstance(val, np.ndarray) and val.ndim > 1: + raise ValueError("Per-column arrays must each be 1-dimensional") + + if not indexes and not raw_lengths: + raise ValueError("If using all scalar values, you must pass an index") + + if have_series: + index = union_indexes(indexes) + elif have_dicts: + index = union_indexes(indexes, sort=False) + + if have_raw_arrays: + lengths = list(set(raw_lengths)) + if len(lengths) > 1: + raise ValueError("All arrays must be of the same length") + + if have_dicts: + raise ValueError( + "Mixing dicts with non-Series may lead to ambiguous ordering." + ) + + if have_series: + if lengths[0] != len(index): + msg = ( + f"array length {lengths[0]} does not match index " + f"length {len(index)}" + ) + raise ValueError(msg) + else: + index = default_index(lengths[0]) + + return ensure_index(index) + + +def reorder_arrays( + arrays: list[ArrayLike], arr_columns: Index, columns: Index | None, length: int +) -> tuple[list[ArrayLike], Index]: + """ + Pre-emptively (cheaply) reindex arrays with new columns. 
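+
+ Columns requested in ``columns`` but missing from ``arr_columns`` are
+ filled with all-NaN object-dtype arrays of the given length.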
+ """ + # reorder according to the columns + if columns is not None: + if not columns.equals(arr_columns): + # if they are equal, there is nothing to do + new_arrays: list[ArrayLike] = [] + indexer = arr_columns.get_indexer(columns) + for i, k in enumerate(indexer): + if k == -1: + # by convention default is all-NaN object dtype + arr = np.empty(length, dtype=object) + arr.fill(np.nan) + else: + arr = arrays[k] + new_arrays.append(arr) + + arrays = new_arrays + arr_columns = columns + + return arrays, arr_columns + + +def _get_names_from_index(data) -> Index: + has_some_name = any(getattr(s, "name", None) is not None for s in data) + if not has_some_name: + return default_index(len(data)) + + index: list[Hashable] = list(range(len(data))) + count = 0 + for i, s in enumerate(data): + n = getattr(s, "name", None) + if n is not None: + index[i] = n + else: + index[i] = f"Unnamed {count}" + count += 1 + + return Index(index) + + +def _get_axes( + N: int, K: int, index: Index | None, columns: Index | None +) -> tuple[Index, Index]: + # helper to create the axes as indexes + # return axes or defaults + + if index is None: + index = default_index(N) + else: + index = ensure_index(index) + + if columns is None: + columns = default_index(K) + else: + columns = ensure_index(columns) + return index, columns + + +def dataclasses_to_dicts(data): + """ + Converts a list of dataclass instances to a list of dictionaries. + + Parameters + ---------- + data : List[Type[dataclass]] + + Returns + -------- + list_dict : List[dict] + + Examples + -------- + >>> from dataclasses import dataclass + >>> @dataclass + ... class Point: + ... x: int + ... y: int + + >>> dataclasses_to_dicts([Point(1, 2), Point(2, 3)]) + [{'x': 1, 'y': 2}, {'x': 2, 'y': 3}] + + """ + from dataclasses import asdict + + return list(map(asdict, data)) + + +# --------------------------------------------------------------------- +# Conversion of Inputs to Arrays + + +def to_arrays( + data, columns: Index | None, dtype: DtypeObj | None = None +) -> tuple[list[ArrayLike], Index]: + """ + Return list of arrays, columns. + + Returns + ------- + list[ArrayLike] + These will become columns in a DataFrame. + Index + This will become frame.columns. + + Notes + ----- + Ensures that len(result_arrays) == len(result_index). + """ + + if not len(data): + if isinstance(data, np.ndarray): + if data.dtype.names is not None: + # i.e. numpy structured array + columns = ensure_index(data.dtype.names) + arrays = [data[name] for name in columns] + + if len(data) == 0: + # GH#42456 the indexing above results in list of 2D ndarrays + # TODO: is that an issue with numpy? + for i, arr in enumerate(arrays): + if arr.ndim == 2: + arrays[i] = arr[:, 0] + + return arrays, columns + return [], ensure_index([]) + + elif isinstance(data, np.ndarray) and data.dtype.names is not None: + # e.g. 
recarray + columns = Index(list(data.dtype.names)) + arrays = [data[k] for k in columns] + return arrays, columns + + if isinstance(data[0], (list, tuple)): + arr = _list_to_arrays(data) + elif isinstance(data[0], abc.Mapping): + arr, columns = _list_of_dict_to_arrays(data, columns) + elif isinstance(data[0], ABCSeries): + arr, columns = _list_of_series_to_arrays(data, columns) + else: + # last ditch effort + data = [tuple(x) for x in data] + arr = _list_to_arrays(data) + + content, columns = _finalize_columns_and_data(arr, columns, dtype) + return content, columns + + +def _list_to_arrays(data: list[tuple | list]) -> np.ndarray: + # Returned np.ndarray has ndim = 2 + # Note: we already check len(data) > 0 before getting hre + if isinstance(data[0], tuple): + content = lib.to_object_array_tuples(data) + else: + # list of lists + content = lib.to_object_array(data) + return content + + +def _list_of_series_to_arrays( + data: list, + columns: Index | None, +) -> tuple[np.ndarray, Index]: + # returned np.ndarray has ndim == 2 + + if columns is None: + # We know pass_data is non-empty because data[0] is a Series + pass_data = [x for x in data if isinstance(x, (ABCSeries, ABCDataFrame))] + columns = get_objs_combined_axis(pass_data, sort=False) + + indexer_cache: dict[int, np.ndarray] = {} + + aligned_values = [] + for s in data: + index = getattr(s, "index", None) + if index is None: + index = default_index(len(s)) + + if id(index) in indexer_cache: + indexer = indexer_cache[id(index)] + else: + indexer = indexer_cache[id(index)] = index.get_indexer(columns) + + values = extract_array(s, extract_numpy=True) + aligned_values.append(algorithms.take_nd(values, indexer)) + + content = np.vstack(aligned_values) + return content, columns + + +def _list_of_dict_to_arrays( + data: list[dict], + columns: Index | None, +) -> tuple[np.ndarray, Index]: + """ + Convert list of dicts to numpy arrays + + if `columns` is not passed, column names are inferred from the records + - for OrderedDict and dicts, the column names match + the key insertion-order from the first record to the last. + - For other kinds of dict-likes, the keys are lexically sorted. + + Parameters + ---------- + data : iterable + collection of records (OrderedDict, dict) + columns: iterables or None + + Returns + ------- + content : np.ndarray[object, ndim=2] + columns : Index + """ + if columns is None: + gen = (list(x.keys()) for x in data) + sort = not any(isinstance(d, dict) for d in data) + pre_cols = lib.fast_unique_multiple_list_gen(gen, sort=sort) + columns = ensure_index(pre_cols) + + # assure that they are of the base dict class and not of derived + # classes + data = [d if type(d) is dict else dict(d) for d in data] # noqa: E721 + + content = lib.dicts_to_array(data, list(columns)) + return content, columns + + +def _finalize_columns_and_data( + content: np.ndarray, # ndim == 2 + columns: Index | None, + dtype: DtypeObj | None, +) -> tuple[list[ArrayLike], Index]: + """ + Ensure we have valid columns, cast object dtypes if possible. 
+ """ + contents = list(content.T) + + try: + columns = _validate_or_indexify_columns(contents, columns) + except AssertionError as err: + # GH#26429 do not raise user-facing AssertionError + raise ValueError(err) from err + + if len(contents) and contents[0].dtype == np.object_: + contents = convert_object_array(contents, dtype=dtype) + + return contents, columns + + +def _validate_or_indexify_columns( + content: list[np.ndarray], columns: Index | None +) -> Index: + """ + If columns is None, make numbers as column names; Otherwise, validate that + columns have valid length. + + Parameters + ---------- + content : list of np.ndarrays + columns : Index or None + + Returns + ------- + Index + If columns is None, assign positional column index value as columns. + + Raises + ------ + 1. AssertionError when content is not composed of list of lists, and if + length of columns is not equal to length of content. + 2. ValueError when content is list of lists, but length of each sub-list + is not equal + 3. ValueError when content is list of lists, but length of sub-list is + not equal to length of content + """ + if columns is None: + columns = default_index(len(content)) + else: + # Add mask for data which is composed of list of lists + is_mi_list = isinstance(columns, list) and all( + isinstance(col, list) for col in columns + ) + + if not is_mi_list and len(columns) != len(content): # pragma: no cover + # caller's responsibility to check for this... + raise AssertionError( + f"{len(columns)} columns passed, passed data had " + f"{len(content)} columns" + ) + if is_mi_list: + # check if nested list column, length of each sub-list should be equal + if len({len(col) for col in columns}) > 1: + raise ValueError( + "Length of columns passed for MultiIndex columns is different" + ) + + # if columns is not empty and length of sublist is not equal to content + if columns and len(columns[0]) != len(content): + raise ValueError( + f"{len(columns[0])} columns passed, passed data had " + f"{len(content)} columns" + ) + return columns + + +def convert_object_array( + content: list[npt.NDArray[np.object_]], + dtype: DtypeObj | None, + dtype_backend: str = "numpy", + coerce_float: bool = False, +) -> list[ArrayLike]: + """ + Internal function to convert object array. + + Parameters + ---------- + content: List[np.ndarray] + dtype: np.dtype or ExtensionDtype + dtype_backend: Controls if nullable/pyarrow dtypes are returned. + coerce_float: Cast floats that are integers to int. + + Returns + ------- + List[ArrayLike] + """ + # provide soft conversion of object dtypes + + def convert(arr): + if dtype != np.dtype("O"): + arr = lib.maybe_convert_objects( + arr, + try_float=coerce_float, + convert_to_nullable_dtype=dtype_backend != "numpy", + ) + # Notes on cases that get here 2023-02-15 + # 1) we DO get here when arr is all Timestamps and dtype=None + # 2) disabling this doesn't break the world, so this must be + # getting caught at a higher level + # 3) passing convert_non_numeric to maybe_convert_objects get this right + # 4) convert_non_numeric? + + if dtype is None: + if arr.dtype == np.dtype("O"): + # i.e. 
maybe_convert_objects didn't convert + arr = maybe_infer_to_datetimelike(arr) + if dtype_backend != "numpy" and arr.dtype == np.dtype("O"): + new_dtype = StringDtype() + arr_cls = new_dtype.construct_array_type() + arr = arr_cls._from_sequence(arr, dtype=new_dtype) + elif dtype_backend != "numpy" and isinstance(arr, np.ndarray): + if arr.dtype.kind in "iufb": + arr = pd_array(arr, copy=False) + + elif isinstance(dtype, ExtensionDtype): + # TODO: test(s) that get here + # TODO: try to de-duplicate this convert function with + # core.construction functions + cls = dtype.construct_array_type() + arr = cls._from_sequence(arr, dtype=dtype, copy=False) + elif dtype.kind in "mM": + # This restriction is harmless bc these are the only cases + # where maybe_cast_to_datetime is not a no-op. + # Here we know: + # 1) dtype.kind in "mM" and + # 2) arr is either object or numeric dtype + arr = maybe_cast_to_datetime(arr, dtype) + + return arr + + arrays = [convert(arr) for arr in content] + + return arrays diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/ops.py b/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/ops.py new file mode 100644 index 0000000000000000000000000000000000000000..cf9466c0bdf0bf4df623e2d819faf3ea7b36c878 --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/pandas/core/internals/ops.py @@ -0,0 +1,154 @@ +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + NamedTuple, +) + +from pandas.core.dtypes.common import is_1d_only_ea_dtype + +if TYPE_CHECKING: + from collections.abc import Iterator + + from pandas._libs.internals import BlockPlacement + from pandas._typing import ArrayLike + + from pandas.core.internals.blocks import Block + from pandas.core.internals.managers import BlockManager + + +class BlockPairInfo(NamedTuple): + lvals: ArrayLike + rvals: ArrayLike + locs: BlockPlacement + left_ea: bool + right_ea: bool + rblk: Block + + +def _iter_block_pairs( + left: BlockManager, right: BlockManager +) -> Iterator[BlockPairInfo]: + # At this point we have already checked the parent DataFrames for + # assert rframe._indexed_same(lframe) + + for blk in left.blocks: + locs = blk.mgr_locs + blk_vals = blk.values + + left_ea = blk_vals.ndim == 1 + + rblks = right._slice_take_blocks_ax0(locs.indexer, only_slice=True) + + # Assertions are disabled for performance, but should hold: + # if left_ea: + # assert len(locs) == 1, locs + # assert len(rblks) == 1, rblks + # assert rblks[0].shape[0] == 1, rblks[0].shape + + for rblk in rblks: + right_ea = rblk.values.ndim == 1 + + lvals, rvals = _get_same_shape_values(blk, rblk, left_ea, right_ea) + info = BlockPairInfo(lvals, rvals, locs, left_ea, right_ea, rblk) + yield info + + +def operate_blockwise( + left: BlockManager, right: BlockManager, array_op +) -> BlockManager: + # At this point we have already checked the parent DataFrames for + # assert rframe._indexed_same(lframe) + + res_blks: list[Block] = [] + for lvals, rvals, locs, left_ea, right_ea, rblk in _iter_block_pairs(left, right): + res_values = array_op(lvals, rvals) + if ( + left_ea + and not right_ea + and hasattr(res_values, "reshape") + and not is_1d_only_ea_dtype(res_values.dtype) + ): + res_values = res_values.reshape(1, -1) + nbs = rblk._split_op_result(res_values) + + # Assertions are disabled for performance, but should hold: + # if right_ea or left_ea: + # assert len(nbs) == 1 + # else: + # assert res_values.shape == lvals.shape, (res_values.shape, lvals.shape) + + _reset_block_mgr_locs(nbs, locs) + + 
res_blks.extend(nbs) + + # Assertions are disabled for performance, but should hold: + # slocs = {y for nb in res_blks for y in nb.mgr_locs.as_array} + # nlocs = sum(len(nb.mgr_locs.as_array) for nb in res_blks) + # assert nlocs == len(left.items), (nlocs, len(left.items)) + # assert len(slocs) == nlocs, (len(slocs), nlocs) + # assert slocs == set(range(nlocs)), slocs + + new_mgr = type(right)(tuple(res_blks), axes=right.axes, verify_integrity=False) + return new_mgr + + +def _reset_block_mgr_locs(nbs: list[Block], locs) -> None: + """ + Reset mgr_locs to correspond to our original DataFrame. + """ + for nb in nbs: + nblocs = locs[nb.mgr_locs.indexer] + nb.mgr_locs = nblocs + # Assertions are disabled for performance, but should hold: + # assert len(nblocs) == nb.shape[0], (len(nblocs), nb.shape) + # assert all(x in locs.as_array for x in nb.mgr_locs.as_array) + + +def _get_same_shape_values( + lblk: Block, rblk: Block, left_ea: bool, right_ea: bool +) -> tuple[ArrayLike, ArrayLike]: + """ + Slice lblk.values to align with rblk. Squeeze if we have EAs. + """ + lvals = lblk.values + rvals = rblk.values + + # Require that the indexing into lvals be slice-like + assert rblk.mgr_locs.is_slice_like, rblk.mgr_locs + + # TODO(EA2D): with 2D EAs only this first clause would be needed + if not (left_ea or right_ea): + # error: No overload variant of "__getitem__" of "ExtensionArray" matches + # argument type "Tuple[Union[ndarray, slice], slice]" + lvals = lvals[rblk.mgr_locs.indexer, :] # type: ignore[call-overload] + assert lvals.shape == rvals.shape, (lvals.shape, rvals.shape) + elif left_ea and right_ea: + assert lvals.shape == rvals.shape, (lvals.shape, rvals.shape) + elif right_ea: + # lvals are 2D, rvals are 1D + + # error: No overload variant of "__getitem__" of "ExtensionArray" matches + # argument type "Tuple[Union[ndarray, slice], slice]" + lvals = lvals[rblk.mgr_locs.indexer, :] # type: ignore[call-overload] + assert lvals.shape[0] == 1, lvals.shape + lvals = lvals[0, :] + else: + # lvals are 1D, rvals are 2D + assert rvals.shape[0] == 1, rvals.shape + # error: No overload variant of "__getitem__" of "ExtensionArray" matches + # argument type "Tuple[int, slice]" + rvals = rvals[0, :] # type: ignore[call-overload] + + return lvals, rvals + + +def blockwise_all(left: BlockManager, right: BlockManager, op) -> bool: + """ + Blockwise `all` reduction. + """ + for info in _iter_block_pairs(left, right): + res = op(info.lvals, info.rvals) + if not res: + return False + return True diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/ops/__init__.py b/vlmpy310/lib/python3.10/site-packages/pandas/core/ops/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..ae889a7fdbc24935c0884e8bbcdf56bde8946460 --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/pandas/core/ops/__init__.py @@ -0,0 +1,93 @@ +""" +Arithmetic operations for PandasObjects + +This is not a public API. 
+""" +from __future__ import annotations + +from pandas.core.ops.array_ops import ( + arithmetic_op, + comp_method_OBJECT_ARRAY, + comparison_op, + fill_binop, + get_array_op, + logical_op, + maybe_prepare_scalar_for_op, +) +from pandas.core.ops.common import ( + get_op_result_name, + unpack_zerodim_and_defer, +) +from pandas.core.ops.docstrings import make_flex_doc +from pandas.core.ops.invalid import invalid_comparison +from pandas.core.ops.mask_ops import ( + kleene_and, + kleene_or, + kleene_xor, +) +from pandas.core.roperator import ( + radd, + rand_, + rdiv, + rdivmod, + rfloordiv, + rmod, + rmul, + ror_, + rpow, + rsub, + rtruediv, + rxor, +) + +# ----------------------------------------------------------------------------- +# constants +ARITHMETIC_BINOPS: set[str] = { + "add", + "sub", + "mul", + "pow", + "mod", + "floordiv", + "truediv", + "divmod", + "radd", + "rsub", + "rmul", + "rpow", + "rmod", + "rfloordiv", + "rtruediv", + "rdivmod", +} + + +__all__ = [ + "ARITHMETIC_BINOPS", + "arithmetic_op", + "comparison_op", + "comp_method_OBJECT_ARRAY", + "invalid_comparison", + "fill_binop", + "kleene_and", + "kleene_or", + "kleene_xor", + "logical_op", + "make_flex_doc", + "radd", + "rand_", + "rdiv", + "rdivmod", + "rfloordiv", + "rmod", + "rmul", + "ror_", + "rpow", + "rsub", + "rtruediv", + "rxor", + "unpack_zerodim_and_defer", + "get_op_result_name", + "maybe_prepare_scalar_for_op", + "get_array_op", +] diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/ops/__pycache__/array_ops.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/pandas/core/ops/__pycache__/array_ops.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9bb442045d9bff54c601cf9fc3bd9b4c9e8cb4a8 Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/pandas/core/ops/__pycache__/array_ops.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/ops/__pycache__/common.cpython-310.pyc b/vlmpy310/lib/python3.10/site-packages/pandas/core/ops/__pycache__/common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..550371c3a0582a09aa55178ee2f45987a191a929 Binary files /dev/null and b/vlmpy310/lib/python3.10/site-packages/pandas/core/ops/__pycache__/common.cpython-310.pyc differ diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/ops/array_ops.py b/vlmpy310/lib/python3.10/site-packages/pandas/core/ops/array_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..4b762a359d321ea61660e78ec63d392f8436939d --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/pandas/core/ops/array_ops.py @@ -0,0 +1,604 @@ +""" +Functions for arithmetic and comparison operations on NumPy arrays and +ExtensionArrays. 
+""" +from __future__ import annotations + +import datetime +from functools import partial +import operator +from typing import ( + TYPE_CHECKING, + Any, +) +import warnings + +import numpy as np + +from pandas._libs import ( + NaT, + Timedelta, + Timestamp, + lib, + ops as libops, +) +from pandas._libs.tslibs import ( + BaseOffset, + get_supported_dtype, + is_supported_dtype, + is_unitless, +) +from pandas.util._exceptions import find_stack_level + +from pandas.core.dtypes.cast import ( + construct_1d_object_array_from_listlike, + find_common_type, +) +from pandas.core.dtypes.common import ( + ensure_object, + is_bool_dtype, + is_list_like, + is_numeric_v_string_like, + is_object_dtype, + is_scalar, +) +from pandas.core.dtypes.generic import ( + ABCExtensionArray, + ABCIndex, + ABCSeries, +) +from pandas.core.dtypes.missing import ( + isna, + notna, +) + +from pandas.core import roperator +from pandas.core.computation import expressions +from pandas.core.construction import ensure_wrapped_if_datetimelike +from pandas.core.ops import missing +from pandas.core.ops.dispatch import should_extension_dispatch +from pandas.core.ops.invalid import invalid_comparison + +if TYPE_CHECKING: + from pandas._typing import ( + ArrayLike, + Shape, + ) + +# ----------------------------------------------------------------------------- +# Masking NA values and fallbacks for operations numpy does not support + + +def fill_binop(left, right, fill_value): + """ + If a non-None fill_value is given, replace null entries in left and right + with this value, but only in positions where _one_ of left/right is null, + not both. + + Parameters + ---------- + left : array-like + right : array-like + fill_value : object + + Returns + ------- + left : array-like + right : array-like + + Notes + ----- + Makes copies if fill_value is not None and NAs are present. + """ + if fill_value is not None: + left_mask = isna(left) + right_mask = isna(right) + + # one but not both + mask = left_mask ^ right_mask + + if left_mask.any(): + # Avoid making a copy if we can + left = left.copy() + left[left_mask & mask] = fill_value + + if right_mask.any(): + # Avoid making a copy if we can + right = right.copy() + right[right_mask & mask] = fill_value + + return left, right + + +def comp_method_OBJECT_ARRAY(op, x, y): + if isinstance(y, list): + # e.g. test_tuple_categories + y = construct_1d_object_array_from_listlike(y) + + if isinstance(y, (np.ndarray, ABCSeries, ABCIndex)): + if not is_object_dtype(y.dtype): + y = y.astype(np.object_) + + if isinstance(y, (ABCSeries, ABCIndex)): + y = y._values + + if x.shape != y.shape: + raise ValueError("Shapes must match", x.shape, y.shape) + result = libops.vec_compare(x.ravel(), y.ravel(), op) + else: + result = libops.scalar_compare(x.ravel(), y, op) + return result.reshape(x.shape) + + +def _masked_arith_op(x: np.ndarray, y, op): + """ + If the given arithmetic operation fails, attempt it again on + only the non-null elements of the input array(s). + + Parameters + ---------- + x : np.ndarray + y : np.ndarray, Series, Index + op : binary operator + """ + # For Series `x` is 1D so ravel() is a no-op; calling it anyway makes + # the logic valid for both Series and DataFrame ops. + xrav = x.ravel() + + if isinstance(y, np.ndarray): + dtype = find_common_type([x.dtype, y.dtype]) + result = np.empty(x.size, dtype=dtype) + + if len(x) != len(y): + raise ValueError(x.shape, y.shape) + ymask = notna(y) + + # NB: ravel() is only safe since y is ndarray; for e.g. 
PeriodIndex + # we would get int64 dtype, see GH#19956 + yrav = y.ravel() + mask = notna(xrav) & ymask.ravel() + + # See GH#5284, GH#5035, GH#19448 for historical reference + if mask.any(): + result[mask] = op(xrav[mask], yrav[mask]) + + else: + if not is_scalar(y): + raise TypeError( + f"Cannot broadcast np.ndarray with operand of type { type(y) }" + ) + + # mask is only meaningful for x + result = np.empty(x.size, dtype=x.dtype) + mask = notna(xrav) + + # 1 ** np.nan is 1. So we have to unmask those. + if op is pow: + mask = np.where(x == 1, False, mask) + elif op is roperator.rpow: + mask = np.where(y == 1, False, mask) + + if mask.any(): + result[mask] = op(xrav[mask], y) + + np.putmask(result, ~mask, np.nan) + result = result.reshape(x.shape) # 2D compat + return result + + +def _na_arithmetic_op(left: np.ndarray, right, op, is_cmp: bool = False): + """ + Return the result of evaluating op on the passed in values. + + If native types are not compatible, try coercion to object dtype. + + Parameters + ---------- + left : np.ndarray + right : np.ndarray or scalar + Excludes DataFrame, Series, Index, ExtensionArray. + is_cmp : bool, default False + If this a comparison operation. + + Returns + ------- + array-like + + Raises + ------ + TypeError : invalid operation + """ + if isinstance(right, str): + # can never use numexpr + func = op + else: + func = partial(expressions.evaluate, op) + + try: + result = func(left, right) + except TypeError: + if not is_cmp and ( + left.dtype == object or getattr(right, "dtype", None) == object + ): + # For object dtype, fallback to a masked operation (only operating + # on the non-missing values) + # Don't do this for comparisons, as that will handle complex numbers + # incorrectly, see GH#32047 + result = _masked_arith_op(left, right, op) + else: + raise + + if is_cmp and (is_scalar(result) or result is NotImplemented): + # numpy returned a scalar instead of operating element-wise + # e.g. numeric array vs str + # TODO: can remove this after dropping some future numpy version? + return invalid_comparison(left, right, op) + + return missing.dispatch_fill_zeros(op, left, right, result) + + +def arithmetic_op(left: ArrayLike, right: Any, op): + """ + Evaluate an arithmetic operation `+`, `-`, `*`, `/`, `//`, `%`, `**`, ... + + Note: the caller is responsible for ensuring that numpy warnings are + suppressed (with np.errstate(all="ignore")) if needed. + + Parameters + ---------- + left : np.ndarray or ExtensionArray + right : object + Cannot be a DataFrame or Index. Series is *not* excluded. + op : {operator.add, operator.sub, ...} + Or one of the reversed variants from roperator. + + Returns + ------- + ndarray or ExtensionArray + Or a 2-tuple of these in the case of divmod or rdivmod. + """ + # NB: We assume that extract_array and ensure_wrapped_if_datetimelike + # have already been called on `left` and `right`, + # and `maybe_prepare_scalar_for_op` has already been called on `right` + # We need to special-case datetime64/timedelta64 dtypes (e.g. 
because numpy + # casts integer dtypes to timedelta64 when operating with timedelta64 - GH#22390) + + if ( + should_extension_dispatch(left, right) + or isinstance(right, (Timedelta, BaseOffset, Timestamp)) + or right is NaT + ): + # Timedelta/Timestamp and other custom scalars are included in the check + # because numexpr will fail on it, see GH#31457 + res_values = op(left, right) + else: + # TODO we should handle EAs consistently and move this check before the if/else + # (https://github.com/pandas-dev/pandas/issues/41165) + # error: Argument 2 to "_bool_arith_check" has incompatible type + # "Union[ExtensionArray, ndarray[Any, Any]]"; expected "ndarray[Any, Any]" + _bool_arith_check(op, left, right) # type: ignore[arg-type] + + # error: Argument 1 to "_na_arithmetic_op" has incompatible type + # "Union[ExtensionArray, ndarray[Any, Any]]"; expected "ndarray[Any, Any]" + res_values = _na_arithmetic_op(left, right, op) # type: ignore[arg-type] + + return res_values + + +def comparison_op(left: ArrayLike, right: Any, op) -> ArrayLike: + """ + Evaluate a comparison operation `=`, `!=`, `>=`, `>`, `<=`, or `<`. + + Note: the caller is responsible for ensuring that numpy warnings are + suppressed (with np.errstate(all="ignore")) if needed. + + Parameters + ---------- + left : np.ndarray or ExtensionArray + right : object + Cannot be a DataFrame, Series, or Index. + op : {operator.eq, operator.ne, operator.gt, operator.ge, operator.lt, operator.le} + + Returns + ------- + ndarray or ExtensionArray + """ + # NB: We assume extract_array has already been called on left and right + lvalues = ensure_wrapped_if_datetimelike(left) + rvalues = ensure_wrapped_if_datetimelike(right) + + rvalues = lib.item_from_zerodim(rvalues) + if isinstance(rvalues, list): + # We don't catch tuple here bc we may be comparing e.g. MultiIndex + # to a tuple that represents a single entry, see test_compare_tuple_strs + rvalues = np.asarray(rvalues) + + if isinstance(rvalues, (np.ndarray, ABCExtensionArray)): + # TODO: make this treatment consistent across ops and classes. + # We are not catching all listlikes here (e.g. frozenset, tuple) + # The ambiguous case is object-dtype. See GH#27803 + if len(lvalues) != len(rvalues): + raise ValueError( + "Lengths must match to compare", lvalues.shape, rvalues.shape + ) + + if should_extension_dispatch(lvalues, rvalues) or ( + (isinstance(rvalues, (Timedelta, BaseOffset, Timestamp)) or right is NaT) + and lvalues.dtype != object + ): + # Call the method on lvalues + res_values = op(lvalues, rvalues) + + elif is_scalar(rvalues) and isna(rvalues): # TODO: but not pd.NA? 
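+ # a scalar NA (e.g. None or np.nan) compares unequal to everything,
+ # so "ne" yields all-True and every other comparison all-False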
+ # numpy does not like comparisons vs None + if op is operator.ne: + res_values = np.ones(lvalues.shape, dtype=bool) + else: + res_values = np.zeros(lvalues.shape, dtype=bool) + + elif is_numeric_v_string_like(lvalues, rvalues): + # GH#36377 going through the numexpr path would incorrectly raise + return invalid_comparison(lvalues, rvalues, op) + + elif lvalues.dtype == object or isinstance(rvalues, str): + res_values = comp_method_OBJECT_ARRAY(op, lvalues, rvalues) + + else: + res_values = _na_arithmetic_op(lvalues, rvalues, op, is_cmp=True) + + return res_values + + +def na_logical_op(x: np.ndarray, y, op): + try: + # For exposition, write: + # yarr = isinstance(y, np.ndarray) + # yint = is_integer(y) or (yarr and y.dtype.kind == "i") + # ybool = is_bool(y) or (yarr and y.dtype.kind == "b") + # xint = x.dtype.kind == "i" + # xbool = x.dtype.kind == "b" + # Then Cases where this goes through without raising include: + # (xint or xbool) and (yint or bool) + result = op(x, y) + except TypeError: + if isinstance(y, np.ndarray): + # bool-bool dtype operations should be OK, should not get here + assert not (x.dtype.kind == "b" and y.dtype.kind == "b") + x = ensure_object(x) + y = ensure_object(y) + result = libops.vec_binop(x.ravel(), y.ravel(), op) + else: + # let null fall thru + assert lib.is_scalar(y) + if not isna(y): + y = bool(y) + try: + result = libops.scalar_binop(x, y, op) + except ( + TypeError, + ValueError, + AttributeError, + OverflowError, + NotImplementedError, + ) as err: + typ = type(y).__name__ + raise TypeError( + f"Cannot perform '{op.__name__}' with a dtyped [{x.dtype}] array " + f"and scalar of type [{typ}]" + ) from err + + return result.reshape(x.shape) + + +def logical_op(left: ArrayLike, right: Any, op) -> ArrayLike: + """ + Evaluate a logical operation `|`, `&`, or `^`. + + Parameters + ---------- + left : np.ndarray or ExtensionArray + right : object + Cannot be a DataFrame, Series, or Index. + op : {operator.and_, operator.or_, operator.xor} + Or one of the reversed variants from roperator. + + Returns + ------- + ndarray or ExtensionArray + """ + + def fill_bool(x, left=None): + # if `left` is specifically not-boolean, we do not cast to bool + if x.dtype.kind in "cfO": + # dtypes that can hold NA + mask = isna(x) + if mask.any(): + x = x.astype(object) + x[mask] = False + + if left is None or left.dtype.kind == "b": + x = x.astype(bool) + return x + + right = lib.item_from_zerodim(right) + if is_list_like(right) and not hasattr(right, "dtype"): + # e.g. list, tuple + warnings.warn( + "Logical ops (and, or, xor) between Pandas objects and dtype-less " + "sequences (e.g. list, tuple) are deprecated and will raise in a " + "future version. Wrap the object in a Series, Index, or np.array " + "before operating instead.", + FutureWarning, + stacklevel=find_stack_level(), + ) + right = construct_1d_object_array_from_listlike(right) + + # NB: We assume extract_array has already been called on left and right + lvalues = ensure_wrapped_if_datetimelike(left) + rvalues = right + + if should_extension_dispatch(lvalues, rvalues): + # Call the method on lvalues + res_values = op(lvalues, rvalues) + + else: + if isinstance(rvalues, np.ndarray): + is_other_int_dtype = rvalues.dtype.kind in "iu" + if not is_other_int_dtype: + rvalues = fill_bool(rvalues, lvalues) + + else: + # i.e. scalar + is_other_int_dtype = lib.is_integer(rvalues) + + res_values = na_logical_op(lvalues, rvalues, op) + + # For int vs int `^`, `|`, `&` are bitwise operators and return + # integer dtypes. 
Otherwise these are boolean ops + if not (left.dtype.kind in "iu" and is_other_int_dtype): + res_values = fill_bool(res_values) + + return res_values + + +def get_array_op(op): + """ + Return a binary array operation corresponding to the given operator op. + + Parameters + ---------- + op : function + Binary operator from operator or roperator module. + + Returns + ------- + functools.partial + """ + if isinstance(op, partial): + # We get here via dispatch_to_series in DataFrame case + # e.g. test_rolling_consistency_var_debiasing_factors + return op + + op_name = op.__name__.strip("_").lstrip("r") + if op_name == "arith_op": + # Reached via DataFrame._combine_frame i.e. flex methods + # e.g. test_df_add_flex_filled_mixed_dtypes + return op + + if op_name in {"eq", "ne", "lt", "le", "gt", "ge"}: + return partial(comparison_op, op=op) + elif op_name in {"and", "or", "xor", "rand", "ror", "rxor"}: + return partial(logical_op, op=op) + elif op_name in { + "add", + "sub", + "mul", + "truediv", + "floordiv", + "mod", + "divmod", + "pow", + }: + return partial(arithmetic_op, op=op) + else: + raise NotImplementedError(op_name) + + +def maybe_prepare_scalar_for_op(obj, shape: Shape): + """ + Cast non-pandas objects to pandas types to unify behavior of arithmetic + and comparison operations. + + Parameters + ---------- + obj: object + shape : tuple[int] + + Returns + ------- + out : object + + Notes + ----- + Be careful to call this *after* determining the `name` attribute to be + attached to the result of the arithmetic operation. + """ + if type(obj) is datetime.timedelta: + # GH#22390 cast up to Timedelta to rely on Timedelta + # implementation; otherwise operation against numeric-dtype + # raises TypeError + return Timedelta(obj) + elif type(obj) is datetime.datetime: + # cast up to Timestamp to rely on Timestamp implementation, see Timedelta above + return Timestamp(obj) + elif isinstance(obj, np.datetime64): + # GH#28080 numpy casts integer-dtype to datetime64 when doing + # array[int] + datetime64, which we do not allow + if isna(obj): + from pandas.core.arrays import DatetimeArray + + # Avoid possible ambiguities with pd.NaT + # GH 52295 + if is_unitless(obj.dtype): + obj = obj.astype("datetime64[ns]") + elif not is_supported_dtype(obj.dtype): + new_dtype = get_supported_dtype(obj.dtype) + obj = obj.astype(new_dtype) + right = np.broadcast_to(obj, shape) + return DatetimeArray._simple_new(right, dtype=right.dtype) + + return Timestamp(obj) + + elif isinstance(obj, np.timedelta64): + if isna(obj): + from pandas.core.arrays import TimedeltaArray + + # wrapping timedelta64("NaT") in Timedelta returns NaT, + # which would incorrectly be treated as a datetime-NaT, so + # we broadcast and wrap in a TimedeltaArray + # GH 52295 + if is_unitless(obj.dtype): + obj = obj.astype("timedelta64[ns]") + elif not is_supported_dtype(obj.dtype): + new_dtype = get_supported_dtype(obj.dtype) + obj = obj.astype(new_dtype) + right = np.broadcast_to(obj, shape) + return TimedeltaArray._simple_new(right, dtype=right.dtype) + + # In particular non-nanosecond timedelta64 needs to be cast to + # nanoseconds, or else we get undesired behavior like + # np.timedelta64(3, 'D') / 2 == np.timedelta64(1, 'D') + return Timedelta(obj) + + # We want NumPy numeric scalars to behave like Python scalars + # post NEP 50 + elif isinstance(obj, np.integer): + return int(obj) + + elif isinstance(obj, np.floating): + return float(obj) + + return obj + + +_BOOL_OP_NOT_ALLOWED = { + operator.truediv, + roperator.rtruediv, + 
operator.floordiv, + roperator.rfloordiv, + operator.pow, + roperator.rpow, +} + + +def _bool_arith_check(op, a: np.ndarray, b): + """ + In contrast to numpy, pandas raises an error for certain operations + with booleans. + """ + if op in _BOOL_OP_NOT_ALLOWED: + if a.dtype.kind == "b" and (is_bool_dtype(b) or lib.is_bool(b)): + op_name = op.__name__.strip("_").lstrip("r") + raise NotImplementedError( + f"operator '{op_name}' not implemented for bool dtypes" + ) diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/ops/common.py b/vlmpy310/lib/python3.10/site-packages/pandas/core/ops/common.py new file mode 100644 index 0000000000000000000000000000000000000000..559977bacf881552d546e7704d4cf4b12b4a32fe --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/pandas/core/ops/common.py @@ -0,0 +1,146 @@ +""" +Boilerplate functions used in defining binary operations. +""" +from __future__ import annotations + +from functools import wraps +from typing import ( + TYPE_CHECKING, + Callable, +) + +from pandas._libs.lib import item_from_zerodim +from pandas._libs.missing import is_matching_na + +from pandas.core.dtypes.generic import ( + ABCIndex, + ABCSeries, +) + +if TYPE_CHECKING: + from pandas._typing import F + + +def unpack_zerodim_and_defer(name: str) -> Callable[[F], F]: + """ + Boilerplate for pandas conventions in arithmetic and comparison methods. + + Parameters + ---------- + name : str + + Returns + ------- + decorator + """ + + def wrapper(method: F) -> F: + return _unpack_zerodim_and_defer(method, name) + + return wrapper + + +def _unpack_zerodim_and_defer(method, name: str): + """ + Boilerplate for pandas conventions in arithmetic and comparison methods. + + Ensure method returns NotImplemented when operating against "senior" + classes. Ensure zero-dimensional ndarrays are always unpacked. + + Parameters + ---------- + method : binary method + name : str + + Returns + ------- + method + """ + stripped_name = name.removeprefix("__").removesuffix("__") + is_cmp = stripped_name in {"eq", "ne", "lt", "le", "gt", "ge"} + + @wraps(method) + def new_method(self, other): + if is_cmp and isinstance(self, ABCIndex) and isinstance(other, ABCSeries): + # For comparison ops, Index does *not* defer to Series + pass + else: + prio = getattr(other, "__pandas_priority__", None) + if prio is not None: + if prio > self.__pandas_priority__: + # e.g. other is DataFrame while self is Index/Series/EA + return NotImplemented + + other = item_from_zerodim(other) + + return method(self, other) + + return new_method + + +def get_op_result_name(left, right): + """ + Find the appropriate name to pin to an operation result. This result + should always be either an Index or a Series. + + Parameters + ---------- + left : {Series, Index} + right : object + + Returns + ------- + name : object + Usually a string + """ + if isinstance(right, (ABCSeries, ABCIndex)): + name = _maybe_match_name(left, right) + else: + name = left.name + return name + + +def _maybe_match_name(a, b): + """ + Try to find a name to attach to the result of an operation between + a and b. If only one of these has a `name` attribute, return that + name. Otherwise return a consensus name if they match or None if + they have different names. 
+ + Parameters + ---------- + a : object + b : object + + Returns + ------- + name : str or None + + See Also + -------- + pandas.core.common.consensus_name_attr + """ + a_has = hasattr(a, "name") + b_has = hasattr(b, "name") + if a_has and b_has: + try: + if a.name == b.name: + return a.name + elif is_matching_na(a.name, b.name): + # e.g. both are np.nan + return a.name + else: + return None + except TypeError: + # pd.NA + if is_matching_na(a.name, b.name): + return a.name + return None + except ValueError: + # e.g. np.int64(1) vs (np.int64(1), np.int64(2)) + return None + elif a_has: + return a.name + elif b_has: + return b.name + return None diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/ops/dispatch.py b/vlmpy310/lib/python3.10/site-packages/pandas/core/ops/dispatch.py new file mode 100644 index 0000000000000000000000000000000000000000..a939fdd3d041e9f99dde7ea40fd7aa0572d0d9b7 --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/pandas/core/ops/dispatch.py @@ -0,0 +1,30 @@ +""" +Functions for defining unary operations. +""" +from __future__ import annotations + +from typing import ( + TYPE_CHECKING, + Any, +) + +from pandas.core.dtypes.generic import ABCExtensionArray + +if TYPE_CHECKING: + from pandas._typing import ArrayLike + + +def should_extension_dispatch(left: ArrayLike, right: Any) -> bool: + """ + Identify cases where Series operation should dispatch to ExtensionArray method. + + Parameters + ---------- + left : np.ndarray or ExtensionArray + right : object + + Returns + ------- + bool + """ + return isinstance(left, ABCExtensionArray) or isinstance(right, ABCExtensionArray) diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/ops/docstrings.py b/vlmpy310/lib/python3.10/site-packages/pandas/core/ops/docstrings.py new file mode 100644 index 0000000000000000000000000000000000000000..bd2e532536d8491af44631e52982217a04ef5b17 --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/pandas/core/ops/docstrings.py @@ -0,0 +1,772 @@ +""" +Templating for ops docstrings +""" +from __future__ import annotations + + +def make_flex_doc(op_name: str, typ: str) -> str: + """ + Make the appropriate substitutions for the given operation and class-typ + into either _flex_doc_SERIES or _flex_doc_FRAME to return the docstring + to attach to a generated method. + + Parameters + ---------- + op_name : str {'__add__', '__sub__', ... 
'__eq__', '__ne__', ...} + typ : str {series, 'dataframe']} + + Returns + ------- + doc : str + """ + op_name = op_name.replace("__", "") + op_desc = _op_descriptions[op_name] + + op_desc_op = op_desc["op"] + assert op_desc_op is not None # for mypy + if op_name.startswith("r"): + equiv = f"other {op_desc_op} {typ}" + elif op_name == "divmod": + equiv = f"{op_name}({typ}, other)" + else: + equiv = f"{typ} {op_desc_op} other" + + if typ == "series": + base_doc = _flex_doc_SERIES + if op_desc["reverse"]: + base_doc += _see_also_reverse_SERIES.format( + reverse=op_desc["reverse"], see_also_desc=op_desc["see_also_desc"] + ) + doc_no_examples = base_doc.format( + desc=op_desc["desc"], + op_name=op_name, + equiv=equiv, + series_returns=op_desc["series_returns"], + ) + ser_example = op_desc["series_examples"] + if ser_example: + doc = doc_no_examples + ser_example + else: + doc = doc_no_examples + elif typ == "dataframe": + if op_name in ["eq", "ne", "le", "lt", "ge", "gt"]: + base_doc = _flex_comp_doc_FRAME + doc = _flex_comp_doc_FRAME.format( + op_name=op_name, + desc=op_desc["desc"], + ) + else: + base_doc = _flex_doc_FRAME + doc = base_doc.format( + desc=op_desc["desc"], + op_name=op_name, + equiv=equiv, + reverse=op_desc["reverse"], + ) + else: + raise AssertionError("Invalid typ argument.") + return doc + + +_common_examples_algebra_SERIES = """ +Examples +-------- +>>> a = pd.Series([1, 1, 1, np.nan], index=['a', 'b', 'c', 'd']) +>>> a +a 1.0 +b 1.0 +c 1.0 +d NaN +dtype: float64 +>>> b = pd.Series([1, np.nan, 1, np.nan], index=['a', 'b', 'd', 'e']) +>>> b +a 1.0 +b NaN +d 1.0 +e NaN +dtype: float64""" + +_common_examples_comparison_SERIES = """ +Examples +-------- +>>> a = pd.Series([1, 1, 1, np.nan, 1], index=['a', 'b', 'c', 'd', 'e']) +>>> a +a 1.0 +b 1.0 +c 1.0 +d NaN +e 1.0 +dtype: float64 +>>> b = pd.Series([0, 1, 2, np.nan, 1], index=['a', 'b', 'c', 'd', 'f']) +>>> b +a 0.0 +b 1.0 +c 2.0 +d NaN +f 1.0 +dtype: float64""" + +_add_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.add(b, fill_value=0) +a 2.0 +b 1.0 +c 1.0 +d 1.0 +e NaN +dtype: float64 +""" +) + +_sub_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.subtract(b, fill_value=0) +a 0.0 +b 1.0 +c 1.0 +d -1.0 +e NaN +dtype: float64 +""" +) + +_mul_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.multiply(b, fill_value=0) +a 1.0 +b 0.0 +c 0.0 +d 0.0 +e NaN +dtype: float64 +""" +) + +_div_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.divide(b, fill_value=0) +a 1.0 +b inf +c inf +d 0.0 +e NaN +dtype: float64 +""" +) + +_floordiv_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.floordiv(b, fill_value=0) +a 1.0 +b inf +c inf +d 0.0 +e NaN +dtype: float64 +""" +) + +_divmod_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.divmod(b, fill_value=0) +(a 1.0 + b inf + c inf + d 0.0 + e NaN + dtype: float64, + a 0.0 + b NaN + c NaN + d 0.0 + e NaN + dtype: float64) +""" +) + +_mod_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.mod(b, fill_value=0) +a 0.0 +b NaN +c NaN +d 0.0 +e NaN +dtype: float64 +""" +) +_pow_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.pow(b, fill_value=0) +a 1.0 +b 1.0 +c 1.0 +d 0.0 +e NaN +dtype: float64 +""" +) + +_ne_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.ne(b, fill_value=0) +a False +b True +c True +d True +e True +dtype: bool +""" +) + +_eq_example_SERIES = ( + _common_examples_algebra_SERIES + + """ +>>> a.eq(b, fill_value=0) 
+a True +b False +c False +d False +e False +dtype: bool +""" +) + +_lt_example_SERIES = ( + _common_examples_comparison_SERIES + + """ +>>> a.lt(b, fill_value=0) +a False +b False +c True +d False +e False +f True +dtype: bool +""" +) + +_le_example_SERIES = ( + _common_examples_comparison_SERIES + + """ +>>> a.le(b, fill_value=0) +a False +b True +c True +d False +e False +f True +dtype: bool +""" +) + +_gt_example_SERIES = ( + _common_examples_comparison_SERIES + + """ +>>> a.gt(b, fill_value=0) +a True +b False +c False +d False +e True +f False +dtype: bool +""" +) + +_ge_example_SERIES = ( + _common_examples_comparison_SERIES + + """ +>>> a.ge(b, fill_value=0) +a True +b True +c False +d False +e True +f False +dtype: bool +""" +) + +_returns_series = """Series\n The result of the operation.""" + +_returns_tuple = """2-Tuple of Series\n The result of the operation.""" + +_op_descriptions: dict[str, dict[str, str | None]] = { + # Arithmetic Operators + "add": { + "op": "+", + "desc": "Addition", + "reverse": "radd", + "series_examples": _add_example_SERIES, + "series_returns": _returns_series, + }, + "sub": { + "op": "-", + "desc": "Subtraction", + "reverse": "rsub", + "series_examples": _sub_example_SERIES, + "series_returns": _returns_series, + }, + "mul": { + "op": "*", + "desc": "Multiplication", + "reverse": "rmul", + "series_examples": _mul_example_SERIES, + "series_returns": _returns_series, + "df_examples": None, + }, + "mod": { + "op": "%", + "desc": "Modulo", + "reverse": "rmod", + "series_examples": _mod_example_SERIES, + "series_returns": _returns_series, + }, + "pow": { + "op": "**", + "desc": "Exponential power", + "reverse": "rpow", + "series_examples": _pow_example_SERIES, + "series_returns": _returns_series, + "df_examples": None, + }, + "truediv": { + "op": "/", + "desc": "Floating division", + "reverse": "rtruediv", + "series_examples": _div_example_SERIES, + "series_returns": _returns_series, + "df_examples": None, + }, + "floordiv": { + "op": "//", + "desc": "Integer division", + "reverse": "rfloordiv", + "series_examples": _floordiv_example_SERIES, + "series_returns": _returns_series, + "df_examples": None, + }, + "divmod": { + "op": "divmod", + "desc": "Integer division and modulo", + "reverse": "rdivmod", + "series_examples": _divmod_example_SERIES, + "series_returns": _returns_tuple, + "df_examples": None, + }, + # Comparison Operators + "eq": { + "op": "==", + "desc": "Equal to", + "reverse": None, + "series_examples": _eq_example_SERIES, + "series_returns": _returns_series, + }, + "ne": { + "op": "!=", + "desc": "Not equal to", + "reverse": None, + "series_examples": _ne_example_SERIES, + "series_returns": _returns_series, + }, + "lt": { + "op": "<", + "desc": "Less than", + "reverse": None, + "series_examples": _lt_example_SERIES, + "series_returns": _returns_series, + }, + "le": { + "op": "<=", + "desc": "Less than or equal to", + "reverse": None, + "series_examples": _le_example_SERIES, + "series_returns": _returns_series, + }, + "gt": { + "op": ">", + "desc": "Greater than", + "reverse": None, + "series_examples": _gt_example_SERIES, + "series_returns": _returns_series, + }, + "ge": { + "op": ">=", + "desc": "Greater than or equal to", + "reverse": None, + "series_examples": _ge_example_SERIES, + "series_returns": _returns_series, + }, +} + +_py_num_ref = """see + `Python documentation + `_ + for more details""" +_op_names = list(_op_descriptions.keys()) +for key in _op_names: + reverse_op = _op_descriptions[key]["reverse"] + if reverse_op is not None: + 
_op_descriptions[reverse_op] = _op_descriptions[key].copy() + _op_descriptions[reverse_op]["reverse"] = key + _op_descriptions[key][ + "see_also_desc" + ] = f"Reverse of the {_op_descriptions[key]['desc']} operator, {_py_num_ref}" + _op_descriptions[reverse_op][ + "see_also_desc" + ] = f"Element-wise {_op_descriptions[key]['desc']}, {_py_num_ref}" + +_flex_doc_SERIES = """ +Return {desc} of series and other, element-wise (binary operator `{op_name}`). + +Equivalent to ``{equiv}``, but with support to substitute a fill_value for +missing data in either one of the inputs. + +Parameters +---------- +other : Series or scalar value +level : int or name + Broadcast across a level, matching Index values on the + passed MultiIndex level. +fill_value : None or float value, default None (NaN) + Fill existing missing (NaN) values, and any new element needed for + successful Series alignment, with this value before computation. + If data in both corresponding Series locations is missing + the result of filling (at that location) will be missing. +axis : {{0 or 'index'}} + Unused. Parameter needed for compatibility with DataFrame. + +Returns +------- +{series_returns} +""" + +_see_also_reverse_SERIES = """ +See Also +-------- +Series.{reverse} : {see_also_desc}. +""" + +_flex_doc_FRAME = """ +Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`). + +Equivalent to ``{equiv}``, but with support to substitute a fill_value +for missing data in one of the inputs. With reverse version, `{reverse}`. + +Among flexible wrappers (`add`, `sub`, `mul`, `div`, `floordiv`, `mod`, `pow`) to +arithmetic operators: `+`, `-`, `*`, `/`, `//`, `%`, `**`. + +Parameters +---------- +other : scalar, sequence, Series, dict or DataFrame + Any single or multiple element data structure, or list-like object. +axis : {{0 or 'index', 1 or 'columns'}} + Whether to compare by the index (0 or 'index') or columns. + (1 or 'columns'). For Series input, axis to match Series index on. +level : int or label + Broadcast across a level, matching Index values on the + passed MultiIndex level. +fill_value : float or None, default None + Fill existing missing (NaN) values, and any new element needed for + successful DataFrame alignment, with this value before computation. + If data in both corresponding DataFrame locations is missing + the result will be missing. + +Returns +------- +DataFrame + Result of the arithmetic operation. + +See Also +-------- +DataFrame.add : Add DataFrames. +DataFrame.sub : Subtract DataFrames. +DataFrame.mul : Multiply DataFrames. +DataFrame.div : Divide DataFrames (float division). +DataFrame.truediv : Divide DataFrames (float division). +DataFrame.floordiv : Divide DataFrames (integer division). +DataFrame.mod : Calculate modulo (remainder after division). +DataFrame.pow : Calculate exponential power. + +Notes +----- +Mismatched indices will be unioned together. + +Examples +-------- +>>> df = pd.DataFrame({{'angles': [0, 3, 4], +... 'degrees': [360, 180, 360]}}, +... index=['circle', 'triangle', 'rectangle']) +>>> df + angles degrees +circle 0 360 +triangle 3 180 +rectangle 4 360 + +Add a scalar with operator version which return the same +results. + +>>> df + 1 + angles degrees +circle 1 361 +triangle 4 181 +rectangle 5 361 + +>>> df.add(1) + angles degrees +circle 1 361 +triangle 4 181 +rectangle 5 361 + +Divide by constant with reverse version. 
+ +>>> df.div(10) + angles degrees +circle 0.0 36.0 +triangle 0.3 18.0 +rectangle 0.4 36.0 + +>>> df.rdiv(10) + angles degrees +circle inf 0.027778 +triangle 3.333333 0.055556 +rectangle 2.500000 0.027778 + +Subtract a list and Series by axis with operator version. + +>>> df - [1, 2] + angles degrees +circle -1 358 +triangle 2 178 +rectangle 3 358 + +>>> df.sub([1, 2], axis='columns') + angles degrees +circle -1 358 +triangle 2 178 +rectangle 3 358 + +>>> df.sub(pd.Series([1, 1, 1], index=['circle', 'triangle', 'rectangle']), +... axis='index') + angles degrees +circle -1 359 +triangle 2 179 +rectangle 3 359 + +Multiply a dictionary by axis. + +>>> df.mul({{'angles': 0, 'degrees': 2}}) + angles degrees +circle 0 720 +triangle 0 360 +rectangle 0 720 + +>>> df.mul({{'circle': 0, 'triangle': 2, 'rectangle': 3}}, axis='index') + angles degrees +circle 0 0 +triangle 6 360 +rectangle 12 1080 + +Multiply a DataFrame of different shape with operator version. + +>>> other = pd.DataFrame({{'angles': [0, 3, 4]}}, +... index=['circle', 'triangle', 'rectangle']) +>>> other + angles +circle 0 +triangle 3 +rectangle 4 + +>>> df * other + angles degrees +circle 0 NaN +triangle 9 NaN +rectangle 16 NaN + +>>> df.mul(other, fill_value=0) + angles degrees +circle 0 0.0 +triangle 9 0.0 +rectangle 16 0.0 + +Divide by a MultiIndex by level. + +>>> df_multindex = pd.DataFrame({{'angles': [0, 3, 4, 4, 5, 6], +... 'degrees': [360, 180, 360, 360, 540, 720]}}, +... index=[['A', 'A', 'A', 'B', 'B', 'B'], +... ['circle', 'triangle', 'rectangle', +... 'square', 'pentagon', 'hexagon']]) +>>> df_multindex + angles degrees +A circle 0 360 + triangle 3 180 + rectangle 4 360 +B square 4 360 + pentagon 5 540 + hexagon 6 720 + +>>> df.div(df_multindex, level=1, fill_value=0) + angles degrees +A circle NaN 1.0 + triangle 1.0 1.0 + rectangle 1.0 1.0 +B square 0.0 0.0 + pentagon 0.0 0.0 + hexagon 0.0 0.0 +""" + +_flex_comp_doc_FRAME = """ +Get {desc} of dataframe and other, element-wise (binary operator `{op_name}`). + +Among flexible wrappers (`eq`, `ne`, `le`, `lt`, `ge`, `gt`) to comparison +operators. + +Equivalent to `==`, `!=`, `<=`, `<`, `>=`, `>` with support to choose axis +(rows or columns) and level for comparison. + +Parameters +---------- +other : scalar, sequence, Series, or DataFrame + Any single or multiple element data structure, or list-like object. +axis : {{0 or 'index', 1 or 'columns'}}, default 'columns' + Whether to compare by the index (0 or 'index') or columns + (1 or 'columns'). +level : int or label + Broadcast across a level, matching Index values on the passed + MultiIndex level. + +Returns +------- +DataFrame of bool + Result of the comparison. + +See Also +-------- +DataFrame.eq : Compare DataFrames for equality elementwise. +DataFrame.ne : Compare DataFrames for inequality elementwise. +DataFrame.le : Compare DataFrames for less than inequality + or equality elementwise. +DataFrame.lt : Compare DataFrames for strictly less than + inequality elementwise. +DataFrame.ge : Compare DataFrames for greater than inequality + or equality elementwise. +DataFrame.gt : Compare DataFrames for strictly greater than + inequality elementwise. + +Notes +----- +Mismatched indices will be unioned together. +`NaN` values are considered different (i.e. `NaN` != `NaN`). + +Examples +-------- +>>> df = pd.DataFrame({{'cost': [250, 150, 100], +... 'revenue': [100, 250, 300]}}, +... 
index=['A', 'B', 'C']) +>>> df + cost revenue +A 250 100 +B 150 250 +C 100 300 + +Comparison with a scalar, using either the operator or method: + +>>> df == 100 + cost revenue +A False True +B False False +C True False + +>>> df.eq(100) + cost revenue +A False True +B False False +C True False + +When `other` is a :class:`Series`, the columns of a DataFrame are aligned +with the index of `other` and broadcast: + +>>> df != pd.Series([100, 250], index=["cost", "revenue"]) + cost revenue +A True True +B True False +C False True + +Use the method to control the broadcast axis: + +>>> df.ne(pd.Series([100, 300], index=["A", "D"]), axis='index') + cost revenue +A True False +B True True +C True True +D True True + +When comparing to an arbitrary sequence, the number of columns must +match the number elements in `other`: + +>>> df == [250, 100] + cost revenue +A True True +B False False +C False False + +Use the method to control the axis: + +>>> df.eq([250, 250, 100], axis='index') + cost revenue +A True False +B False True +C True False + +Compare to a DataFrame of different shape. + +>>> other = pd.DataFrame({{'revenue': [300, 250, 100, 150]}}, +... index=['A', 'B', 'C', 'D']) +>>> other + revenue +A 300 +B 250 +C 100 +D 150 + +>>> df.gt(other) + cost revenue +A False False +B False False +C False True +D False False + +Compare to a MultiIndex by level. + +>>> df_multindex = pd.DataFrame({{'cost': [250, 150, 100, 150, 300, 220], +... 'revenue': [100, 250, 300, 200, 175, 225]}}, +... index=[['Q1', 'Q1', 'Q1', 'Q2', 'Q2', 'Q2'], +... ['A', 'B', 'C', 'A', 'B', 'C']]) +>>> df_multindex + cost revenue +Q1 A 250 100 + B 150 250 + C 100 300 +Q2 A 150 200 + B 300 175 + C 220 225 + +>>> df.le(df_multindex, level=1) + cost revenue +Q1 A True True + B True True + C True True +Q2 A False True + B True False + C True False +""" diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/ops/invalid.py b/vlmpy310/lib/python3.10/site-packages/pandas/core/ops/invalid.py new file mode 100644 index 0000000000000000000000000000000000000000..e5ae6d359ac2205b01706211382d116b29176c7a --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/pandas/core/ops/invalid.py @@ -0,0 +1,62 @@ +""" +Templates for invalid operations. +""" +from __future__ import annotations + +import operator +from typing import TYPE_CHECKING + +import numpy as np + +if TYPE_CHECKING: + from pandas._typing import npt + + +def invalid_comparison(left, right, op) -> npt.NDArray[np.bool_]: + """ + If a comparison has mismatched types and is not necessarily meaningful, + follow python3 conventions by: + + - returning all-False for equality + - returning all-True for inequality + - raising TypeError otherwise + + Parameters + ---------- + left : array-like + right : scalar, array-like + op : operator.{eq, ne, lt, le, gt} + + Raises + ------ + TypeError : on inequality comparisons + """ + if op is operator.eq: + res_values = np.zeros(left.shape, dtype=bool) + elif op is operator.ne: + res_values = np.ones(left.shape, dtype=bool) + else: + typ = type(right).__name__ + raise TypeError(f"Invalid comparison between dtype={left.dtype} and {typ}") + return res_values + + +def make_invalid_op(name: str): + """ + Return a binary method that always raises a TypeError. 
+ + Parameters + ---------- + name : str + + Returns + ------- + invalid_op : function + """ + + def invalid_op(self, other=None): + typ = type(self).__name__ + raise TypeError(f"cannot perform {name} with this index type: {typ}") + + invalid_op.__name__ = name + return invalid_op diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/ops/mask_ops.py b/vlmpy310/lib/python3.10/site-packages/pandas/core/ops/mask_ops.py new file mode 100644 index 0000000000000000000000000000000000000000..adc1f63c568bf579f31b13446f4614435d443df1 --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/pandas/core/ops/mask_ops.py @@ -0,0 +1,189 @@ +""" +Ops for masked arrays. +""" +from __future__ import annotations + +import numpy as np + +from pandas._libs import ( + lib, + missing as libmissing, +) + + +def kleene_or( + left: bool | np.ndarray | libmissing.NAType, + right: bool | np.ndarray | libmissing.NAType, + left_mask: np.ndarray | None, + right_mask: np.ndarray | None, +): + """ + Boolean ``or`` using Kleene logic. + + Values are NA where we have ``NA | NA`` or ``NA | False``. + ``NA | True`` is considered True. + + Parameters + ---------- + left, right : ndarray, NA, or bool + The values of the array. + left_mask, right_mask : ndarray, optional + The masks. Only one of these may be None, which implies that + the associated `left` or `right` value is a scalar. + + Returns + ------- + result, mask: ndarray[bool] + The result of the logical or, and the new mask. + """ + # To reduce the number of cases, we ensure that `left` & `left_mask` + # always come from an array, not a scalar. This is safe, since + # A | B == B | A + if left_mask is None: + return kleene_or(right, left, right_mask, left_mask) + + if not isinstance(left, np.ndarray): + raise TypeError("Either `left` or `right` need to be a np.ndarray.") + + raise_for_nan(right, method="or") + + if right is libmissing.NA: + result = left.copy() + else: + result = left | right + + if right_mask is not None: + # output is unknown where (False & NA), (NA & False), (NA & NA) + left_false = ~(left | left_mask) + right_false = ~(right | right_mask) + mask = ( + (left_false & right_mask) + | (right_false & left_mask) + | (left_mask & right_mask) + ) + else: + if right is True: + mask = np.zeros_like(left_mask) + elif right is libmissing.NA: + mask = (~left & ~left_mask) | left_mask + else: + # False + mask = left_mask.copy() + + return result, mask + + +def kleene_xor( + left: bool | np.ndarray | libmissing.NAType, + right: bool | np.ndarray | libmissing.NAType, + left_mask: np.ndarray | None, + right_mask: np.ndarray | None, +): + """ + Boolean ``xor`` using Kleene logic. + + This is the same as ``or``, with the following adjustments + + * True, True -> False + * True, NA -> NA + + Parameters + ---------- + left, right : ndarray, NA, or bool + The values of the array. + left_mask, right_mask : ndarray, optional + The masks. Only one of these may be None, which implies that + the associated `left` or `right` value is a scalar. + + Returns + ------- + result, mask: ndarray[bool] + The result of the logical xor, and the new mask. + """ + # To reduce the number of cases, we ensure that `left` & `left_mask` + # always come from an array, not a scalar. 
This is safe, since + # A ^ B == B ^ A + if left_mask is None: + return kleene_xor(right, left, right_mask, left_mask) + + if not isinstance(left, np.ndarray): + raise TypeError("Either `left` or `right` need to be a np.ndarray.") + + raise_for_nan(right, method="xor") + if right is libmissing.NA: + result = np.zeros_like(left) + else: + result = left ^ right + + if right_mask is None: + if right is libmissing.NA: + mask = np.ones_like(left_mask) + else: + mask = left_mask.copy() + else: + mask = left_mask | right_mask + + return result, mask + + +def kleene_and( + left: bool | libmissing.NAType | np.ndarray, + right: bool | libmissing.NAType | np.ndarray, + left_mask: np.ndarray | None, + right_mask: np.ndarray | None, +): + """ + Boolean ``and`` using Kleene logic. + + Values are ``NA`` for ``NA & NA`` or ``True & NA``. + + Parameters + ---------- + left, right : ndarray, NA, or bool + The values of the array. + left_mask, right_mask : ndarray, optional + The masks. Only one of these may be None, which implies that + the associated `left` or `right` value is a scalar. + + Returns + ------- + result, mask: ndarray[bool] + The result of the logical xor, and the new mask. + """ + # To reduce the number of cases, we ensure that `left` & `left_mask` + # always come from an array, not a scalar. This is safe, since + # A & B == B & A + if left_mask is None: + return kleene_and(right, left, right_mask, left_mask) + + if not isinstance(left, np.ndarray): + raise TypeError("Either `left` or `right` need to be a np.ndarray.") + raise_for_nan(right, method="and") + + if right is libmissing.NA: + result = np.zeros_like(left) + else: + result = left & right + + if right_mask is None: + # Scalar `right` + if right is libmissing.NA: + mask = (left & ~left_mask) | left_mask + + else: + mask = left_mask.copy() + if right is False: + # unmask everything + mask[:] = False + else: + # unmask where either left or right is False + left_false = ~(left | left_mask) + right_false = ~(right | right_mask) + mask = (left_mask & ~right_false) | (right_mask & ~left_false) + + return result, mask + + +def raise_for_nan(value, method: str) -> None: + if lib.is_float(value) and np.isnan(value): + raise ValueError(f"Cannot perform logical '{method}' with floating NaN") diff --git a/vlmpy310/lib/python3.10/site-packages/pandas/core/ops/missing.py b/vlmpy310/lib/python3.10/site-packages/pandas/core/ops/missing.py new file mode 100644 index 0000000000000000000000000000000000000000..fc685935a35fceab74012912d3c3cae65b9c1818 --- /dev/null +++ b/vlmpy310/lib/python3.10/site-packages/pandas/core/ops/missing.py @@ -0,0 +1,176 @@ +""" +Missing data handling for arithmetic operations. + +In particular, pandas conventions regarding division by zero differ +from numpy in the following ways: + 1) np.array([-1, 0, 1], dtype=dtype1) // np.array([0, 0, 0], dtype=dtype2) + gives [nan, nan, nan] for most dtype combinations, and [0, 0, 0] for + the remaining pairs + (the remaining being dtype1==dtype2==intN and dtype==dtype2==uintN). + + pandas convention is to return [-inf, nan, inf] for all dtype + combinations. + + Note: the numpy behavior described here is py3-specific. + + 2) np.array([-1, 0, 1], dtype=dtype1) % np.array([0, 0, 0], dtype=dtype2) + gives precisely the same results as the // operation. + + pandas convention is to return [nan, nan, nan] for all dtype + combinations. + + 3) divmod behavior consistent with 1) and 2). 
+""" +from __future__ import annotations + +import operator + +import numpy as np + +from pandas.core import roperator + + +def _fill_zeros(result: np.ndarray, x, y): + """ + If this is a reversed op, then flip x,y + + If we have an integer value (or array in y) + and we have 0's, fill them with np.nan, + return the result. + + Mask the nan's from x. + """ + if result.dtype.kind == "f": + return result + + is_variable_type = hasattr(y, "dtype") + is_scalar_type = not isinstance(y, np.ndarray) + + if not is_variable_type and not is_scalar_type: + # e.g. test_series_ops_name_retention with mod we get here with list/tuple + return result + + if is_scalar_type: + y = np.array(y) + + if y.dtype.kind in "iu": + ymask = y == 0 + if ymask.any(): + # GH#7325, mask and nans must be broadcastable + mask = ymask & ~np.isnan(result) + + # GH#9308 doing ravel on result and mask can improve putmask perf, + # but can also make unwanted copies. + result = result.astype("float64", copy=False) + + np.putmask(result, mask, np.nan) + + return result + + +def mask_zero_div_zero(x, y, result: np.ndarray) -> np.ndarray: + """ + Set results of 0 // 0 to np.nan, regardless of the dtypes + of the numerator or the denominator. + + Parameters + ---------- + x : ndarray + y : ndarray + result : ndarray + + Returns + ------- + ndarray + The filled result. + + Examples + -------- + >>> x = np.array([1, 0, -1], dtype=np.int64) + >>> x + array([ 1, 0, -1]) + >>> y = 0 # int 0; numpy behavior is different with float + >>> result = x // y + >>> result # raw numpy result does not fill division by zero + array([0, 0, 0]) + >>> mask_zero_div_zero(x, y, result) + array([ inf, nan, -inf]) + """ + + if not hasattr(y, "dtype"): + # e.g. scalar, tuple + y = np.array(y) + if not hasattr(x, "dtype"): + # e.g scalar, tuple + x = np.array(x) + + zmask = y == 0 + + if zmask.any(): + # Flip sign if necessary for -0.0 + zneg_mask = zmask & np.signbit(y) + zpos_mask = zmask & ~zneg_mask + + x_lt0 = x < 0 + x_gt0 = x > 0 + nan_mask = zmask & (x == 0) + neginf_mask = (zpos_mask & x_lt0) | (zneg_mask & x_gt0) + posinf_mask = (zpos_mask & x_gt0) | (zneg_mask & x_lt0) + + if nan_mask.any() or neginf_mask.any() or posinf_mask.any(): + # Fill negative/0 with -inf, positive/0 with +inf, 0/0 with NaN + result = result.astype("float64", copy=False) + + result[nan_mask] = np.nan + result[posinf_mask] = np.inf + result[neginf_mask] = -np.inf + + return result + + +def dispatch_fill_zeros(op, left, right, result): + """ + Call _fill_zeros with the appropriate fill value depending on the operation, + with special logic for divmod and rdivmod. + + Parameters + ---------- + op : function (operator.add, operator.div, ...) + left : object (np.ndarray for non-reversed ops) + We have excluded ExtensionArrays here + right : object (np.ndarray for reversed ops) + We have excluded ExtensionArrays here + result : ndarray + + Returns + ------- + result : np.ndarray + + Notes + ----- + For divmod and rdivmod, the `result` parameter and returned `result` + is a 2-tuple of ndarray objects. + """ + if op is divmod: + result = ( + mask_zero_div_zero(left, right, result[0]), + _fill_zeros(result[1], left, right), + ) + elif op is roperator.rdivmod: + result = ( + mask_zero_div_zero(right, left, result[0]), + _fill_zeros(result[1], right, left), + ) + elif op is operator.floordiv: + # Note: no need to do this for truediv; in py3 numpy behaves the way + # we want. 
+ result = mask_zero_div_zero(left, right, result) + elif op is roperator.rfloordiv: + # Note: no need to do this for rtruediv; in py3 numpy behaves the way + # we want. + result = mask_zero_div_zero(right, left, result) + elif op is operator.mod: + result = _fill_zeros(result, left, right) + elif op is roperator.rmod: + result = _fill_zeros(result, right, left) + return result
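
The three ops helper modules above (`invalid.py`, `mask_ops.py`, `missing.py`) are internal to pandas rather than public API, but the conventions their docstrings describe are easy to verify. First, a minimal sketch of the mismatched-type comparison rules implemented by `invalid_comparison`; note that `pandas.core.ops.invalid` is a private module, so this import path is an implementation detail that may move between versions:

import operator

import numpy as np

from pandas.core.ops.invalid import invalid_comparison

arr = np.arange(3)  # int64 values 0, 1, 2

# Equality against an incompatible type is all-False, inequality is all-True,
# and ordered comparisons raise, matching the python3 conventions above.
invalid_comparison(arr, "foo", operator.eq)   # array([False, False, False])
invalid_comparison(arr, "foo", operator.ne)   # array([ True,  True,  True])
try:
    invalid_comparison(arr, "foo", operator.lt)
except TypeError as err:
    print(err)  # Invalid comparison between dtype=int64 and str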
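
The Kleene helpers in `mask_ops.py` are what pandas' masked boolean arrays (the nullable `"boolean"` dtype) route their logical operators through, so the three-valued semantics spelled out in their docstrings can be observed from the public API. A short sketch, assuming a pandas build that ships `BooleanDtype`:

import pandas as pd

a = pd.array([True, False, pd.NA], dtype="boolean")

# kleene_or:  True | NA is True; False | NA and NA | NA are unknown (NA)
a | pd.NA   # [True, <NA>, <NA>]

# kleene_and: False & NA is False; True & NA and NA & NA are unknown (NA)
a & pd.NA   # [<NA>, False, <NA>]

# kleene_xor: NA always propagates, because the unknown bit could flip the result
a ^ pd.NA   # [<NA>, <NA>, <NA>]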
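
Finally, `missing.py` encodes the division-by-zero conventions listed in its module docstring, and `dispatch_fill_zeros` applies them to `//`, `%`, and `divmod` (plus the reflected variants). A quick illustration with an integer Series, assuming standard pandas behavior:

import pandas as pd

s = pd.Series([-1, 0, 1], dtype="int64")

s // 0        # -inf, NaN, inf  (raw NumPy int // 0 would give 0, 0, 0 with a warning)
s % 0         # NaN, NaN, NaN
divmod(s, 0)  # 2-tuple of Series combining the two conventions above
10 // s       # the reflected op (rfloordiv) gets the same masking via mask_zero_div_zero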