diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_native.h new file mode 100644 index 0000000000000000000000000000000000000000..c0f65d6af92f321b67c08733a235418910715621 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_adaptive_avg_pool3d_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & _adaptive_avg_pool3d_out_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, at::Tensor & out); +TORCH_API at::Tensor adaptive_avg_pool3d_cpu(const at::Tensor & self, at::IntArrayRef output_size); +TORCH_API at::Tensor adaptive_avg_pool3d_cuda(const at::Tensor & self, at::IntArrayRef output_size); +TORCH_API at::Tensor adaptive_avg_pool3d_quantized_cpu(const at::Tensor & self, at::IntArrayRef output_size); +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_assert_async.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_assert_async.h new file mode 100644 index 0000000000000000000000000000000000000000..3bb927b8c574c342ee7c7bfa968331a0b107f807 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_assert_async.h @@ -0,0 +1,35 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_assert_async(Tensor self) -> () +inline void _assert_async(const at::Tensor & self) { + return at::_ops::_assert_async::call(self); +} + +// aten::_assert_async.msg(Tensor self, str assert_msg) -> () +inline void 
_assert_async(const at::Tensor & self, c10::string_view assert_msg) { + return at::_ops::_assert_async_msg::call(self, assert_msg); +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_eigh_cuda_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_eigh_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..c3a9e5693f2938ee925eb0c1aa786985a9bce3fd --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_eigh_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple _linalg_eigh(const at::Tensor & A, c10::string_view UPLO="L", bool compute_v=true); +TORCH_API ::std::tuple _linalg_eigh_out(at::Tensor & eigenvalues, at::Tensor & eigenvectors, const at::Tensor & A, c10::string_view UPLO="L", bool compute_v=true); +TORCH_API ::std::tuple _linalg_eigh_outf(const at::Tensor & A, c10::string_view UPLO, bool compute_v, at::Tensor & eigenvalues, at::Tensor & eigenvectors); + +} // namespace cuda +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_slogdet_cpu_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_slogdet_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e875b66f19e4c5168a60cba8960aa84e890566c3 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_linalg_slogdet_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API ::std::tuple _linalg_slogdet(const at::Tensor & A); +TORCH_API ::std::tuple _linalg_slogdet_out(at::Tensor & sign, at::Tensor & logabsdet, at::Tensor & LU, at::Tensor & pivots, const at::Tensor & A); +TORCH_API ::std::tuple _linalg_slogdet_outf(const at::Tensor & A, at::Tensor & sign, at::Tensor & logabsdet, at::Tensor & LU, at::Tensor & pivots); + +} // namespace cpu +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_lstm_mps.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_lstm_mps.h new file mode 100644 index 0000000000000000000000000000000000000000..89e6e7fa5602e19c283dc0baa500a27255dc3812 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_lstm_mps.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_lstm_mps(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first) -> (Tensor, Tensor, Tensor, Tensor, Tensor, Tensor) +inline ::std::tuple _lstm_mps(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { + return at::_ops::_lstm_mps::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first); +} + +// aten::_lstm_mps.out(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) 
out5) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!)) +inline ::std::tuple _lstm_mps_out(at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, at::Tensor & out5, const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first) { + return at::_ops::_lstm_mps_out::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2, out3, out4, out5); +} +// aten::_lstm_mps.out(Tensor input, Tensor[] hx, Tensor[] params, bool has_biases, int num_layers, float dropout, bool train, bool bidirectional, bool batch_first, *, Tensor(a!) out0, Tensor(b!) out1, Tensor(c!) out2, Tensor(d!) out3, Tensor(e!) out4, Tensor(f!) out5) -> (Tensor(a!), Tensor(b!), Tensor(c!), Tensor(d!), Tensor(e!), Tensor(f!)) +inline ::std::tuple _lstm_mps_outf(const at::Tensor & input, at::TensorList hx, at::TensorList params, bool has_biases, int64_t num_layers, double dropout, bool train, bool bidirectional, bool batch_first, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2, at::Tensor & out3, at::Tensor & out4, at::Tensor & out5) { + return at::_ops::_lstm_mps_out::call(input, hx, params, has_biases, num_layers, dropout, train, bidirectional, batch_first, out0, out1, out2, out3, out4, out5); +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_saturate_weight_to_fp16.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_saturate_weight_to_fp16.h new file mode 100644 index 0000000000000000000000000000000000000000..6c26dc5f58698eb73935bcb2d36fef27363971d0 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_saturate_weight_to_fp16.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include 
+#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_saturate_weight_to_fp16(Tensor weight) -> Tensor +inline at::Tensor _saturate_weight_to_fp16(const at::Tensor & weight) { + return at::_ops::_saturate_weight_to_fp16::call(weight); +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_efficient_attention_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_efficient_attention_native.h new file mode 100644 index 0000000000000000000000000000000000000000..a89a127bc055f8752f0bfc9f56d603a7fe711290 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_scaled_dot_product_efficient_attention_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple _scaled_dot_product_efficient_attention_cuda(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional & attn_bias, bool compute_log_sumexp, double dropout_p=0.0, bool is_causal=false, c10::optional scale=c10::nullopt); +TORCH_API ::std::tuple _scaled_dot_product_efficient_attention_nestedtensor_cuda(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional & attn_bias, bool compute_log_sumexp, double dropout_p=0.0, bool is_causal=false, c10::optional scale=c10::nullopt); +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_slow_conv2d_backward_cuda_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_slow_conv2d_backward_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..d98bc55c6105bff07b3ae9232de3ba1e3033d72e --- /dev/null +++ 
b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_slow_conv2d_backward_cuda_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cuda { + +TORCH_API ::std::tuple _slow_conv2d_backward_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding); +TORCH_API ::std::tuple _slow_conv2d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias); +TORCH_API ::std::tuple _slow_conv2d_backward_symint_out(at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding); +TORCH_API ::std::tuple _slow_conv2d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, at::Tensor & grad_input, at::Tensor & grad_weight, at::Tensor & grad_bias); +TORCH_API ::std::tuple _slow_conv2d_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, 
at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, ::std::array output_mask); +TORCH_API ::std::tuple _slow_conv2d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & weight, c10::SymIntArrayRef kernel_size, c10::SymIntArrayRef stride, c10::SymIntArrayRef padding, ::std::array output_mask); + +} // namespace cuda +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_unsafe.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_unsafe.h new file mode 100644 index 0000000000000000000000000000000000000000..8ea807ce6162ad41aec1a0a10d3550b96ed59cdf --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_sparse_coo_tensor_unsafe.h @@ -0,0 +1,69 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? 
is_coalesced=None) -> Tensor +inline at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}, c10::optional is_coalesced=c10::nullopt) { + return at::_ops::_sparse_coo_tensor_unsafe::call(indices, values, c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), is_coalesced); +} +namespace symint { + template ::value>> + at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options={}, c10::optional is_coalesced=c10::nullopt) { + return at::_ops::_sparse_coo_tensor_unsafe::call(indices, values, c10::fromIntArrayRefSlow(size), optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), is_coalesced); + } +} + +// aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? 
is_coalesced=None) -> Tensor +inline at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional is_coalesced) { + return at::_ops::_sparse_coo_tensor_unsafe::call(indices, values, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory, is_coalesced); +} +namespace symint { + template ::value>> + at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, at::IntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional is_coalesced) { + return at::_ops::_sparse_coo_tensor_unsafe::call(indices, values, c10::fromIntArrayRefSlow(size), dtype, layout, device, pin_memory, is_coalesced); + } +} + +// aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? 
is_coalesced=None) -> Tensor +inline at::Tensor _sparse_coo_tensor_unsafe_symint(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, at::TensorOptions options={}, c10::optional is_coalesced=c10::nullopt) { + return at::_ops::_sparse_coo_tensor_unsafe::call(indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), is_coalesced); +} +namespace symint { + template ::value>> + at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, at::TensorOptions options={}, c10::optional is_coalesced=c10::nullopt) { + return at::_ops::_sparse_coo_tensor_unsafe::call(indices, values, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), is_coalesced); + } +} + +// aten::_sparse_coo_tensor_unsafe(Tensor indices, Tensor values, SymInt[] size, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, bool? 
is_coalesced=None) -> Tensor +inline at::Tensor _sparse_coo_tensor_unsafe_symint(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional is_coalesced) { + return at::_ops::_sparse_coo_tensor_unsafe::call(indices, values, size, dtype, layout, device, pin_memory, is_coalesced); +} +namespace symint { + template ::value>> + at::Tensor _sparse_coo_tensor_unsafe(const at::Tensor & indices, const at::Tensor & values, c10::SymIntArrayRef size, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory, c10::optional is_coalesced) { + return at::_ops::_sparse_coo_tensor_unsafe::call(indices, values, size, dtype, layout, device, pin_memory, is_coalesced); + } +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm.h new file mode 100644 index 0000000000000000000000000000000000000000..1a2393390a8b83144ae12112c8cf26cd173df56f --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/addbmm.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & addbmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::addbmm_out::call(self, batch1, batch2, beta, alpha, out); +} +// aten::addbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & addbmm_outf(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) { + return at::_ops::addbmm_out::call(self, batch1, batch2, beta, alpha, out); +} + +// aten::addbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor +inline at::Tensor addbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) { + return at::_ops::addbmm::call(self, batch1, batch2, beta, alpha); +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/arctan2_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/arctan2_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..2ecbc814d9efbc3d26757480dd07bef91f5a7ac6 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/arctan2_ops.h @@ -0,0 +1,50 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API arctan2 { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::arctan2") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "arctan2(Tensor self, Tensor other) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & other); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +struct TORCH_API arctan2_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::arctan2") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "arctan2.out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out); +}; + +struct TORCH_API arctan2_ { + using schema = at::Tensor & (at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::arctan2_") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "arctan2_(Tensor(a!) 
self, Tensor other) -> Tensor(a!)") + static at::Tensor & call(at::Tensor & self, const at::Tensor & other); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/choose_qparams_optimized_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/choose_qparams_optimized_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..04e68385f32a2bdb237de7be7b302c762dc225f8 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/choose_qparams_optimized_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API choose_qparams_optimized { + using schema = ::std::tuple (const at::Tensor &, int64_t, int64_t, double, int64_t); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::choose_qparams_optimized") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "choose_qparams_optimized(Tensor input, int numel, int n_bins, float ratio, int bit_width) -> (Tensor, Tensor)") + static ::std::tuple call(const at::Tensor & input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width); + static ::std::tuple redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & input, int64_t numel, int64_t n_bins, double ratio, int64_t bit_width); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/concat.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/concat.h new file mode 100644 index 0000000000000000000000000000000000000000..da4ba4f8bfcec4bf90504e744b2d7674b09b7ee6 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/concat.h @@ -0,0 +1,53 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::concat(Tensor[] tensors, int dim=0) -> Tensor +inline at::Tensor concat(at::TensorList tensors, int64_t dim=0) { + return at::_ops::concat::call(tensors, dim); +} + +// aten::concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & concat_out(at::Tensor & out, at::TensorList tensors, int64_t dim=0) { + return at::_ops::concat_out::call(tensors, dim, out); +} +// aten::concat.out(Tensor[] tensors, int dim=0, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & concat_outf(at::TensorList tensors, int64_t dim, at::Tensor & out) { + return at::_ops::concat_out::call(tensors, dim, out); +} + +// aten::concat.names(Tensor[] tensors, Dimname dim) -> Tensor +inline at::Tensor concat(at::TensorList tensors, at::Dimname dim) { + return at::_ops::concat_names::call(tensors, dim); +} + +// aten::concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & concat_out(at::Tensor & out, at::TensorList tensors, at::Dimname dim) { + return at::_ops::concat_names_out::call(tensors, dim, out); +} +// aten::concat.names_out(Tensor[] tensors, Dimname dim, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & concat_outf(at::TensorList tensors, at::Dimname dim, at::Tensor & out) { + return at::_ops::concat_names_out::call(tensors, dim, out); +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/conv1d_compositeimplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/conv1d_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..37d1c01d6e88adcf2ed03b61e02f3e46995ddf58 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/conv1d_compositeimplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor conv1d(const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias={}, at::IntArrayRef stride=1, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, int64_t groups=1); +TORCH_API at::Tensor conv1d_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias={}, c10::SymIntArrayRef stride=c10::SymInt(1), c10::SymIntArrayRef padding=c10::SymInt(0), c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1); +TORCH_API at::Tensor conv1d(const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, at::IntArrayRef stride, c10::string_view padding, at::IntArrayRef dilation=1, int64_t groups=1); +TORCH_API at::Tensor conv1d_symint(const at::Tensor & input, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef stride, c10::string_view padding, c10::SymIntArrayRef dilation=c10::SymInt(1), c10::SymInt groups=1); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cummax_compositeimplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cummax_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..1aa5f0ca9344e05a1c5afa77b29d1941fa3bee6e --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cummax_compositeimplicitautograd_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::tuple cummax(const at::Tensor & self, at::Dimname dim); +TORCH_API ::std::tuple cummax_out(at::Tensor & values, at::Tensor & indices, const at::Tensor & self, at::Dimname dim); +TORCH_API ::std::tuple cummax_outf(const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_renorm_cpu_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_renorm_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..804c77bb81c066d11ae990e858b45fb8ae7f714b --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/embedding_renorm_cpu_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor & embedding_renorm_(at::Tensor & self, const at::Tensor & indices, double max_norm, double norm_type); + +} // namespace cpu +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/equal_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/equal_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..74af76ef345ce977a9533dddf97d32ff10e8266e --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/equal_ops.h @@ -0,0 +1,28 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API equal { + using schema = bool (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::equal") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "equal(Tensor self, Tensor other) -> bool") + static bool call(const at::Tensor & self, const at::Tensor & other); + static bool redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_quantize_weight_compositeimplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_quantize_weight_compositeimplicitautograd_dispatch.h new file mode 100644 index 
0000000000000000000000000000000000000000..a44bb97fa48c6db6501faa7b9f506c81b1d128b9 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fbgemm_linear_quantize_weight_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API ::std::tuple fbgemm_linear_quantize_weight(const at::Tensor & input); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfftfreq_compositeexplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfftfreq_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..299972343f5f39d5b395e3761f67f4a4ecb5ed76 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfftfreq_compositeexplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor fft_rfftfreq(int64_t n, double d=1.0, at::TensorOptions options={}); +TORCH_API at::Tensor fft_rfftfreq(int64_t n, double d, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +TORCH_API at::Tensor & fft_rfftfreq_out(at::Tensor & out, int64_t n, double d=1.0); +TORCH_API at::Tensor & fft_rfftfreq_outf(int64_t n, double d, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/hann_window_compositeexplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/hann_window_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e9da8ffd2a973c3fbee731e8cdad442f8038adc1 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/hann_window_compositeexplicitautograd_dispatch.h @@ -0,0 +1,30 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor hann_window(int64_t window_length, at::TensorOptions options={}); +TORCH_API at::Tensor hann_window(int64_t window_length, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +TORCH_API at::Tensor & hann_window_out(at::Tensor & out, int64_t window_length); +TORCH_API at::Tensor & hann_window_outf(int64_t window_length, at::Tensor & out); +TORCH_API at::Tensor hann_window(int64_t window_length, bool periodic, at::TensorOptions options={}); +TORCH_API at::Tensor hann_window(int64_t window_length, bool periodic, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory); +TORCH_API at::Tensor & hann_window_out(at::Tensor & out, int64_t window_length, bool periodic); +TORCH_API at::Tensor & hann_window_outf(int64_t window_length, bool periodic, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/hinge_embedding_loss_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/hinge_embedding_loss_native.h new file mode 100644 index 0000000000000000000000000000000000000000..b2c34091e951f465476600d959d349e9eafda585 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/hinge_embedding_loss_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor hinge_embedding_loss(const at::Tensor & self, const at::Tensor & target, double margin=1.0, int64_t reduction=at::Reduction::Mean); +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/histogram_native.h 
b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/histogram_native.h new file mode 100644 index 0000000000000000000000000000000000000000..71fbf4fa8a2ad04a91610a07eadd13c0d760da59 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/histogram_native.h @@ -0,0 +1,24 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API ::std::tuple histogram(const at::Tensor & self, const at::Tensor & bins, const c10::optional & weight={}, bool density=false); +TORCH_API ::std::tuple histogram_out(const at::Tensor & self, const at::Tensor & bins, const c10::optional & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges); +TORCH_API ::std::tuple histogram(const at::Tensor & self, int64_t bins=100, c10::optional> range=c10::nullopt, const c10::optional & weight={}, bool density=false); +TORCH_API ::std::tuple histogram_out(const at::Tensor & self, int64_t bins, c10::optional> range, const c10::optional & weight, bool density, at::Tensor & hist, at::Tensor & bin_edges); +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_compositeexplicitautogradnonfunctional_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..e0f0f8337346d2ccb6c4efd91defec36e6bf1051 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/index_add_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API 
+#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor index_add(const at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1); +TORCH_API at::Tensor & index_add_(at::Tensor & self, int64_t dim, const at::Tensor & index, const at::Tensor & source, const at::Scalar & alpha=1); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_cpu_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..ee95465e0ff7d2100d0c4ee50262e48127785e7c --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/isneginf_cpu_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor isneginf(const at::Tensor & self); +TORCH_API at::Tensor & isneginf_out(at::Tensor & out, const at::Tensor & self); +TORCH_API at::Tensor & isneginf_outf(const at::Tensor & self, at::Tensor & out); + +} // namespace cpu +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/kl_div.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/kl_div.h new file mode 100644 index 0000000000000000000000000000000000000000..efd25925a9f305bfc576612dba15054329c7f856 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/kl_div.h @@ -0,0 +1,30 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::kl_div(Tensor self, Tensor target, int reduction=Mean, *, bool log_target=False) -> Tensor +inline at::Tensor kl_div(const at::Tensor & self, const at::Tensor & target, int64_t reduction=at::Reduction::Mean, bool log_target=false) { + return at::_ops::kl_div::call(self, target, reduction, log_target); +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_cpu_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_cpu_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..20aa6e98c30c69ec0a4a49c77f72bf736683803b --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_cpu_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator 
signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace cpu { + +TORCH_API at::Tensor leaky_relu(const at::Tensor & self, const at::Scalar & negative_slope=0.01); +TORCH_API at::Tensor & leaky_relu_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & negative_slope=0.01); +TORCH_API at::Tensor & leaky_relu_outf(const at::Tensor & self, const at::Scalar & negative_slope, at::Tensor & out); +TORCH_API at::Tensor & leaky_relu_(at::Tensor & self, const at::Scalar & negative_slope=0.01); + +} // namespace cpu +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_convolution_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_convolution_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..8dfa1c422428f6152f34a2210a5be4716d0f8154 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_convolution_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { +namespace _ops { + + +struct TORCH_API miopen_convolution { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymInt, bool, bool); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::miopen_convolution") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "miopen_convolution(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic); +}; + +struct TORCH_API miopen_convolution_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymInt, bool, bool, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::miopen_convolution") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "miopen_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, bool benchmark, bool deterministic, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, bool benchmark, bool deterministic, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_convolution_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_convolution_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..2a2b8dacca41fbed741ea078490ebce4d46f44e3 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_convolution_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API mkldnn_convolution { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const c10::optional &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymInt); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mkldnn_convolution") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mkldnn_convolution(Tensor self, Tensor weight, Tensor? 
bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups); +}; + +struct TORCH_API mkldnn_convolution_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, const c10::optional &, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymIntArrayRef, c10::SymInt, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::mkldnn_convolution") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "mkldnn_convolution.out(Tensor self, Tensor weight, Tensor? bias, SymInt[] padding, SymInt[] stride, SymInt[] dilation, SymInt groups, *, Tensor(a!) 
out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & weight, const c10::optional & bias, c10::SymIntArrayRef padding, c10::SymIntArrayRef stride, c10::SymIntArrayRef dilation, c10::SymInt groups, at::Tensor & out); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool2d_backward_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool2d_backward_native.h new file mode 100644 index 0000000000000000000000000000000000000000..9b72584abe9c4c4ce17c78d504d813f79c581212 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mkldnn_max_pool2d_backward_native.h @@ -0,0 +1,22 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor & mkldnn_max_pool2d_backward_out(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, at::IntArrayRef dilation, bool ceil_mode, at::Tensor & out); +TORCH_API at::Tensor mkldnn_max_pool2d_backward(const at::Tensor & grad_output, const at::Tensor & output, const at::Tensor & input, at::IntArrayRef kernel_size, at::IntArrayRef stride={}, at::IntArrayRef padding=0, at::IntArrayRef dilation=1, bool ceil_mode=false); +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mse_loss_backward.h 
b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mse_loss_backward.h new file mode 100644 index 0000000000000000000000000000000000000000..a1c594c5ca07efae46db49d085b1831448ce2319 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mse_loss_backward.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) +inline at::Tensor & mse_loss_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) { + return at::_ops::mse_loss_backward_grad_input::call(grad_output, self, target, reduction, grad_input); +} +// aten::mse_loss_backward.grad_input(Tensor grad_output, Tensor self, Tensor target, int reduction, *, Tensor(a!) grad_input) -> Tensor(a!) 
+inline at::Tensor & mse_loss_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, at::Tensor & grad_input) { + return at::_ops::mse_loss_backward_grad_input::call(grad_output, self, target, reduction, grad_input); +} + +// aten::mse_loss_backward(Tensor grad_output, Tensor self, Tensor target, int reduction) -> Tensor +inline at::Tensor mse_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction) { + return at::_ops::mse_loss_backward::call(grad_output, self, target, reduction); +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/norm_cuda_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/norm_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..faf2a9ca6dbe27e90400960d39c9cb758a9694b8 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/norm_cuda_dispatch.h @@ -0,0 +1,28 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor norm(const at::Tensor & self, const c10::optional & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype); +TORCH_API at::Tensor & norm_out(at::Tensor & out, const at::Tensor & self, const c10::optional & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype); +TORCH_API at::Tensor & norm_outf(const at::Tensor & self, const c10::optional & p, at::IntArrayRef dim, bool keepdim, at::ScalarType dtype, at::Tensor & out); +TORCH_API at::Tensor norm(const at::Tensor & self, const c10::optional & p, at::IntArrayRef dim, bool keepdim=false); +TORCH_API at::Tensor & norm_out(at::Tensor & out, const at::Tensor & self, const c10::optional & p, at::IntArrayRef dim, bool keepdim=false); +TORCH_API at::Tensor & norm_outf(const at::Tensor & self, const c10::optional & p, at::IntArrayRef dim, bool keepdim, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/pow.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/pow.h new file mode 100644 index 0000000000000000000000000000000000000000..ad1ac48e3282ae2b623d8601b43c40bffcad8b6e --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/pow.h @@ -0,0 +1,67 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & pow_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & exponent) { + return at::_ops::pow_Tensor_Tensor_out::call(self, exponent, out); +} +// aten::pow.Tensor_Tensor_out(Tensor self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & pow_outf(const at::Tensor & self, const at::Tensor & exponent, at::Tensor & out) { + return at::_ops::pow_Tensor_Tensor_out::call(self, exponent, out); +} + +// aten::pow.Tensor_Tensor(Tensor self, Tensor exponent) -> Tensor +inline at::Tensor pow(const at::Tensor & self, const at::Tensor & exponent) { + return at::_ops::pow_Tensor_Tensor::call(self, exponent); +} + +// aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & pow_out(at::Tensor & out, const at::Scalar & self, const at::Tensor & exponent) { + return at::_ops::pow_Scalar_out::call(self, exponent, out); +} +// aten::pow.Scalar_out(Scalar self, Tensor exponent, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & pow_outf(const at::Scalar & self, const at::Tensor & exponent, at::Tensor & out) { + return at::_ops::pow_Scalar_out::call(self, exponent, out); +} + +// aten::pow.Scalar(Scalar self, Tensor exponent) -> Tensor +inline at::Tensor pow(const at::Scalar & self, const at::Tensor & exponent) { + return at::_ops::pow_Scalar::call(self, exponent); +} + +// aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & pow_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & exponent) { + return at::_ops::pow_Tensor_Scalar_out::call(self, exponent, out); +} +// aten::pow.Tensor_Scalar_out(Tensor self, Scalar exponent, *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & pow_outf(const at::Tensor & self, const at::Scalar & exponent, at::Tensor & out) { + return at::_ops::pow_Tensor_Scalar_out::call(self, exponent, out); +} + +// aten::pow.Tensor_Scalar(Tensor self, Scalar exponent) -> Tensor +inline at::Tensor pow(const at::Tensor & self, const at::Scalar & exponent) { + return at::_ops::pow_Tensor_Scalar::call(self, exponent); +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_gru_cell_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_gru_cell_native.h new file mode 100644 index 0000000000000000000000000000000000000000..49763f500d125add9f6659c32b6119c269a45e91 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/quantized_gru_cell_native.h @@ -0,0 +1,21 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +namespace at { +namespace native { +TORCH_API at::Tensor quantized_gru_cell(const at::Tensor & input, const at::Tensor & hx, const at::Tensor & w_ih, const at::Tensor & w_hh, const at::Tensor & b_ih, const at::Tensor & b_hh, const at::Tensor & packed_ih, const at::Tensor & packed_hh, const at::Tensor & col_offsets_ih, const at::Tensor & col_offsets_hh, const at::Scalar & scale_ih, const at::Scalar & scale_hh, const at::Scalar & zero_point_ih, const at::Scalar & zero_point_hh); +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/roll.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/roll.h new file mode 100644 index 0000000000000000000000000000000000000000..2a9a6fc898c619ad870667b1b4ec834b9f997908 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/roll.h @@ -0,0 +1,91 @@ +#pragma once + +// @generated by torchgen/gen.py from Function.h + +#include +#include 
+#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + + +#include + +namespace at { + + +// aten::roll(Tensor self, SymInt[1] shifts, int[1] dims=[]) -> Tensor +inline at::Tensor roll(const at::Tensor & self, at::IntArrayRef shifts, at::IntArrayRef dims={}) { + return at::_ops::roll::call(self, c10::fromIntArrayRefSlow(shifts), dims); +} +namespace symint { + template ::value>> + at::Tensor roll(const at::Tensor & self, at::IntArrayRef shifts, at::IntArrayRef dims={}) { + return at::_ops::roll::call(self, c10::fromIntArrayRefSlow(shifts), dims); + } +} + +// aten::roll(Tensor self, SymInt[1] shifts, int[1] dims=[]) -> Tensor +inline at::Tensor roll_symint(const at::Tensor & self, c10::SymIntArrayRef shifts, at::IntArrayRef dims={}) { + return at::_ops::roll::call(self, shifts, dims); +} +namespace symint { + template ::value>> + at::Tensor roll(const at::Tensor & self, c10::SymIntArrayRef shifts, at::IntArrayRef dims={}) { + return at::_ops::roll::call(self, shifts, dims); + } +} + +// aten::roll.out(Tensor self, SymInt[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & roll_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef shifts, at::IntArrayRef dims={}) { + return at::_ops::roll_out::call(self, c10::fromIntArrayRefSlow(shifts), dims, out); +} +namespace symint { + template ::value>> + at::Tensor & roll_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef shifts, at::IntArrayRef dims={}) { + return at::_ops::roll_out::call(self, c10::fromIntArrayRefSlow(shifts), dims, out); + } +} + +// aten::roll.out(Tensor self, SymInt[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & roll_outf(const at::Tensor & self, at::IntArrayRef shifts, at::IntArrayRef dims, at::Tensor & out) { + return at::_ops::roll_out::call(self, c10::fromIntArrayRefSlow(shifts), dims, out); +} +namespace symint { + template ::value>> + at::Tensor & roll_outf(const at::Tensor & self, at::IntArrayRef shifts, at::IntArrayRef dims, at::Tensor & out) { + return at::_ops::roll_out::call(self, c10::fromIntArrayRefSlow(shifts), dims, out); + } +} + +// aten::roll.out(Tensor self, SymInt[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!) +inline at::Tensor & roll_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef shifts, at::IntArrayRef dims={}) { + return at::_ops::roll_out::call(self, shifts, dims, out); +} +namespace symint { + template ::value>> + at::Tensor & roll_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef shifts, at::IntArrayRef dims={}) { + return at::_ops::roll_out::call(self, shifts, dims, out); + } +} + +// aten::roll.out(Tensor self, SymInt[1] shifts, int[1] dims=[], *, Tensor(a!) out) -> Tensor(a!) 
+inline at::Tensor & roll_symint_outf(const at::Tensor & self, c10::SymIntArrayRef shifts, at::IntArrayRef dims, at::Tensor & out) { + return at::_ops::roll_out::call(self, shifts, dims, out); +} +namespace symint { + template ::value>> + at::Tensor & roll_outf(const at::Tensor & self, c10::SymIntArrayRef shifts, at::IntArrayRef dims, at::Tensor & out) { + return at::_ops::roll_out::call(self, shifts, dims, out); + } +} + +} diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/scaled_dot_product_attention_compositeimplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/scaled_dot_product_attention_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..38336240175b170bcd3566c0644913c8b1f20c2d --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/scaled_dot_product_attention_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor scaled_dot_product_attention(const at::Tensor & query, const at::Tensor & key, const at::Tensor & value, const c10::optional & attn_mask={}, double dropout_p=0.0, bool is_causal=false, c10::optional scale=c10::nullopt); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/select_compositeimplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/select_compositeimplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..97a02ee0b41b3058d27fefafe0d2365cd09e9228 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/select_compositeimplicitautograd_dispatch.h @@ -0,0 +1,23 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeimplicitautograd { + +TORCH_API at::Tensor select(const at::Tensor & self, at::Dimname dim, int64_t index); + +} // namespace compositeimplicitautograd +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/slice_scatter_compositeexplicitautograd_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/slice_scatter_compositeexplicitautograd_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..f53c1b6de3f318e3e5b7ad04dfacbf8f602d6a8a --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/slice_scatter_compositeexplicitautograd_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautograd { + +TORCH_API at::Tensor & slice_scatter_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, int64_t dim=0, c10::optional start=c10::nullopt, c10::optional end=c10::nullopt, int64_t step=1); +TORCH_API at::Tensor & slice_scatter_outf(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional start, c10::optional end, int64_t step, at::Tensor & out); +TORCH_API at::Tensor & slice_scatter_symint_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & src, int64_t dim=0, c10::optional start=c10::nullopt, c10::optional end=c10::nullopt, c10::SymInt step=1); +TORCH_API at::Tensor & slice_scatter_symint_outf(const at::Tensor & self, const at::Tensor & src, int64_t dim, c10::optional start, c10::optional end, c10::SymInt step, at::Tensor & out); + +} // namespace compositeexplicitautograd +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_w_cuda_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_w_cuda_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..3ac1521d3cff2182e4677f1f754097020dab9006 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_chebyshev_polynomial_w_cuda_dispatch.h @@ -0,0 +1,25 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace cuda { + +TORCH_API at::Tensor special_chebyshev_polynomial_w(const at::Tensor & x, const at::Tensor & n); +TORCH_API at::Tensor & special_chebyshev_polynomial_w_out(at::Tensor & out, const at::Tensor & x, const at::Tensor & n); +TORCH_API at::Tensor & special_chebyshev_polynomial_w_outf(const at::Tensor & x, const at::Tensor & n, at::Tensor & out); + +} // namespace cuda +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_ndtri_native.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_ndtri_native.h new file mode 100644 index 0000000000000000000000000000000000000000..fd944cda131b4fa2aed8215862d942fa36434a24 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_ndtri_native.h @@ -0,0 +1,23 @@ +#pragma once + +// @generated by torchgen/gen.py from NativeFunction.h + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace at { +namespace native { +struct TORCH_API structured_special_ndtri_out : public at::meta::structured_special_ndtri { +void impl(const at::Tensor & self, const at::Tensor & out); +}; +} // namespace native +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/take_ops.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/take_ops.h new file mode 100644 index 0000000000000000000000000000000000000000..e85629ac48f0b53748b405fe6394105cb9e5cc23 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/take_ops.h @@ -0,0 +1,39 @@ +#pragma once + +// @generated by torchgen/gen.py from Operator.h + +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. 
+// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { +namespace _ops { + + +struct TORCH_API take_out { + using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::take") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "take.out(Tensor self, Tensor index, *, Tensor(a!) out) -> Tensor(a!)") + static at::Tensor & call(const at::Tensor & self, const at::Tensor & index, at::Tensor & out); + static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & index, at::Tensor & out); +}; + +struct TORCH_API take { + using schema = at::Tensor (const at::Tensor &, const at::Tensor &); + using ptr_schema = schema*; + // See Note [static constexpr char* members for windows NVCC] + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::take") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "") + STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "take(Tensor self, Tensor index) -> Tensor") + static at::Tensor call(const at::Tensor & self, const at::Tensor & index); + static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & index); +}; + +}} // namespace at::_ops diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/tril_meta_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/tril_meta_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..73546783d411935e2414458e84da3d473d05c509 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/tril_meta_dispatch.h @@ -0,0 +1,26 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ 
file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. +#include + +namespace at { + +namespace meta { + +TORCH_API at::Tensor tril(const at::Tensor & self, int64_t diagonal=0); +TORCH_API at::Tensor & tril_out(at::Tensor & out, const at::Tensor & self, int64_t diagonal=0); +TORCH_API at::Tensor & tril_outf(const at::Tensor & self, int64_t diagonal, at::Tensor & out); +TORCH_API at::Tensor & tril_(at::Tensor & self, int64_t diagonal=0); + +} // namespace meta +} // namespace at diff --git a/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/xlogy_compositeexplicitautogradnonfunctional_dispatch.h b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/xlogy_compositeexplicitautogradnonfunctional_dispatch.h new file mode 100644 index 0000000000000000000000000000000000000000..44c1228a1aeca10c2716814ea609f3fe87f53df6 --- /dev/null +++ b/videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/xlogy_compositeexplicitautogradnonfunctional_dispatch.h @@ -0,0 +1,24 @@ +#pragma once +// @generated by torchgen/gen.py from DispatchKeyFunction.h + +// NB: The implementing C++ file is RegisterDispatchKey.cpp + +// The only #includes we need are for custom classes that have defaults in the C++ API +#include +#include +#include + +// Forward declarations of any types needed in the operator signatures. +// We can't directly include these classes because it will cause circular include dependencies. +// This file is included by TensorBody.h, which defines the Tensor class. 
+#include + +namespace at { + +namespace compositeexplicitautogradnonfunctional { + +TORCH_API at::Tensor xlogy(const at::Tensor & self, const at::Tensor & other); +TORCH_API at::Tensor & xlogy_(at::Tensor & self, const at::Tensor & other); + +} // namespace compositeexplicitautogradnonfunctional +} // namespace at diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/fft/__init__.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/fft/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..861d7d3b723e42e9df0e81da6e2c51322c93c626 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/fft/__init__.py @@ -0,0 +1,16 @@ +# flake8: NOQA +from cupyx.scipy.fft._fft import ( + fft, ifft, fft2, ifft2, fftn, ifftn, + rfft, irfft, rfft2, irfft2, rfftn, irfftn, + hfft, ihfft, hfft2, ihfft2, hfftn, ihfftn +) +from cupyx.scipy.fft._fft import ( + __ua_domain__, __ua_convert__, __ua_function__) +from cupyx.scipy.fft._fft import _scipy_150, _scipy_160 +from cupyx.scipy.fft._fftlog import fht, ifht +from cupyx.scipy.fft._helper import next_fast_len # NOQA +from cupy.fft import fftshift, ifftshift, fftfreq, rfftfreq +from cupyx.scipy.fftpack import get_fft_plan +from cupyx.scipy.fft._realtransforms import ( + dct, dctn, dst, dstn, idct, idctn, idst, idstn +) diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/fft/_fftlog.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/fft/_fftlog.py new file mode 100644 index 0000000000000000000000000000000000000000..27e029a623a0cec2baf5388c621c8850a10794cd --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/fft/_fftlog.py @@ -0,0 +1,225 @@ +'''Fast Hankel transforms using the FFTLog algorithm. +The implementation closely follows the Fortran code of Hamilton (2000). 
+''' + +import math +from warnings import warn + +import cupy +from cupyx.scipy.fft import _fft +from cupyx.scipy.special import loggamma, poch + +try: + # fht only exists in SciPy >= 1.7 + from scipy.fft import fht as _fht + _scipy_fft = _fft._scipy_fft + del _fht +except ImportError: + class _DummyModule: + def __getattr__(self, name): + return None + + _scipy_fft = _DummyModule() + +# Note scipy also defines fhtoffset but this only operates on scalars +__all__ = ['fht', 'ifht'] + + +# constants +LN_2 = math.log(2) + + +@_fft._implements(_scipy_fft.fht) +def fht(a, dln, mu, offset=0.0, bias=0.0): + """Compute the fast Hankel transform. + + Computes the discrete Hankel transform of a logarithmically spaced periodic + sequence using the FFTLog algorithm [1]_, [2]_. + + Parameters + ---------- + a : cupy.ndarray (..., n) + Real periodic input array, uniformly logarithmically spaced. For + multidimensional input, the transform is performed over the last axis. + dln : float + Uniform logarithmic spacing of the input array. + mu : float + Order of the Hankel transform, any positive or negative real number. + offset : float, optional + Offset of the uniform logarithmic spacing of the output array. + bias : float, optional + Exponent of power law bias, any positive or negative real number. + + Returns + ------- + A : cupy.ndarray (..., n) + The transformed output array, which is real, periodic, uniformly + logarithmically spaced, and of the same shape as the input array. + + See Also + -------- + :func:`scipy.special.fht` + :func:`scipy.special.fhtoffset` : Return an optimal offset for `fht`. + + References + ---------- + .. [1] Talman J. D., 1978, J. Comp. Phys., 29, 35 + .. [2] Hamilton A. J. 
S., 2000, MNRAS, 312, 257 (astro-ph/9905191) + + """ + + # size of transform + n = a.shape[-1] + + # bias input array + if bias != 0: + # a_q(r) = a(r) (r/r_c)^{-q} + j_c = (n-1)/2 + j = cupy.arange(n) + a = a * cupy.exp(-bias*(j - j_c)*dln) + + # compute FHT coefficients + u = fhtcoeff(n, dln, mu, offset=offset, bias=bias) + + # transform + A = _fhtq(a, u) + + # bias output array + if bias != 0: + # A(k) = A_q(k) (k/k_c)^{-q} (k_c r_c)^{-q} + A *= cupy.exp(-bias*((j - j_c)*dln + offset)) + + return A + + +@_fft._implements(_scipy_fft.ifht) +def ifht(A, dln, mu, offset=0.0, bias=0.0): + """Compute the inverse fast Hankel transform. + + Computes the discrete inverse Hankel transform of a logarithmically spaced + periodic sequence. This is the inverse operation to `fht`. + + Parameters + ---------- + A : cupy.ndarray (..., n) + Real periodic input array, uniformly logarithmically spaced. For + multidimensional input, the transform is performed over the last axis. + dln : float + Uniform logarithmic spacing of the input array. + mu : float + Order of the Hankel transform, any positive or negative real number. + offset : float, optional + Offset of the uniform logarithmic spacing of the output array. + bias : float, optional + Exponent of power law bias, any positive or negative real number. + + Returns + ------- + a : cupy.ndarray (..., n) + The transformed output array, which is real, periodic, uniformly + logarithmically spaced, and of the same shape as the input array. + + See Also + -------- + :func:`scipy.special.ifht` + :func:`scipy.special.fhtoffset` : Return an optimal offset for `fht`. 
+ + """ + + # size of transform + n = A.shape[-1] + + # bias input array + if bias != 0: + # A_q(k) = A(k) (k/k_c)^{q} (k_c r_c)^{q} + j_c = (n - 1) / 2 + j = cupy.arange(n) + A = A * cupy.exp(bias * ((j - j_c) * dln + offset)) + + # compute FHT coefficients + u = fhtcoeff(n, dln, mu, offset=offset, bias=bias) + + # transform + a = _fhtq(A, u, inverse=True) + + # bias output array + if bias != 0: + # a(r) = a_q(r) (r/r_c)^{q} + a /= cupy.exp(-bias * (j - j_c) * dln) + + return a + + +def fhtcoeff(n, dln, mu, offset=0.0, bias=0.0): + '''Compute the coefficient array for a fast Hankel transform. + ''' + + lnkr, q = offset, bias + + # Hankel transform coefficients + # u_m = (kr)^{-i 2m pi/(n dlnr)} U_mu(q + i 2m pi/(n dlnr)) + # with U_mu(x) = 2^x Gamma((mu+1+x)/2)/Gamma((mu+1-x)/2) + xp = (mu + 1 + q)/2 + xm = (mu + 1 - q)/2 + y = cupy.linspace(0, math.pi * (n // 2) / (n * dln), n // 2 + 1) + u = cupy.empty(n // 2 + 1, dtype=complex) + v = cupy.empty(n // 2 + 1, dtype=complex) + u.imag[:] = y + u.real[:] = xm + loggamma(u, out=v) + u.real[:] = xp + loggamma(u, out=u) + y *= 2 * (LN_2 - lnkr) + u.real -= v.real + u.real += LN_2 * q + u.imag += v.imag + u.imag += y + cupy.exp(u, out=u) + + # fix last coefficient to be real + u.imag[-1] = 0 + + # deal with special cases + if not cupy.isfinite(u[0]): + # write u_0 = 2^q Gamma(xp)/Gamma(xm) = 2^q poch(xm, xp-xm) + # poch() handles special cases for negative integers correctly + u[0] = 2**q * poch(xm, xp - xm) + # the coefficient may be inf or 0, meaning the transform or the + # inverse transform, respectively, is singular + + return u + + +def _fhtq(a, u, inverse=False): + '''Compute the biased fast Hankel transform. + + This is the basic FFTLog routine. 
+ ''' + + # size of transform + n = a.shape[-1] + + # check for singular transform or singular inverse transform + if cupy.isinf(u[0]) and not inverse: + warn('singular transform; consider changing the bias') + # fix coefficient to obtain (potentially correct) transform anyway + u = u.copy() + u[0] = 0 + elif u[0] == 0 and inverse: + warn('singular inverse transform; consider changing the bias') + # fix coefficient to obtain (potentially correct) inverse anyway + u = u.copy() + u[0] = cupy.inf + + # biased fast Hankel transform via real FFT + A = _fft.rfft(a, axis=-1) + if not inverse: + # forward transform + A *= u + else: + # backward transform + A /= u.conj() + A = _fft.irfft(A, n, axis=-1) + A = A[..., ::-1] + + return A diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/__init__.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..36a445735e9588ca9b7359444c3837d3c36cfb1a --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/__init__.py @@ -0,0 +1,21 @@ +# Univariate Interpolation +from cupyx.scipy.interpolate._polyint import BarycentricInterpolator # NOQA +from cupyx.scipy.interpolate._polyint import KroghInterpolator # NOQA +from cupyx.scipy.interpolate._polyint import barycentric_interpolate # NOQA +from cupyx.scipy.interpolate._polyint import krogh_interpolate # NOQA +from cupyx.scipy.interpolate._interpolate import PPoly, BPoly, NdPPoly # NOQA +from cupyx.scipy.interpolate._cubic import ( # NOQA + CubicHermiteSpline, PchipInterpolator, pchip_interpolate, # NOQA + Akima1DInterpolator) # NOQA + +# 1-D Splines +from cupyx.scipy.interpolate._bspline import BSpline, splantider, splder # NOQA +from cupyx.scipy.interpolate._bspline2 import make_interp_spline # NOQA + +# Radial basis functions +from cupyx.scipy.interpolate._rbfinterp import RBFInterpolator # NOQA +from cupyx.scipy.interpolate._rgi import RegularGridInterpolator # 
NOQA +from cupyx.scipy.interpolate._rgi import interpn # NOQA + +# Backward compatibility +pchip = PchipInterpolator # NOQA diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c1beb7e40a60eaf9fb9b88b65dbbd31fee93bc36 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/__pycache__/_bspline.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/__pycache__/_bspline.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dee7c7e6a74efd11c9323dffb8ae28b6d760e4d2 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/__pycache__/_bspline.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/__pycache__/_bspline2.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/__pycache__/_bspline2.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..20ec65541cc60198ba36e46768399eeff161c810 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/__pycache__/_bspline2.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/__pycache__/_cubic.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/__pycache__/_cubic.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e5e5db91b635820cc542a7bb4b3d8f7ac1403f71 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/__pycache__/_cubic.cpython-310.pyc differ diff --git 
a/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/__pycache__/_interpolate.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/__pycache__/_interpolate.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..56ba240115d2077f4827f927b01918d1aeb27ecd Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/__pycache__/_interpolate.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/__pycache__/_polyint.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/__pycache__/_polyint.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..12a5731a29c39fb0f0528c7b7b7c8016cbf4a621 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/__pycache__/_polyint.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/__pycache__/_rbfinterp.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/__pycache__/_rbfinterp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d9890ca8f2ab87f4a79b3d74993e18c0f305acfa Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/__pycache__/_rbfinterp.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/__pycache__/_rgi.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/__pycache__/_rgi.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..00d98342f924c3ff9be390fef0de3d34556c73d2 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/__pycache__/_rgi.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/_bspline.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/_bspline.py new file mode 100644 index 
0000000000000000000000000000000000000000..853101ac75ce6450f7d483d91971e17efe57bbde --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/_bspline.py @@ -0,0 +1,943 @@ + +import operator + +import cupy +from cupy._core import internal +from cupy._core._scalar import get_typename + +from cupyx.scipy.sparse import csr_matrix + +import numpy as np + +TYPES = ['double', 'thrust::complex'] +INT_TYPES = ['int', 'long long'] + +INTERVAL_KERNEL = r''' +#include +extern "C" { +__global__ void find_interval( + const double* t, const double* x, long long* out, + int k, int n, bool extrapolate, int total_x) { + + int idx = blockDim.x * blockIdx.x + threadIdx.x; + if(idx >= total_x) { + return; + } + + double xp = *&x[idx]; + double tb = *&t[k]; + double te = *&t[n]; + + if(isnan(xp)) { + out[idx] = -1; + return; + } + + if((xp < tb || xp > te) && !extrapolate) { + out[idx] = -1; + return; + } + + int left = k; + int right = n; + int mid; + bool found = false; + + while(left < right && !found) { + mid = ((right + left) / 2); + if(xp > *&t[mid]) { + left = mid + 1; + } else if (xp < *&t[mid]) { + right = mid - 1; + } else { + found = true; + } + } + + int default_value = left - 1 < k ? k : left - 1; + int result = found ? 
mid + 1 : default_value + 1; + + while(result != n && xp >= *&t[result]) { + result++; + } + + out[idx] = result - 1; +} +} +''' + +INTERVAL_MODULE = cupy.RawModule( + code=INTERVAL_KERNEL, options=('-std=c++11',),) +# name_expressions=[f'find_interval<{type_name}>' for type_name in TYPES]) + + +D_BOOR_KERNEL = r''' +#include +#include +#define COMPUTE_LINEAR 0x1 + +template +__global__ void d_boor( + const double* t, const T* c, const int k, const int mu, + const double* x, const long long* intervals, T* out, + double* temp, int num_c, int mode, int num_x) { + + int idx = blockDim.x * blockIdx.x + threadIdx.x; + + if(idx >= num_x) { + return; + } + + double xp = *&x[idx]; + long long interval = *&intervals[idx]; + + double* h = temp + idx * (2 * k + 1); + double* hh = h + k + 1; + + int ind, j, n; + double xa, xb, w; + + if(mode == COMPUTE_LINEAR && interval < 0) { + for(j = 0; j < num_c; j++) { + out[num_c * idx + j] = CUDART_NAN; + } + return; + } + + /* + * Perform k-m "standard" deBoor iterations + * so that h contains the k+1 non-zero values of beta_{ell,k-m}(x) + * needed to calculate the remaining derivatives. 
+ */ + h[0] = 1.0; + for (j = 1; j <= k - mu; j++) { + for(int p = 0; p < j; p++) { + hh[p] = h[p]; + } + h[0] = 0.0; + for (n = 1; n <= j; n++) { + ind = interval + n; + xb = t[ind]; + xa = t[ind - j]; + if (xb == xa) { + h[n] = 0.0; + continue; + } + w = hh[n - 1]/(xb - xa); + h[n - 1] += w*(xb - xp); + h[n] = w*(xp - xa); + } + } + + /* + * Now do m "derivative" recursions + * to convert the values of beta into the mth derivative + */ + for (j = k - mu + 1; j <= k; j++) { + for(int p = 0; p < j; p++) { + hh[p] = h[p]; + } + h[0] = 0.0; + for (n = 1; n <= j; n++) { + ind = interval + n; + xb = t[ind]; + xa = t[ind - j]; + if (xb == xa) { + h[mu] = 0.0; + continue; + } + w = ((double) j) * hh[n - 1]/(xb - xa); + h[n - 1] -= w; + h[n] = w; + } + } + + if(mode != COMPUTE_LINEAR) { + return; + } + + // Compute linear combinations + for(j = 0; j < num_c; j++) { + out[num_c * idx + j] = 0; + for(n = 0; n < k + 1; n++) { + out[num_c * idx + j] = ( + out[num_c * idx + j] + + c[(interval + n - k) * num_c + j] * ((T) h[n])); + } + } + +} +''' + +D_BOOR_MODULE = cupy.RawModule( + code=D_BOOR_KERNEL, options=('-std=c++11',), + name_expressions=[f'd_boor<{type_name}>' for type_name in TYPES]) + + +DESIGN_MAT_KERNEL = r''' +#include + +template +__global__ void compute_design_matrix( + const int k, const long long* intervals, double* bspline_basis, + double* data, U* indices, int num_intervals) { + + int idx = blockDim.x * blockIdx.x + threadIdx.x; + if(idx >= num_intervals) { + return; + } + + long long interval = *&intervals[idx]; + + double* work = bspline_basis + idx * (2 * k + 1); + + for(int j = 0; j <= k; j++) { + int m = (k + 1) * idx + j; + data[m] = work[j]; + indices[m] = (U) (interval - k + j); + } +} +''' + +DESIGN_MAT_MODULE = cupy.RawModule( + code=DESIGN_MAT_KERNEL, options=('-std=c++11',), + name_expressions=[f'compute_design_matrix<{itype}>' + for itype in INT_TYPES]) + + +def _get_module_func(module, func_name, *template_args): + def _get_typename(dtype): + 
typename = get_typename(dtype) + if dtype.kind == 'c': + typename = 'thrust::' + typename + return typename + args_dtypes = [_get_typename(arg.dtype) for arg in template_args] + template = ', '.join(args_dtypes) + kernel_name = f'{func_name}<{template}>' if template_args else func_name + kernel = module.get_function(kernel_name) + return kernel + + +def _get_dtype(dtype): + """Return np.complex128 for complex dtypes, np.float64 otherwise.""" + if cupy.issubdtype(dtype, cupy.complexfloating): + return cupy.complex_ + else: + return cupy.float_ + + +def _as_float_array(x, check_finite=False): + """Convert the input into a C contiguous float array. + NB: Upcasts half- and single-precision floats to double precision. + """ + x = cupy.ascontiguousarray(x) + dtyp = _get_dtype(x.dtype) + x = x.astype(dtyp, copy=False) + if check_finite and not cupy.isfinite(x).all(): + raise ValueError("Array must not contain infs or nans.") + return x + + +def _evaluate_spline(t, c, k, xp, nu, extrapolate, out): + """ + Evaluate a spline in the B-spline basis. + + Parameters + ---------- + t : ndarray, shape (n+k+1) + knots + c : ndarray, shape (n, m) + B-spline coefficients + xp : ndarray, shape (s,) + Points to evaluate the spline at. + nu : int + Order of derivative to evaluate. + extrapolate : int, optional + Whether to extrapolate to ouf-of-bounds points, or to return NaNs. + out : ndarray, shape (s, m) + Computed values of the spline at each of the input points. + This argument is modified in-place. 
+ """ + n = t.shape[0] - k - 1 + intervals = cupy.empty_like(xp, dtype=cupy.int64) + + # Compute intervals for each value + interval_kernel = _get_module_func(INTERVAL_MODULE, 'find_interval') + interval_kernel(((xp.shape[0] + 128 - 1) // 128,), (128,), + (t, xp, intervals, k, n, extrapolate, xp.shape[0])) + + # Compute interpolation + num_c = int(np.prod(c.shape[1:])) + temp = cupy.empty(xp.shape[0] * (2 * k + 1)) + d_boor_kernel = _get_module_func(D_BOOR_MODULE, 'd_boor', c) + d_boor_kernel(((xp.shape[0] + 128 - 1) // 128,), (128,), + (t, c, k, nu, xp, intervals, out, temp, num_c, 1, + xp.shape[0])) + + +def _make_design_matrix(x, t, k, extrapolate, indices): + """ + Returns a design matrix in CSR format. + Note that only indices is passed, but not indptr because indptr is already + precomputed in the calling Python function design_matrix. + + Parameters + ---------- + x : array_like, shape (n,) + Points to evaluate the spline at. + t : array_like, shape (nt,) + Sorted 1D array of knots. + k : int + B-spline degree. + extrapolate : bool, optional + Whether to extrapolate to ouf-of-bounds points. + indices : ndarray, shape (n * (k + 1),) + Preallocated indices of the final CSR array. + Returns + ------- + data + The data array of a CSR array of the b-spline design matrix. + In each row all the basis elements are evaluated at the certain point + (first row - x[0], ..., last row - x[-1]). + + indices + The indices array of a CSR array of the b-spline design matrix. 
+ """ + n = t.shape[0] - k - 1 + intervals = cupy.empty_like(x, dtype=cupy.int64) + + # Compute intervals for each value + interval_kernel = _get_module_func(INTERVAL_MODULE, 'find_interval') + interval_kernel(((x.shape[0] + 128 - 1) // 128,), (128,), + (t, x, intervals, k, n, extrapolate, x.shape[0])) + + # Compute interpolation + bspline_basis = cupy.empty(x.shape[0] * (2 * k + 1)) + d_boor_kernel = _get_module_func(D_BOOR_MODULE, 'd_boor', x) + d_boor_kernel(((x.shape[0] + 128 - 1) // 128,), (128,), + (t, None, k, 0, x, intervals, None, bspline_basis, 0, 0, + x.shape[0])) + + data = cupy.zeros(x.shape[0] * (k + 1), dtype=cupy.float64) + design_mat_kernel = _get_module_func( + DESIGN_MAT_MODULE, 'compute_design_matrix', indices) + design_mat_kernel(((x.shape[0] + 128 - 1) // 128,), (128,), + (k, intervals, bspline_basis, data, indices, + x.shape[0])) + + return data, indices + + +def splder(tck, n=1): + """ + Compute the spline representation of the derivative of a given spline + + Parameters + ---------- + tck : tuple of (t, c, k) + Spline whose derivative to compute + n : int, optional + Order of derivative to evaluate. Default: 1 + + Returns + ------- + tck_der : tuple of (t2, c2, k2) + Spline of order k2=k-n representing the derivative + of the input spline. + + Notes + ----- + .. seealso:: :class:`scipy.interpolate.splder` + + See Also + -------- + splantider, splev, spalde + """ + if n < 0: + return splantider(tck, -n) + + t, c, k = tck + + if n > k: + raise ValueError(("Order of derivative (n = %r) must be <= " + "order of spline (k = %r)") % (n, tck[2])) + + # Extra axes for the trailing dims of the `c` array: + sh = (slice(None),) + ((None,)*len(c.shape[1:])) + + try: + for j in range(n): + # See e.g. Schumaker, Spline Functions: Basic Theory, Chapter 5 + + # Compute the denominator in the differentiation formula. 
+ # (and append trailing dims, if necessary) + dt = t[k+1:-1] - t[1:-k-1] + dt = dt[sh] + # Compute the new coefficients + c = (c[1:-1-k] - c[:-2-k]) * k / dt + # Pad coefficient array to same size as knots (FITPACK + # convention) + c = cupy.r_[c, np.zeros((k,) + c.shape[1:])] + # Adjust knots + t = t[1:-1] + k -= 1 + except FloatingPointError as e: + raise ValueError(("The spline has internal repeated knots " + "and is not differentiable %d times") % n) from e + + return t, c, k + + +def splantider(tck, n=1): + """ + Compute the spline for the antiderivative (integral) of a given spline. + + Parameters + ---------- + tck : tuple of (t, c, k) + Spline whose antiderivative to compute + n : int, optional + Order of antiderivative to evaluate. Default: 1 + + Returns + ------- + tck_ader : tuple of (t2, c2, k2) + Spline of order k2=k+n representing the antiderivative of the input + spline. + + See Also + -------- + splder, splev, spalde + + Notes + ----- + The `splder` function is the inverse operation of this function. + Namely, ``splder(splantider(tck))`` is identical to `tck`, modulo + rounding error. + + .. seealso:: :class:`scipy.interpolate.splantider` + """ + if n < 0: + return splder(tck, -n) + + t, c, k = tck + + # Extra axes for the trailing dims of the `c` array: + sh = (slice(None),) + (None,)*len(c.shape[1:]) + + for j in range(n): + # This is the inverse set of operations to splder. + + # Compute the multiplier in the antiderivative formula. + dt = t[k+1:] - t[:-k-1] + dt = dt[sh] + # Compute the new coefficients + c = cupy.cumsum(c[:-k-1] * dt, axis=0) / (k + 1) + c = cupy.r_[cupy.zeros((1,) + c.shape[1:]), + c, [c[-1]] * (k+2)] + # New knots + t = cupy.r_[t[0], t, t[-1]] + k += 1 + + return t, c, k + + +class BSpline: + r"""Univariate spline in the B-spline basis. + + .. math:: + S(x) = \sum_{j=0}^{n-1} c_j B_{j, k; t}(x) + + where :math:`B_{j, k; t}` are B-spline basis functions of degree `k` + and knots `t`. 

    Parameters
    ----------
    t : ndarray, shape (n+k+1,)
        knots
    c : ndarray, shape (>=n, ...)
        spline coefficients
    k : int
        B-spline degree
    extrapolate : bool or 'periodic', optional
        whether to extrapolate beyond the base interval, ``t[k] .. t[n]``,
        or to return nans.
        If True, extrapolates the first and last polynomial pieces of b-spline
        functions active on the base interval.
        If 'periodic', periodic extrapolation is used.
        Default is True.
    axis : int, optional
        Interpolation axis. Default is zero.

    Attributes
    ----------
    t : ndarray
        knot vector
    c : ndarray
        spline coefficients
    k : int
        spline degree
    extrapolate : bool
        If True, extrapolates the first and last polynomial pieces of b-spline
        functions active on the base interval.
    axis : int
        Interpolation axis.
    tck : tuple
        A read-only equivalent of ``(self.t, self.c, self.k)``

    Notes
    -----
    B-spline basis elements are defined via

    .. math::
        B_{i, 0}(x) = 1, \textrm{if $t_i \le x < t_{i+1}$, otherwise $0$,}

        B_{i, k}(x) = \frac{x - t_i}{t_{i+k} - t_i} B_{i, k-1}(x)
                 + \frac{t_{i+k+1} - x}{t_{i+k+1} - t_{i+1}} B_{i+1, k-1}(x)

    **Implementation details**

    - At least ``k+1`` coefficients are required for a spline of degree `k`,
      so that ``n >= k+1``. Additional coefficients, ``c[j]`` with
      ``j > n``, are ignored.

    - B-spline basis elements of degree `k` form a partition of unity on the
      *base interval*, ``t[k] <= x <= t[n]``.

    - Based on [1]_ and [2]_

    .. seealso:: :class:`scipy.interpolate.BSpline`

    References
    ----------
    .. [1] Tom Lyche and Knut Morken, Spline methods,
        http://www.uio.no/studier/emner/matnat/ifi/INF-MAT5340/v05/undervisningsmateriale/
    .. [2] Carl de Boor, A practical guide to splines, Springer, 2001.
    """

    def __init__(self, t, c, k, extrapolate=True, axis=0):
        # Normalize degree to a plain int; knots are forced to a contiguous
        # float64 array since the CUDA kernels index them directly.
        self.k = operator.index(k)
        self.c = cupy.asarray(c)
        self.t = cupy.ascontiguousarray(t, dtype=cupy.float64)

        # 'periodic' is kept as the string sentinel; anything else collapses
        # to a plain bool.
        if extrapolate == 'periodic':
            self.extrapolate = extrapolate
        else:
            self.extrapolate = bool(extrapolate)

        # Number of b-spline basis elements implied by the knot vector.
        n = self.t.shape[0] - self.k - 1

        axis = internal._normalize_axis_index(axis, self.c.ndim)

        # Note that the normalized axis is stored in the object.
        self.axis = axis
        if axis != 0:
            # roll the interpolation axis to be the first one in self.c
            # More specifically, the target shape for self.c is (n, ...),
            # and axis !=0 means that we have c.shape (..., n, ...)
            #                                               ^
            #                                              axis
            self.c = cupy.moveaxis(self.c, axis, 0)

        # Validate the (t, c, k) triple only after the axis roll, so that
        # c.shape[0] is the interpolation dimension being checked.
        if k < 0:
            raise ValueError("Spline order cannot be negative.")
        if self.t.ndim != 1:
            raise ValueError("Knot vector must be one-dimensional.")
        if n < self.k + 1:
            raise ValueError("Need at least %d knots for degree %d" %
                             (2*k + 2, k))
        if (cupy.diff(self.t) < 0).any():
            raise ValueError("Knots must be in a non-decreasing order.")
        if len(cupy.unique(self.t[k:n+1])) < 2:
            raise ValueError("Need at least two internal knots.")
        if not cupy.isfinite(self.t).all():
            raise ValueError("Knots should not have nans or infs.")
        if self.c.ndim < 1:
            raise ValueError("Coefficients must be at least 1-dimensional.")
        if self.c.shape[0] < n:
            raise ValueError(
                "Knots, coefficients and degree are inconsistent.")

        # Upcast coefficients to float64/complex128 and make them contiguous
        # for the kernels.
        dt = _get_dtype(self.c.dtype)
        self.c = cupy.ascontiguousarray(self.c, dtype=dt)

    @classmethod
    def construct_fast(cls, t, c, k, extrapolate=True, axis=0):
        """Construct a spline without making checks.

        Accepts same parameters as the regular constructor. Input arrays
        `t` and `c` must be of correct shape and dtype.
+ """ + self = object.__new__(cls) + self.t, self.c, self.k = t, c, k + self.extrapolate = extrapolate + self.axis = axis + return self + + @property + def tck(self): + """Equivalent to ``(self.t, self.c, self.k)`` (read-only). + """ + return self.t, self.c, self.k + + @classmethod + def basis_element(cls, t, extrapolate=True): + """Return a B-spline basis element ``B(x | t[0], ..., t[k+1])``. + + Parameters + ---------- + t : ndarray, shape (k+2,) + internal knots + extrapolate : bool or 'periodic', optional + whether to extrapolate beyond the base interval, + ``t[0] .. t[k+1]``, or to return nans. + If 'periodic', periodic extrapolation is used. + Default is True. + + Returns + ------- + basis_element : callable + A callable representing a B-spline basis element for the knot + vector `t`. + + Notes + ----- + The degree of the B-spline, `k`, is inferred from the length of `t` as + ``len(t)-2``. The knot vector is constructed by appending and + prepending ``k+1`` elements to internal knots `t`. + + .. seealso:: :class:`scipy.interpolate.BSpline` + """ + k = len(t) - 2 + t = _as_float_array(t) + t = cupy.r_[(t[0]-1,) * k, t, (t[-1]+1,) * k] + c = cupy.zeros_like(t) + c[k] = 1. + return cls.construct_fast(t, c, k, extrapolate) + + @classmethod + def design_matrix(cls, x, t, k, extrapolate=False): + """ + Returns a design matrix as a CSR format sparse array. + + Parameters + ---------- + x : array_like, shape (n,) + Points to evaluate the spline at. + t : array_like, shape (nt,) + Sorted 1D array of knots. + k : int + B-spline degree. + extrapolate : bool or 'periodic', optional + Whether to extrapolate based on the first and last intervals + or raise an error. If 'periodic', periodic extrapolation is used. + Default is False. + + Returns + ------- + design_matrix : `csr_matrix` object + Sparse matrix in CSR format where each row contains all the basis + elements of the input row (first row = basis elements of x[0], + ..., last row = basis elements x[-1]). 
+ + Notes + ----- + In each row of the design matrix all the basis elements are evaluated + at the certain point (first row - x[0], ..., last row - x[-1]). + `nt` is a length of the vector of knots: as far as there are + `nt - k - 1` basis elements, `nt` should be not less than `2 * k + 2` + to have at least `k + 1` basis element. + + Out of bounds `x` raises a ValueError. + + .. note:: + This method returns a `csr_matrix` instance as CuPy still does not + have `csr_array`. + + .. seealso:: :class:`scipy.interpolate.BSpline` + """ + x = _as_float_array(x, True) + t = _as_float_array(t, True) + + if extrapolate != 'periodic': + extrapolate = bool(extrapolate) + + if k < 0: + raise ValueError("Spline order cannot be negative.") + if t.ndim != 1 or np.any(t[1:] < t[:-1]): + raise ValueError(f"Expect t to be a 1-D sorted array_like, but " + f"got t={t}.") + # There are `nt - k - 1` basis elements in a BSpline built on the + # vector of knots with length `nt`, so to have at least `k + 1` basis + # elements we need to have at least `2 * k + 2` elements in the vector + # of knots. + if len(t) < 2 * k + 2: + raise ValueError(f"Length t is not enough for k={k}.") + + if extrapolate == 'periodic': + # With periodic extrapolation we map x to the segment + # [t[k], t[n]]. + n = t.size - k - 1 + x = t[k] + (x - t[k]) % (t[n] - t[k]) + extrapolate = False + elif not extrapolate and ( + (min(x) < t[k]) or (max(x) > t[t.shape[0] - k - 1]) + ): + # Checks from `find_interval` function + raise ValueError(f'Out of bounds w/ x = {x}.') + + # Compute number of non-zeros of final CSR array in order to determine + # the dtype of indices and indptr of the CSR array. 
+ n = x.shape[0] + nnz = n * (k + 1) + if nnz < cupy.iinfo(cupy.int32).max: + int_dtype = cupy.int32 + else: + int_dtype = cupy.int64 + # Preallocate indptr and indices + indices = cupy.empty(n * (k + 1), dtype=int_dtype) + indptr = cupy.arange(0, (n + 1) * (k + 1), k + 1, dtype=int_dtype) + + # indptr is not passed to CUDA as it is already fully computed + data, indices = _make_design_matrix( + x, t, k, extrapolate, indices + ) + return csr_matrix( + (data, indices, indptr), + shape=(x.shape[0], t.shape[0] - k - 1) + ) + + def __call__(self, x, nu=0, extrapolate=None): + """ + Evaluate a spline function. + + Parameters + ---------- + x : array_like + points to evaluate the spline at. + nu : int, optional + derivative to evaluate (default is 0). + extrapolate : bool or 'periodic', optional + whether to extrapolate based on the first and last intervals + or return nans. If 'periodic', periodic extrapolation is used. + Default is `self.extrapolate`. + + Returns + ------- + y : array_like + Shape is determined by replacing the interpolation axis + in the coefficient array with the shape of `x`. + """ + if extrapolate is None: + extrapolate = self.extrapolate + + x = cupy.asarray(x) + x_shape, x_ndim = x.shape, x.ndim + x = cupy.ascontiguousarray(cupy.ravel(x), dtype=cupy.float64) + + # With periodic extrapolation we map x to the segment + # [self.t[k], self.t[n]]. 
        if extrapolate == 'periodic':
            n = self.t.size - self.k - 1
            x = self.t[self.k] + (x - self.t[self.k]) % (self.t[n] -
                                                         self.t[self.k])
            extrapolate = False

        # The kernel writes into a 2-D (n_points, prod(trailing dims))
        # scratch array that is reshaped afterwards.
        out = cupy.empty(
            (len(x), int(np.prod(self.c.shape[1:]))), dtype=self.c.dtype)

        self._evaluate(x, nu, extrapolate, out)
        out = out.reshape(x_shape + self.c.shape[1:])
        if self.axis != 0:
            # transpose to move the calculated values to the interpolation axis
            dim_order = list(range(out.ndim))
            dim_order = (
                dim_order[x_ndim:x_ndim+self.axis] +
                dim_order[:x_ndim] +
                dim_order[x_ndim+self.axis:])
            out = out.transpose(dim_order)

        return out

    def _ensure_c_contiguous(self):
        # Copy knots/coefficients only when they are not already
        # C-contiguous (the kernels require contiguous buffers).
        if not self.t.flags.c_contiguous:
            self.t = self.t.copy()
        if not self.c.flags.c_contiguous:
            self.c = self.c.copy()

    def _evaluate(self, xp, nu, extrapolate, out):
        # Collapse trailing coefficient dims into one before handing off to
        # the kernel driver.
        _evaluate_spline(self.t, self.c.reshape(self.c.shape[0], -1),
                         self.k, xp, nu, extrapolate, out)

    def derivative(self, nu=1):
        """
        Return a B-spline representing the derivative.

        Parameters
        ----------
        nu : int, optional
            Derivative order.
            Default is 1.

        Returns
        -------
        b : BSpline object
            A new instance representing the derivative.

        See Also
        --------
        splder, splantider
        """
        c = self.c
        # pad the c array if needed
        ct = len(self.t) - len(c)
        if ct > 0:
            c = cupy.r_[c, cupy.zeros((ct,) + c.shape[1:])]
        tck = splder((self.t, c, self.k), nu)
        return self.construct_fast(*tck, extrapolate=self.extrapolate,
                                   axis=self.axis)

    def antiderivative(self, nu=1):
        """
        Return a B-spline representing the antiderivative.

        Parameters
        ----------
        nu : int, optional
            Antiderivative order. Default is 1.

        Returns
        -------
        b : BSpline object
            A new instance representing the antiderivative.

        Notes
        -----
        If antiderivative is computed and ``self.extrapolate='periodic'``,
        it will be set to False for the returned instance. This is done because
        the antiderivative is no longer periodic and its correct evaluation
        outside of the initially given x interval is difficult.

        See Also
        --------
        splder, splantider
        """
        c = self.c
        # pad the c array if needed
        ct = len(self.t) - len(c)
        if ct > 0:
            c = cupy.r_[c, cupy.zeros((ct,) + c.shape[1:])]
        tck = splantider((self.t, c, self.k), nu)

        if self.extrapolate == 'periodic':
            extrapolate = False
        else:
            extrapolate = self.extrapolate

        return self.construct_fast(*tck, extrapolate=extrapolate,
                                   axis=self.axis)

    def integrate(self, a, b, extrapolate=None):
        """
        Compute a definite integral of the spline.

        Parameters
        ----------
        a : float
            Lower limit of integration.
        b : float
            Upper limit of integration.
        extrapolate : bool or 'periodic', optional
            whether to extrapolate beyond the base interval,
            ``t[k] .. t[-k-1]``, or take the spline to be zero outside of the
            base interval. If 'periodic', periodic extrapolation is used.
            If None (default), use `self.extrapolate`.

        Returns
        -------
        I : array_like
            Definite integral of the spline over the interval ``[a, b]``.
        """
        if extrapolate is None:
            extrapolate = self.extrapolate

        # Prepare self.t and self.c.
        self._ensure_c_contiguous()

        # Swap integration bounds if needed.
        sign = 1
        if b < a:
            a, b = b, a
            sign = -1
        n = self.t.size - self.k - 1

        if extrapolate != "periodic" and not extrapolate:
            # Shrink the integration interval, if needed.
            a = max(a, self.t[self.k].item())
            b = min(b, self.t[n].item())

        # if self.c.ndim == 1:
        #     # Fast path: use FITPACK's routine
        #     # (cf _fitpack_impl.splint).
        #     integral = splint(a, b, self.tck)
        #     return integral * sign

        # Scratch for evaluating the antiderivative at the two endpoints.
        out = cupy.empty(
            (2, int(np.prod(self.c.shape[1:]))), dtype=self.c.dtype)

        # Compute the antiderivative.
+ c = self.c + ct = len(self.t) - len(c) + if ct > 0: + c = cupy.r_[c, cupy.zeros((ct,) + c.shape[1:])] + ta, ca, ka = splantider((self.t, c, self.k), 1) + + if extrapolate == 'periodic': + # Split the integral into the part over period (can be several + # of them) and the remaining part. + + ts, te = self.t[self.k], self.t[n] + period = te - ts + interval = b - a + n_periods, left = divmod(interval, period) + + if n_periods > 0: + # Evaluate the difference of antiderivatives. + x = cupy.asarray([ts, te], dtype=cupy.float64) + _evaluate_spline(ta, ca.reshape(ca.shape[0], -1), + ka, x, 0, False, out) + integral = out[1] - out[0] + integral *= n_periods + else: + integral = cupy.zeros((1, int(np.prod(self.c.shape[1:]))), + dtype=self.c.dtype) + + # Map a to [ts, te], b is always a + left. + a = ts + (a - ts) % period + b = a + left + + # If b <= te then we need to integrate over [a, b], otherwise + # over [a, te] and from xs to what is remained. + if b <= te: + x = cupy.asarray([a, b], dtype=cupy.float64) + _evaluate_spline(ta, ca.reshape(ca.shape[0], -1), + ka, x, 0, False, out) + integral += out[1] - out[0] + else: + x = cupy.asarray([a, te], dtype=cupy.float64) + _evaluate_spline(ta, ca.reshape(ca.shape[0], -1), + ka, x, 0, False, out) + integral += out[1] - out[0] + + x = cupy.asarray([ts, ts + b - te], dtype=cupy.float64) + _evaluate_spline(ta, ca.reshape(ca.shape[0], -1), + ka, x, 0, False, out) + integral += out[1] - out[0] + else: + # Evaluate the difference of antiderivatives. 
+ x = cupy.asarray([a, b], dtype=cupy.float64) + _evaluate_spline(ta, ca.reshape(ca.shape[0], -1), + ka, x, 0, extrapolate, out) + integral = out[1] - out[0] + + integral *= sign + return integral.reshape(ca.shape[1:]) diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/_bspline2.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/_bspline2.py new file mode 100644 index 0000000000000000000000000000000000000000..bacf5df964467f0948d9ae8080fae330ecfc0a9b --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/_bspline2.py @@ -0,0 +1,568 @@ +import operator + +import numpy +if numpy.__version__ < '2': + from numpy.core.multiarray import normalize_axis_index +else: + from numpy.lib.array_utils import normalize_axis_index + +import cupy +from cupyx.scipy import sparse +from cupyx.scipy.sparse.linalg import spsolve + +from cupyx.scipy.interpolate._bspline import ( + _get_module_func, INTERVAL_MODULE, D_BOOR_MODULE, BSpline) + + +def _get_dtype(dtype): + """Return np.complex128 for complex dtypes, np.float64 otherwise.""" + if cupy.issubdtype(dtype, cupy.complexfloating): + return cupy.complex_ + else: + return cupy.float_ + + +def _as_float_array(x, check_finite=False): + """Convert the input into a C contiguous float array. + + NB: Upcasts half- and single-precision floats to double precision. + """ + x = cupy.asarray(x) + x = cupy.ascontiguousarray(x) + dtyp = _get_dtype(x.dtype) + x = x.astype(dtyp, copy=False) + if check_finite and not cupy.isfinite(x).all(): + raise ValueError("Array must not contain infs or nans.") + return x + + +# vendored from scipy/_lib/_util.py +def prod(iterable): + """ + Product of a sequence of numbers. + Faster than np.prod for short lists like array shapes, and does + not overflow if using Python integers. 
+ """ + product = 1 + for x in iterable: + product *= x + return product + + +################################# +# Interpolating spline helpers # +################################# + +def _not_a_knot(x, k): + """Given data x, construct the knot vector w/ not-a-knot BC. + cf de Boor, XIII(12).""" + x = cupy.asarray(x) + if k % 2 != 1: + raise ValueError("Odd degree for now only. Got %s." % k) + + m = (k - 1) // 2 + t = x[m+1:-m-1] + t = cupy.r_[(x[0],)*(k+1), t, (x[-1],)*(k+1)] + return t + + +def _augknt(x, k): + """Construct a knot vector appropriate for the order-k interpolation.""" + return cupy.r_[(x[0],)*k, x, (x[-1],)*k] + + +def _periodic_knots(x, k): + """Returns vector of nodes on a circle.""" + xc = cupy.copy(x) + n = len(xc) + if k % 2 == 0: + dx = cupy.diff(xc) + xc[1: -1] -= dx[:-1] / 2 + dx = cupy.diff(xc) + t = cupy.zeros(n + 2 * k) + t[k: -k] = xc + for i in range(0, k): + # filling first `k` elements in descending order + t[k - i - 1] = t[k - i] - dx[-(i % (n - 1)) - 1] + # filling last `k` elements in ascending order + t[-k + i] = t[-k + i - 1] + dx[i % (n - 1)] + return t + + +def _convert_string_aliases(deriv, target_shape): + if isinstance(deriv, str): + if deriv == "clamped": + deriv = [(1, cupy.zeros(target_shape))] + elif deriv == "natural": + deriv = [(2, cupy.zeros(target_shape))] + else: + raise ValueError("Unknown boundary condition : %s" % deriv) + return deriv + + +def _process_deriv_spec(deriv): + if deriv is not None: + try: + ords, vals = zip(*deriv) + except TypeError as e: + msg = ("Derivatives, `bc_type`, should be specified as a pair of " + "iterables of pairs of (order, value).") + raise ValueError(msg) from e + else: + ords, vals = [], [] + return cupy.atleast_1d(ords, vals) + + +def make_interp_spline(x, y, k=3, t=None, bc_type=None, axis=0, + check_finite=True): + """Compute the (coefficients of) interpolating B-spline. + + Parameters + ---------- + x : array_like, shape (n,) + Abscissas. + y : array_like, shape (n, ...) 
+ Ordinates. + k : int, optional + B-spline degree. Default is cubic, ``k = 3``. + t : array_like, shape (nt + k + 1,), optional. + Knots. + The number of knots needs to agree with the number of data points and + the number of derivatives at the edges. Specifically, ``nt - n`` must + equal ``len(deriv_l) + len(deriv_r)``. + bc_type : 2-tuple or None + Boundary conditions. + Default is None, which means choosing the boundary conditions + automatically. Otherwise, it must be a length-two tuple where the first + element (``deriv_l``) sets the boundary conditions at ``x[0]`` and + the second element (``deriv_r``) sets the boundary conditions at + ``x[-1]``. Each of these must be an iterable of pairs + ``(order, value)`` which gives the values of derivatives of specified + orders at the given edge of the interpolation interval. + Alternatively, the following string aliases are recognized: + + * ``"clamped"``: The first derivatives at the ends are zero. This is + equivalent to ``bc_type=([(1, 0.0)], [(1, 0.0)])``. + * ``"natural"``: The second derivatives at ends are zero. This is + equivalent to ``bc_type=([(2, 0.0)], [(2, 0.0)])``. + * ``"not-a-knot"`` (default): The first and second segments are the + same polynomial. This is equivalent to having ``bc_type=None``. + * ``"periodic"``: The values and the first ``k-1`` derivatives at the + ends are equivalent. + + axis : int, optional + Interpolation axis. Default is 0. + check_finite : bool, optional + Whether to check that the input arrays contain only finite numbers. + Disabling may give a performance gain, but may result in problems + (crashes, non-termination) if the inputs do contain infinities or NaNs. + Default is True. + + Returns + ------- + b : a BSpline object of the degree ``k`` and with knots ``t``. 
+ + """ + # convert string aliases for the boundary conditions + if bc_type is None or bc_type == 'not-a-knot' or bc_type == 'periodic': + deriv_l, deriv_r = None, None + elif isinstance(bc_type, str): + deriv_l, deriv_r = bc_type, bc_type + else: + try: + deriv_l, deriv_r = bc_type + except TypeError as e: + raise ValueError("Unknown boundary condition: %s" % bc_type) from e + + y = cupy.asarray(y) + + axis = normalize_axis_index(axis, y.ndim) + + x = _as_float_array(x, check_finite) + y = _as_float_array(y, check_finite) + + y = cupy.moveaxis(y, axis, 0) # now internally interp axis is zero + + # sanity check the input + if bc_type == 'periodic' and not cupy.allclose(y[0], y[-1], atol=1e-15): + raise ValueError("First and last points does not match while " + "periodic case expected") + if x.size != y.shape[0]: + raise ValueError('Shapes of x {} and y {} are incompatible' + .format(x.shape, y.shape)) + if (x[1:] == x[:-1]).any(): + raise ValueError("Expect x to not have duplicates") + if x.ndim != 1 or (x[1:] < x[:-1]).any(): + raise ValueError("Expect x to be a 1D strictly increasing sequence.") + + # special-case k=0 right away + if k == 0: + if any(_ is not None for _ in (t, deriv_l, deriv_r)): + raise ValueError("Too much info for k=0: t and bc_type can only " + "be None.") + t = cupy.r_[x, x[-1]] + c = cupy.asarray(y) + c = cupy.ascontiguousarray(c, dtype=_get_dtype(c.dtype)) + return BSpline.construct_fast(t, c, k, axis=axis) + + # special-case k=1 (e.g., Lyche and Morken, Eq.(2.16)) + if k == 1 and t is None: + if not (deriv_l is None and deriv_r is None): + raise ValueError( + "Too much info for k=1: bc_type can only be None.") + t = cupy.r_[x[0], x, x[-1]] + c = cupy.asarray(y) + c = cupy.ascontiguousarray(c, dtype=_get_dtype(c.dtype)) + return BSpline.construct_fast(t, c, k, axis=axis) + + k = operator.index(k) + + if bc_type == 'periodic' and t is not None: + raise NotImplementedError("For periodic case t is constructed " + "automatically and can not be 
passed " + "manually") + + # come up with a sensible knot vector, if needed + if t is None: + if deriv_l is None and deriv_r is None: + if bc_type == 'periodic': + t = _periodic_knots(x, k) + elif k == 2: + # OK, it's a bit ad hoc: Greville sites + omit + # 2nd and 2nd-to-last points, a la not-a-knot + t = (x[1:] + x[:-1]) / 2. + t = cupy.r_[(x[0],)*(k+1), + t[1:-1], + (x[-1],)*(k+1)] + else: + t = _not_a_knot(x, k) + else: + t = _augknt(x, k) + + t = _as_float_array(t, check_finite) + + if k < 0: + raise ValueError("Expect non-negative k.") + if t.ndim != 1 or (t[1:] < t[:-1]).any(): + raise ValueError("Expect t to be a 1-D sorted array_like.") + if t.size < x.size + k + 1: + raise ValueError('Got %d knots, need at least %d.' % + (t.size, x.size + k + 1)) + if (x[0] < t[k]) or (x[-1] > t[-k]): + raise ValueError('Out of bounds w/ x = %s.' % x) + + if bc_type == 'periodic': + return _make_periodic_spline(x, y, t, k, axis) + + # Here : deriv_l, r = [(nu, value), ...] + deriv_l = _convert_string_aliases(deriv_l, y.shape[1:]) + deriv_l_ords, deriv_l_vals = _process_deriv_spec(deriv_l) + nleft = deriv_l_ords.shape[0] + + deriv_r = _convert_string_aliases(deriv_r, y.shape[1:]) + deriv_r_ords, deriv_r_vals = _process_deriv_spec(deriv_r) + nright = deriv_r_ords.shape[0] + + # have `n` conditions for `nt` coefficients; need nt-n derivatives + n = x.size + nt = t.size - k - 1 + + if nt - n != nleft + nright: + raise ValueError("The number of derivatives at boundaries does not " + "match: expected %s, got %s + %s" % + (nt-n, nleft, nright)) + + # bail out if the `y` array is zero-sized + if y.size == 0: + c = cupy.zeros((nt,) + y.shape[1:], dtype=float) + return BSpline.construct_fast(t, c, k, axis=axis) + + # Construct the colocation matrix of b-splines + boundary conditions. 
+ # The coefficients of the interpolating B-spline function are the solution + # of the linear system `A @ c = rhs` where `A` is the colocation matrix + # (i.e., each row of A corresponds to a data point in the `x` array and + # contains b-splines which are non-zero at this value of x) + # Each boundary condition is a fixed value of a certain derivative + # at the edge, so each derivative adds a row to `A`. + # The `rhs` is the array of data values, `y`, plus derivatives from + # boundary conditions, if any. + # The colocation matrix is banded (has at most k+1 diagonals). Since LAPACK + # linear algebra (?gbsv) is not available, we store it as a CSR array + + # 1. Construct the colocation matrix itself. + matr = BSpline.design_matrix(x, t, k) + + # 2. Boundary conditions: need to augment the design matrix with additional + # rows, one row per derivative at the left and right edges. + # The left-side boundary conditions go to the first rows of the matrix + # and the right-side boundary conditions go to the last rows. + # Will need a python loop for each derivative because in general they + # can be of any order, `m`. + # To compute the derivatives, will invoke the de Boor D kernel. + if nleft > 0 or nright > 0: + # Prepare the I/O arrays for the kernels. We only need the non-zero + # b-splines at x[0] and x[-1], but the kernel wants more arrays which + # we allocate and ignore (mode != 1) + temp = cupy.zeros((2 * k + 1, ), dtype=float) + num_c = 1 + dummy_c = cupy.empty((nt, num_c), dtype=float) + out = cupy.empty((1, 1), dtype=dummy_c.dtype) + + d_boor_kernel = _get_module_func(D_BOOR_MODULE, 'd_boor', dummy_c) + + # find the intervals for x[0] and x[-1] + intervals_bc = cupy.empty(2, dtype=cupy.int64) + interval_kernel = _get_module_func(INTERVAL_MODULE, 'find_interval') + interval_kernel((1,), (2,), + (t, cupy.r_[x[0], x[-1]], intervals_bc, k, nt, + False, 2)) + + # 3. 
B.C.s at x[0] + if nleft > 0: + x0 = cupy.array([x[0]], dtype=x.dtype) + rows = cupy.zeros((nleft, nt), dtype=float) + + for j, m in enumerate(deriv_l_ords): + # place the derivatives of the order m at x[0] into `temp` + d_boor_kernel((1,), (1,), + (t, dummy_c, k, int(m), x0, intervals_bc, + out, # ignore (mode !=1), + temp, # non-zero b-splines + num_c, # the 2nd dimension of `dummy_c`. Ignore. + 0, # mode != 1 => do not touch dummy_c array + 1)) # the length of the `x0` array + left = intervals_bc[0] + rows[j, left-k:left+1] = temp[:k+1] + + matr = sparse.vstack([sparse.csr_matrix(rows), # A[:nleft, :] + matr]) + + # 4. Repeat for B.C.s at x[-1] + if nright > 0: + intervals_bc[0] = intervals_bc[-1] # use the intervals for x[-1] + x0 = cupy.array([x[-1]], dtype=x.dtype) + rows = cupy.zeros((nright, nt), dtype=float) + + for j, m in enumerate(deriv_r_ords): + # place the derivatives of the order m at x[0] into `temp` + d_boor_kernel((1,), (1,), + (t, dummy_c, k, int(m), x0, intervals_bc, + out, # ignore (mode !=1), + temp, # non-zero b-splines + num_c, # the 2nd dimension of `dummy_c`. Ignore. + 0, # mode != 1 => do not touch dummy_c array + 1)) # the length of the `x0` array + left = intervals_bc[0] + rows[j, left-k:left+1] = temp[:k+1] + + matr = sparse.vstack([matr, + sparse.csr_matrix(rows)]) # A[nleft+len(x):, :] + + # 5. Prepare the RHS: `y` values to interpolate (+ derivatives, if any) + extradim = prod(y.shape[1:]) + rhs = cupy.empty((nt, extradim), dtype=y.dtype) + if nleft > 0: + rhs[:nleft] = deriv_l_vals.reshape(-1, extradim) + rhs[nleft:nt - nright] = y.reshape(-1, extradim) + if nright > 0: + rhs[nt - nright:] = deriv_r_vals.reshape(-1, extradim) + + # 6. Finally, solve the linear system for the coefficients. + if cupy.issubdtype(rhs.dtype, cupy.complexfloating): + # avoid upcasting the l.h.s. 
to complex (that doubles the memory) + coef = (spsolve(matr, rhs.real) + + spsolve(matr, rhs.imag) * 1.j) + else: + coef = spsolve(matr, rhs) + coef = cupy.ascontiguousarray(coef.reshape((nt,) + y.shape[1:])) + return BSpline(t, coef, k) + + +def _make_interp_spline_full_matrix(x, y, k, t, bc_type): + """ Construct the interpolating spline spl(x) = y with *full* linalg. + + Only useful for testing, do not call directly! + This version is O(N**2) in memory and O(N**3) in flop count. + """ + # convert string aliases for the boundary conditions + if bc_type is None or bc_type == 'not-a-knot': + deriv_l, deriv_r = None, None + elif isinstance(bc_type, str): + deriv_l, deriv_r = bc_type, bc_type + else: + try: + deriv_l, deriv_r = bc_type + except TypeError as e: + raise ValueError("Unknown boundary condition: %s" % bc_type) from e + + # Here : deriv_l, r = [(nu, value), ...] + deriv_l = _convert_string_aliases(deriv_l, y.shape[1:]) + deriv_l_ords, deriv_l_vals = _process_deriv_spec(deriv_l) + nleft = deriv_l_ords.shape[0] + + deriv_r = _convert_string_aliases(deriv_r, y.shape[1:]) + deriv_r_ords, deriv_r_vals = _process_deriv_spec(deriv_r) + nright = deriv_r_ords.shape[0] + + # have `n` conditions for `nt` coefficients; need nt-n derivatives + n = x.size + nt = t.size - k - 1 + # Here : deriv_l, r = [(nu, value), ...] + deriv_l = _convert_string_aliases(deriv_l, y.shape[1:]) + deriv_l_ords, deriv_l_vals = _process_deriv_spec(deriv_l) + nleft = deriv_l_ords.shape[0] + + deriv_r = _convert_string_aliases(deriv_r, y.shape[1:]) + deriv_r_ords, deriv_r_vals = _process_deriv_spec(deriv_r) + nright = deriv_r_ords.shape[0] + + # have `n` conditions for `nt` coefficients; need nt-n derivatives + n = x.size + nt = t.size - k - 1 + assert nt - n == nleft + nright + + # Construct the colocation matrix of b-splines + boundary conditions. 
+ # The coefficients of the interpolating B-spline function are the solution + # of the linear system `A @ c = rhs` where `A` is the colocation matrix + # (i.e., each row of A corresponds to a data point in the `x` array and + # contains b-splines which are non-zero at this value of x) + # Each boundary condition is a fixed value of a certain derivative + # at the edge, so each derivative adds a row to `A`. + # The `rhs` is the array of data values, `y`, plus derivatives from + # boundary conditions, if any. + + # 1. Compute intervals for each value + intervals = cupy.empty_like(x, dtype=cupy.int64) + interval_kernel = _get_module_func(INTERVAL_MODULE, 'find_interval') + interval_kernel(((x.shape[0] + 128 - 1) // 128,), (128,), + (t, x, intervals, k, nt, False, x.shape[0])) + + # 2. Compute non-zero b-spline basis elements for each value in `x` + # The way de_Boor_D kernel is written, it wants `c` and `out` arrays + # which we do not use (but need to provide to the kernel), and the + # `temp` array contains non-zero b-spline basis elements, which we do want. + dummy_c = cupy.empty((nt, 1), dtype=float) + out = cupy.empty( + (len(x), prod(dummy_c.shape[1:])), dtype=dummy_c.dtype) + + num_c = prod(dummy_c.shape[1:]) + temp = cupy.empty(x.shape[0] * (2 * k + 1)) + d_boor_kernel = _get_module_func(D_BOOR_MODULE, 'd_boor', dummy_c) + d_boor_kernel(((x.shape[0] + 128 - 1) // 128,), (128,), + (t, dummy_c, k, 0, x, intervals, out, temp, num_c, 0, + x.shape[0])) + + # 3. Construct the colocation matrix. + # For each value in `x`, the `temp` array contains 2k+1 entries : first + # k+1 elements are b-splines, followed by k entries used for work storage + # which we ignore. + # XXX: full matrices! Can / should use banded linear algebra instead. + A = cupy.zeros((nt, nt), dtype=float) + offset = nleft + for j in range(len(x)): + left = intervals[j] + A[j + offset, left-k:left+1] = temp[j*(2*k+1):j*(2*k+1)+k+1] + + # 4. 
Handle boundary conditions: The colocation matrix is augmented with + # additional rows, one row per derivative at the left and right edges. + # We need a python loop for each derivative because in general they can be + # of any order, `m`. + # The left-side boundary conditions go to the first rows of the matrix + # and the right-side boundary conditions go to the last rows. + intervals_bc = cupy.empty(1, dtype=cupy.int64) + if nleft > 0: + intervals_bc[0] = intervals[0] + x0 = cupy.array([x[0]], dtype=x.dtype) + for j, m in enumerate(deriv_l_ords): + # place the derivatives of the order m at x[0] into `temp` + d_boor_kernel((1,), (1,), + (t, dummy_c, k, int(m), x0, intervals_bc, out, temp, + num_c, 0, 1)) + left = intervals_bc[0] + A[j, left-k:left+1] = temp[:k+1] + + # repeat for the b.c. at the right edge. + if nright > 0: + intervals_bc[0] = intervals[-1] + x0 = cupy.array([x[-1]], dtype=x.dtype) + for j, m in enumerate(deriv_r_ords): + # place the derivatives of the order m at x[0] into `temp` + d_boor_kernel((1,), (1,), + (t, dummy_c, k, int(m), x0, intervals_bc, out, temp, + num_c, 0, 1)) + left = intervals_bc[0] + row = nleft + len(x) + j + A[row, left-k:left+1] = temp[:k+1] + + # 5. Prepare the RHS: `y` values to interpolate (+ derivatives, if any) + extradim = prod(y.shape[1:]) + rhs = cupy.empty((nt, extradim), dtype=y.dtype) + if nleft > 0: + rhs[:nleft] = deriv_l_vals.reshape(-1, extradim) + rhs[nleft:nt - nright] = y.reshape(-1, extradim) + if nright > 0: + rhs[nt - nright:] = deriv_r_vals.reshape(-1, extradim) + + # 6. Finally, solve for the coefficients. + from cupy.linalg import solve + coef = solve(A, rhs) + + coef = cupy.ascontiguousarray(coef.reshape((nt,) + y.shape[1:])) + return BSpline(t, coef, k) + + +def _make_periodic_spline(x, y, t, k, axis): + n = x.size + + # 1. Construct the colocation matrix. + matr = BSpline.design_matrix(x, t, k) + + # 2. 
    # Boundary conditions: need to augment the design matrix with additional
    # rows, one row per derivative at the left and right edges.
    # The k-1 boundary conditions go to the first rows of the matrix
    # To compute the derivatives, will invoke the de Boor D kernel.

    # Prepare the I/O arrays for the kernels. We only need the non-zero
    # b-splines at x[0] and x[-1], but the kernel wants more arrays which
    # we allocate and ignore (mode != 1)
    temp = cupy.zeros(2*(2*k+1), dtype=float)
    num_c = 1
    dummy_c = cupy.empty((t.size - k - 1, num_c), dtype=float)
    out = cupy.empty((2, 1), dtype=dummy_c.dtype)

    d_boor_kernel = _get_module_func(D_BOOR_MODULE, 'd_boor', dummy_c)

    # find the intervals for x[0] and x[-1]
    x0 = cupy.r_[x[0], x[-1]]
    intervals_bc = cupy.array([k, n + k - 1], dtype=cupy.int64)  # match scipy

    # 3. B.C.s: one row per continuity condition; row m enforces equality of
    # the (m+1)-th derivative at the two edges of the period.
    rows = cupy.zeros((k-1, n + k - 1), dtype=float)

    for m in range(k-1):
        # place the derivatives of the order m+1 at x[0] and x[-1] into `temp`
        d_boor_kernel((1,), (2,),
                      (t, dummy_c, k, m+1, x0, intervals_bc,
                       out,    # ignore (mode != 1)
                       temp,   # non-zero b-splines
                       num_c,  # the 2nd dimension of `dummy_c`. Ignore.
                       0,      # mode != 1 => do not touch dummy_c array
                       2))     # the length of the `x0` array
        # left-edge derivative goes in positively, right-edge negatively,
        # so the row reads  d^m spl(x[0]) - d^m spl(x[-1]) = 0
        rows[m, :k+1] = temp[:k+1]
        rows[m, -k:] -= temp[2*k + 1:(2*k + 1) + k+1][:-1]

    matr_csr = sparse.vstack([sparse.csr_matrix(rows),  # A[:nleft, :]
                              matr])

    # r.h.s.: zeros for the k-1 periodicity rows, then the data values.
    extradim = prod(y.shape[1:])
    rhs = cupy.empty((n + k - 1, extradim), dtype=float)
    rhs[:(k - 1), :] = 0
    rhs[(k - 1):, :] = (y.reshape(n, 0) if y.size == 0 else
                        y.reshape((-1, extradim)))

    # solve for the coefficients
    coef = spsolve(matr_csr, rhs)
    coef = cupy.ascontiguousarray(coef.reshape((n + k - 1,) + y.shape[1:]))
    return BSpline.construct_fast(t, coef, k,
                                  extrapolate='periodic', axis=axis)
diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/_cubic.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/_cubic.py
new file mode 100644
index 0000000000000000000000000000000000000000..e839a1abeb673aab5910b0bd9d84be7149e96bb1
--- /dev/null
+++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/_cubic.py
@@ -0,0 +1,414 @@

import cupy
from cupyx.scipy.interpolate._interpolate import PPoly


def _isscalar(x):
    """Check whether x is a scalar type, or a 0-dim array."""
    return cupy.isscalar(x) or hasattr(x, 'shape') and x.shape == ()


def prepare_input(x, y, axis, dydx=None):
    """Prepare input for cubic spline interpolators.
    All data are converted to numpy arrays and checked for correctness.
    Axes equal to `axis` of arrays `y` and `dydx` are moved to be the 0th
    axis. The value of `axis` is converted to lie in
    [0, number of dimensions of `y`).
+ """ + + x, y = map(cupy.asarray, (x, y)) + if cupy.issubdtype(x.dtype, cupy.complexfloating): + raise ValueError("`x` must contain real values.") + x = x.astype(float) + + if cupy.issubdtype(y.dtype, cupy.complexfloating): + dtype = complex + else: + dtype = float + + if dydx is not None: + dydx = cupy.asarray(dydx) + if y.shape != dydx.shape: + raise ValueError("The shapes of `y` and `dydx` must be identical.") + if cupy.issubdtype(dydx.dtype, cupy.complexfloating): + dtype = complex + dydx = dydx.astype(dtype, copy=False) + + y = y.astype(dtype, copy=False) + axis = axis % y.ndim + if x.ndim != 1: + raise ValueError("`x` must be 1-dimensional.") + if x.shape[0] < 2: + raise ValueError("`x` must contain at least 2 elements.") + if x.shape[0] != y.shape[axis]: + raise ValueError("The length of `y` along `axis`={0} doesn't " + "match the length of `x`".format(axis)) + + if not cupy.all(cupy.isfinite(x)): + raise ValueError("`x` must contain only finite values.") + if not cupy.all(cupy.isfinite(y)): + raise ValueError("`y` must contain only finite values.") + + if dydx is not None and not cupy.all(cupy.isfinite(dydx)): + raise ValueError("`dydx` must contain only finite values.") + + dx = cupy.diff(x) + if cupy.any(dx <= 0): + raise ValueError("`x` must be strictly increasing sequence.") + + y = cupy.moveaxis(y, axis, 0) + if dydx is not None: + dydx = cupy.moveaxis(dydx, axis, 0) + + return x, dx, y, axis, dydx + + +class CubicHermiteSpline(PPoly): + """Piecewise-cubic interpolator matching values and first derivatives. + + The result is represented as a `PPoly` instance. [1]_ + + Parameters + ---------- + x : array_like, shape (n,) + 1-D array containing values of the independent variable. + Values must be real, finite and in strictly increasing order. + y : array_like + Array containing values of the dependent variable. It can have + arbitrary number of dimensions, but the length along ``axis`` + (see below) must match the length of ``x``. Values must be finite. 
+ dydx : array_like + Array containing derivatives of the dependent variable. It can have + arbitrary number of dimensions, but the length along ``axis`` + (see below) must match the length of ``x``. Values must be finite. + axis : int, optional + Axis along which `y` is assumed to be varying. Meaning that for + ``x[i]`` the corresponding values are ``cupy.take(y, i, axis=axis)``. + Default is 0. + extrapolate : {bool, 'periodic', None}, optional + If bool, determines whether to extrapolate to out-of-bounds points + based on first and last intervals, or to return NaNs. If 'periodic', + periodic extrapolation is used. If None (default), it is set to True. + + Attributes + ---------- + x : ndarray, shape (n,) + Breakpoints. The same ``x`` which was passed to the constructor. + c : ndarray, shape (4, n-1, ...) + Coefficients of the polynomials on each segment. The trailing + dimensions match the dimensions of `y`, excluding ``axis``. + For example, if `y` is 1-D, then ``c[k, i]`` is a coefficient for + ``(x-x[i])**(3-k)`` on the segment between ``x[i]`` and ``x[i+1]``. + axis : int + Interpolation axis. The same axis which was passed to the + constructor. + + See Also + -------- + Akima1DInterpolator : Akima 1D interpolator. + PchipInterpolator : PCHIP 1-D monotonic cubic interpolator. + PPoly : Piecewise polynomial in terms of coefficients and breakpoints + + Notes + ----- + If you want to create a higher-order spline matching higher-order + derivatives, use `BPoly.from_derivatives`. + + References + ---------- + .. [1] `Cubic Hermite spline + `_ + on Wikipedia. 
+ """ + + def __init__(self, x, y, dydx, axis=0, extrapolate=None): + if extrapolate is None: + extrapolate = True + + x, dx, y, axis, dydx = prepare_input(x, y, axis, dydx) + + dxr = dx.reshape([dx.shape[0]] + [1] * (y.ndim - 1)) + slope = cupy.diff(y, axis=0) / dxr + t = (dydx[:-1] + dydx[1:] - 2 * slope) / dxr + + c = cupy.empty((4, len(x) - 1) + y.shape[1:], dtype=t.dtype) + c[0] = t / dxr + c[1] = (slope - dydx[:-1]) / dxr - t + c[2] = dydx[:-1] + c[3] = y[:-1] + + super().__init__(c, x, extrapolate=extrapolate) + self.axis = axis + + +class PchipInterpolator(CubicHermiteSpline): + r"""PCHIP 1-D monotonic cubic interpolation. + + ``x`` and ``y`` are arrays of values used to approximate some function f, + with ``y = f(x)``. The interpolant uses monotonic cubic splines + to find the value of new points. (PCHIP stands for Piecewise Cubic + Hermite Interpolating Polynomial). + + Parameters + ---------- + x : ndarray + A 1-D array of monotonically increasing real values. ``x`` cannot + include duplicate values (otherwise f is overspecified) + y : ndarray + A 1-D array of real values. ``y``'s length along the interpolation + axis must be equal to the length of ``x``. If N-D array, use ``axis`` + parameter to select correct axis. + axis : int, optional + Axis in the y array corresponding to the x-coordinate values. + extrapolate : bool, optional + Whether to extrapolate to out-of-bounds points based on first + and last intervals, or to return NaNs. + + See Also + -------- + CubicHermiteSpline : Piecewise-cubic interpolator. + Akima1DInterpolator : Akima 1D interpolator. + PPoly : Piecewise polynomial in terms of coefficients and breakpoints. + + Notes + ----- + The interpolator preserves monotonicity in the interpolation data and does + not overshoot if the data is not smooth. + + The first derivatives are guaranteed to be continuous, but the second + derivatives may jump at :math:`x_k`. 

    Determines the derivatives at the points :math:`x_k`, :math:`f'_k`,
    by using PCHIP algorithm [1]_.

    Let :math:`h_k = x_{k+1} - x_k`, and :math:`d_k = (y_{k+1} - y_k) / h_k`
    are the slopes at internal points :math:`x_k`.
    If the signs of :math:`d_k` and :math:`d_{k-1}` are different or either of
    them equals zero, then :math:`f'_k = 0`. Otherwise, it is given by the
    weighted harmonic mean

    .. math::

        \frac{w_1 + w_2}{f'_k} = \frac{w_1}{d_{k-1}} + \frac{w_2}{d_k}

    where :math:`w_1 = 2 h_k + h_{k-1}` and :math:`w_2 = h_k + 2 h_{k-1}`.

    The end slopes are set using a one-sided scheme [2]_.


    References
    ----------
    .. [1] F. N. Fritsch and J. Butland,
           A method for constructing local
           monotone piecewise cubic interpolants,
           SIAM J. Sci. Comput., 5(2), 300-304 (1984).
           `10.1137/0905021 <https://doi.org/10.1137/0905021>`_.
    .. [2] see, e.g., C. Moler, Numerical Computing with Matlab, 2004.
           `10.1137/1.9780898717952
           <https://doi.org/10.1137/1.9780898717952>`_
    """

    def __init__(self, x, y, axis=0, extrapolate=None):
        x, _, y, axis, _ = prepare_input(x, y, axis)
        # reshape x so it broadcasts against y in _find_derivatives
        xp = x.reshape((x.shape[0],) + (1,)*(y.ndim-1))
        dk = self._find_derivatives(xp, y)
        # axis=0 here because prepare_input already moved `axis` to the front
        super().__init__(x, y, dk, axis=0, extrapolate=extrapolate)
        self.axis = axis

    @staticmethod
    def _edge_case(h0, h1, m0, m1):
        # one-sided three-point estimate for the derivative
        d = ((2 * h0 + h1) * m0 - h0 * m1) / (h0 + h1)

        # try to preserve shape
        mask = cupy.sign(d) != cupy.sign(m0)
        mask2 = (cupy.sign(m0) != cupy.sign(m1)) & (
            cupy.abs(d) > 3.*cupy.abs(m0))
        mmm = (~mask) & mask2

        # clamp the estimate so monotonicity is preserved at the edge
        d[mask] = 0.
        d[mmm] = 3.*m0[mmm]

        return d

    @staticmethod
    def _find_derivatives(x, y):
        # Determine the derivatives at the points y_k, d_k, using the
        # PCHIP algorithm:
        # We choose the derivatives at the point x_k by
        # Let m_k be the slope of the kth segment (between k and k+1)
        # If m_k=0 or m_{k-1}=0 or sgn(m_k) != sgn(m_{k-1}) then d_k == 0
        # else use weighted harmonic mean:
        #   w_1 = 2h_k + h_{k-1}, w_2 = h_k + 2h_{k-1}
        #   1/d_k = 1/(w_1 + w_2)*(w_1 / m_k + w_2 / m_{k-1})
        # where h_k is the spacing between x_k and x_{k+1}
        y_shape = y.shape
        if y.ndim == 1:
            # So that _edge_case doesn't end up assigning to scalars
            x = x[:, None]
            y = y[:, None]

        hk = x[1:] - x[:-1]
        mk = (y[1:] - y[:-1]) / hk

        if y.shape[0] == 2:
            # edge case: only have two points, use linear interpolation
            dk = cupy.zeros_like(y)
            dk[0] = mk
            dk[1] = mk
            return dk.reshape(y_shape)

        smk = cupy.sign(mk)
        condition = (smk[1:] != smk[:-1]) | (mk[1:] == 0) | (mk[:-1] == 0)

        w1 = 2*hk[1:] + hk[:-1]
        w2 = hk[1:] + 2*hk[:-1]

        # values where division by zero occurs will be excluded
        # by 'condition' afterwards
        whmean = (w1 / mk[:-1] + w2 / mk[1:]) / (w1 + w2)

        dk = cupy.zeros_like(y)
        dk[1:-1] = cupy.where(condition, 0.0, 1.0 / whmean)

        # special case endpoints, as suggested in
        # Cleve Moler, Numerical Computing with MATLAB, Chap 3.6 (pchiptx.m)
        dk[0] = PchipInterpolator._edge_case(hk[0], hk[1], mk[0], mk[1])
        dk[-1] = PchipInterpolator._edge_case(hk[-1], hk[-2], mk[-1], mk[-2])

        return dk.reshape(y_shape)


def pchip_interpolate(xi, yi, x, der=0, axis=0):
    """
    Convenience function for pchip interpolation.

    xi and yi are arrays of values used to approximate some function f,
    with ``yi = f(xi)``. The interpolant uses monotonic cubic splines
    to find the value of new points x and the derivatives there.
    See `scipy.interpolate.PchipInterpolator` for details.

    Parameters
    ----------
    xi : array_like
        A sorted list of x-coordinates, of length N.
    yi : array_like
        A 1-D array of real values. `yi`'s length along the interpolation
        axis must be equal to the length of `xi`. If N-D array, use axis
        parameter to select correct axis.
    x : scalar or array_like
        Of length M.
    der : int or list, optional
        Derivatives to extract. The 0th derivative can be included to
        return the function value.
    axis : int, optional
        Axis in the yi array corresponding to the x-coordinate values.

    See Also
    --------
    PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.

    Returns
    -------
    y : scalar or array_like
        The result, of length R or length M or M by R.
    """
    P = PchipInterpolator(xi, yi, axis=axis)

    if der == 0:
        return P(x)
    elif _isscalar(der):
        return P.derivative(der)(x)
    else:
        return [P.derivative(nu)(x) for nu in der]


class Akima1DInterpolator(CubicHermiteSpline):
    """
    Akima interpolator

    Fit piecewise cubic polynomials, given vectors x and y. The interpolation
    method by Akima uses a continuously differentiable sub-spline built from
    piecewise cubic polynomials. The resultant curve passes through the given
    data points and will appear smooth and natural [1]_.

    Parameters
    ----------
    x : ndarray, shape (m, )
        1-D array of monotonically increasing real values.
    y : ndarray, shape (m, ...)
        N-D array of real values. The length of ``y`` along the first axis
        must be equal to the length of ``x``.
    axis : int, optional
        Specifies the axis of ``y`` along which to interpolate. Interpolation
        defaults to the first axis of ``y``.

    See Also
    --------
    CubicHermiteSpline : Piecewise-cubic interpolator.
    PchipInterpolator : PCHIP 1-D monotonic cubic interpolator.
    PPoly : Piecewise polynomial in terms of coefficients and breakpoints

    Notes
    -----
    Use only for precise data, as the fitted curve passes through the given
    points exactly.
This routine is useful for plotting a pleasingly smooth + curve through a few given points for purposes of plotting. + + References + ---------- + .. [1] A new method of interpolation and smooth curve fitting based + on local procedures. Hiroshi Akima, J. ACM, October 1970, 17(4), + 589-602. + """ + + def __init__(self, x, y, axis=0): + # Original implementation in MATLAB by N. Shamsundar (BSD licensed) + # https://www.mathworks.com/matlabcentral/fileexchange/1814-akima-interpolation # noqa: E501 + x, dx, y, axis, _ = prepare_input(x, y, axis) + + # determine slopes between breakpoints + m = cupy.empty((x.size + 3, ) + y.shape[1:]) + dx = dx[(slice(None), ) + (None, ) * (y.ndim - 1)] + m[2:-2] = cupy.diff(y, axis=0) / dx + + # add two additional points on the left ... + m[1] = 2. * m[2] - m[3] + m[0] = 2. * m[1] - m[2] + # ... and on the right + m[-2] = 2. * m[-3] - m[-4] + m[-1] = 2. * m[-2] - m[-3] + + # if m1 == m2 != m3 == m4, the slope at the breakpoint is not + # defined. This is the fill value: + t = .5 * (m[3:] + m[:-3]) + # get the denominator of the slope t + dm = cupy.abs(cupy.diff(m, axis=0)) + f1 = dm[2:] + f2 = dm[:-2] + f12 = f1 + f2 + # These are the mask of where the slope at breakpoint is defined: + max_value = -cupy.inf if y.size == 0 else cupy.max(f12) + ind = cupy.nonzero(f12 > 1e-9 * max_value) + x_ind, y_ind = ind[0], ind[1:] + # Set the slope at breakpoint + t[ind] = (f1[ind] * m[(x_ind + 1,) + y_ind] + + f2[ind] * m[(x_ind + 2,) + y_ind]) / f12[ind] + + super().__init__(x, y, t, axis=0, extrapolate=False) + self.axis = axis + + def extend(self, c, x, right=True): + raise NotImplementedError("Extending a 1-D Akima interpolator is not " + "yet implemented") + + # These are inherited from PPoly, but they do not produce an Akima + # interpolator. Hence stub them out. 
+ @classmethod + def from_spline(cls, tck, extrapolate=None): + raise NotImplementedError("This method does not make sense for " + "an Akima interpolator.") + + @classmethod + def from_bernstein_basis(cls, bp, extrapolate=None): + raise NotImplementedError("This method does not make sense for " + "an Akima interpolator.") diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/_interpolate.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/_interpolate.py new file mode 100644 index 0000000000000000000000000000000000000000..b1e8a4e0b1d315fa66acc116d87f09b6ee14e1ec --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/_interpolate.py @@ -0,0 +1,2364 @@ + +import math + +import cupy +from cupy._core import internal # NOQA +from cupy._core._scalar import get_typename # NOQA +from cupyx.scipy import special as spec +from cupyx.scipy.interpolate._bspline import BSpline, _get_dtype + +import numpy as np + +try: + from math import comb + _comb = comb +except ImportError: + def _comb(n, k): + return math.factorial(n) // (math.factorial(n - k) * math.factorial(k)) + + +MAX_DIMS = 64 +TYPES = ['double', 'thrust::complex'] +INT_TYPES = ['int', 'long long'] + +INTERVAL_KERNEL = r''' +#include + +#define le_or_ge(x, y, r) ((r) ? ((x) < (y)) : ((x) > (y))) +#define ge_or_le(x, y, r) ((r) ? ((x) > (y)) : ((x) < (y))) +#define geq_or_leq(x, y, r) ((r) ? 
((x) >= (y)) : ((x) <= (y))) + +__device__ long long find_breakpoint_position( + const double* breakpoints, const double xp, bool extrapolate, + const int total_breakpoints, const bool* pasc) { + + double a = *&breakpoints[0]; + double b = *&breakpoints[total_breakpoints - 1]; + bool asc = pasc[0]; + + if(isnan(xp)) { + return -1; + } + + if(le_or_ge(xp, a, asc) || ge_or_le(xp, b, asc)) { + if(!extrapolate) { + return -1; + } else if(le_or_ge(xp, a, asc)) { + return 0; + } else { // ge_or_le(xp, b, asc) + return total_breakpoints - 2; + } + } else if (xp == b) { + return total_breakpoints - 2; + } + + int left = 0; + int right = total_breakpoints - 2; + int mid; + + if(le_or_ge(xp, *&breakpoints[left + 1], asc)) { + right = left; + } + + bool found = false; + + while(left < right && !found) { + mid = ((right + left) / 2); + if(le_or_ge(xp, *&breakpoints[mid], asc)) { + right = mid; + } else if (geq_or_leq(xp, *&breakpoints[mid + 1], asc)) { + left = mid + 1; + } else { + found = true; + left = mid; + } + } + + return left; + +} + +__global__ void find_breakpoint_position_1d( + const double* breakpoints, const double* x, long long* out, + bool extrapolate, int total_x, int total_breakpoints, + const bool* pasc) { + + int idx = blockDim.x * blockIdx.x + threadIdx.x; + if(idx >= total_x) { + return; + } + + const double xp = *&x[idx]; + out[idx] = find_breakpoint_position( + breakpoints, xp, extrapolate, total_breakpoints, pasc); +} + +__global__ void find_breakpoint_position_nd( + const double* breakpoints, const double* x, long long* out, + bool extrapolate, int total_x, const long long* x_dims, + const long long* breakpoints_sizes, + const long long* breakpoints_strides) { + + int idx = blockDim.x * blockIdx.x + threadIdx.x; + if(idx >= total_x) { + return; + } + + const long long x_dim = *&x_dims[idx]; + const long long stride = breakpoints_strides[x_dim]; + const double* dim_breakpoints = breakpoints + stride; + const int num_breakpoints = 
*&breakpoints_sizes[x_dim]; + + const bool asc = true; + const double xp = *&x[idx]; + out[idx] = find_breakpoint_position( + dim_breakpoints, xp, extrapolate, num_breakpoints, &asc); +} +''' + +INTERVAL_MODULE = cupy.RawModule( + code=INTERVAL_KERNEL, options=('-std=c++11',), + name_expressions=[ + 'find_breakpoint_position_1d', 'find_breakpoint_position_nd']) + +PPOLY_KERNEL = r""" +#include +#include + +template +__device__ T eval_poly_1( + const double s, const T* coef, long long ci, int cj, int dx, + const long long* c_dims, const long long stride_0, + const long long stride_1) { + int kp, k; + T res, z; + double prefactor; + + res = 0.0; + z = 1.0; + + if(dx < 0) { + for(int i = 0; i < -dx; i++) { + z *= s; + } + } + + int c_dim_0 = (int) *&c_dims[0]; + + for(kp = 0; kp < c_dim_0; kp++) { + if(dx == 0) { + prefactor = 1.0; + } else if(dx > 0) { + if(kp < dx) { + continue; + } else { + prefactor = 1.0; + for(k = kp; k > kp - dx; k--) { + prefactor *= k; + } + } + } else { + prefactor = 1.0; + for(k = kp; k < kp - dx; k++) { + prefactor /= k + 1; + } + } + + int off = stride_0 * (c_dim_0 - kp - 1) + stride_1 * ci + cj; + T cur_coef = *&coef[off]; + res += cur_coef * z * ((T) prefactor); + + if((kp < c_dim_0 - 1) && kp >= dx) { + z *= s; + } + + } + + return res; + +} + +template +__global__ void eval_ppoly( + const T* coef, const double* breakpoints, const double* x, + const long long* intervals, int dx, const long long* c_dims, + const long long* c_strides, int num_x, T* out) { + + int idx = blockDim.x * blockIdx.x + threadIdx.x; + + if(idx >= num_x) { + return; + } + + double xp = *&x[idx]; + long long interval = *&intervals[idx]; + double breakpoint = *&breakpoints[interval]; + + const int num_c = *&c_dims[2]; + const long long stride_0 = *&c_strides[0]; + const long long stride_1 = *&c_strides[1]; + + if(interval < 0) { + for(int j = 0; j < num_c; j++) { + out[num_c * idx + j] = CUDART_NAN; + } + return; + } + + for(int j = 0; j < num_c; j++) { + T res = 
eval_poly_1( + xp - breakpoint, coef, interval, ((long long) (j)), dx, + c_dims, stride_0, stride_1); + out[num_c * idx + j] = res; + } +} + +template +__global__ void eval_ppoly_nd( + const T* coef, const double* xs, const double* xp, + const long long* intervals, const long long* dx, + const long long* ks, T* c2_all, const long long* c_dims, + const long long* c_strides, const long long* xs_strides, + const long long* xs_offsets, const long long* ks_strides, + const int num_x, const int ndims, const int num_ks, T* out) { + + int idx = blockDim.x * blockIdx.x + threadIdx.x; + if(idx >= num_x) { + return; + } + + const long long c_dim0 = c_dims[0]; + const int num_c = *&c_dims[2]; + const long long c_stride0 = c_strides[0]; + const long long c_stride1 = c_strides[1]; + + const double* xp_dims = xp + ndims * idx; + const long long* xp_intervals = intervals + ndims * idx; + T* c2 = c2_all + c_dim0 * idx; + + bool invalid = false; + for(int i = 0; i < ndims && !invalid; i++) { + invalid = xp_intervals[i] < 0; + } + + if(invalid) { + for(int j = 0; j < num_c; j++) { + out[num_c * idx + j] = CUDART_NAN; + } + return; + } + + long long pos = 0; + for(int k = 0; k < ndims; k++) { + pos += xp_intervals[k] * xs_strides[k]; + } + + for(int jp = 0; jp < num_c; jp++) { + for(int i = 0; i < c_dim0; i++) { + c2[i] = coef[c_stride0 * i + c_stride1 * pos + jp]; + } + + for(int k = ndims - 1; k >= 0; k--) { + const long long interval = xp_intervals[k]; + const long long xs_offset = xs_offsets[k]; + const double* dim_breakpoints = xs + xs_offset; + const double xval = xp_dims[k] - dim_breakpoints[interval]; + + const long long k_off = ks_strides[k]; + const long long dim_ks = ks[k]; + int kpos = 0; + + for(int ko = 0; ko < k_off; ko++) { + const T* c2_off = c2 + kpos; + const int k_dx = dx[k]; + T res = eval_poly_1( + xval, c2_off, ((long long) 0), 0, k_dx, + &dim_ks, ((long long) 1), ((long long) 1)); + c2[ko] = res; + kpos += dim_ks; + } + } + + out[num_c * idx + jp] = c2[0]; + } 
+} + +template +__global__ void fix_continuity( + T* coef, const double* breakpoints, const int order, + const long long* c_dims, const long long* c_strides, + int num_breakpoints) { + + const long long c_size0 = *&c_dims[0]; + const long long c_size2 = *&c_dims[2]; + const long long stride_0 = *&c_strides[0]; + const long long stride_1 = *&c_strides[1]; + const long long stride_2 = *&c_strides[2]; + + for(int idx = 1; idx < num_breakpoints - 1; idx++) { + const double breakpoint = *&breakpoints[idx]; + const long long interval = idx - 1; + const double breakpoint_interval = *&breakpoints[interval]; + + for(int jp = 0; jp < c_size2; jp++) { + for(int dx = order; dx > -1; dx--) { + T res = eval_poly_1( + breakpoint - breakpoint_interval, coef, + interval, jp, dx, c_dims, stride_0, stride_1); + + for(int kp = 0; kp < dx; kp++) { + res /= kp + 1; + } + + const long long c_idx = ( + stride_0 * (c_size0 - dx - 1) + stride_1 * idx + + stride_2 * jp); + + coef[c_idx] = res; + } + } + } +} + +template +__global__ void integrate( + const T* coef, const double* breakpoints, + const double* a_val, const double* b_val, + const long long* start, const long long* end, + const long long* c_dims, const long long* c_strides, + const bool* pasc, T* out) { + + int idx = blockDim.x * blockIdx.x + threadIdx.x; + const long long c_dim2 = *&c_dims[2]; + + if(idx >= c_dim2) { + return; + } + + const bool asc = pasc[0]; + const long long start_interval = asc ? *&start[0] : *&end[0]; + const long long end_interval = asc ? *&end[0] : *&start[0]; + const double a = asc ? *&a_val[0] : *&b_val[0]; + const double b = asc ? 
*&b_val[0] : *&a_val[0]; + + const long long stride_0 = *&c_strides[0]; + const long long stride_1 = *&c_strides[1]; + + if(start_interval < 0 || end_interval < 0) { + out[idx] = CUDART_NAN; + return; + } + + T vtot = 0; + T vb; + T va; + for(int interval = start_interval; interval <= end_interval; interval++) { + const double breakpoint = *&breakpoints[interval]; + if(interval == end_interval) { + vb = eval_poly_1( + b - breakpoint, coef, interval, idx, -1, c_dims, + stride_0, stride_1); + } else { + const double next_breakpoint = *&breakpoints[interval + 1]; + vb = eval_poly_1( + next_breakpoint - breakpoint, coef, interval, + idx, -1, c_dims, stride_0, stride_1); + } + + if(interval == start_interval) { + va = eval_poly_1( + a - breakpoint, coef, interval, idx, -1, c_dims, + stride_0, stride_1); + } else { + va = eval_poly_1( + 0, coef, interval, idx, -1, c_dims, + stride_0, stride_1); + } + + vtot += (vb - va); + } + + if(!asc) { + vtot = -vtot; + } + + out[idx] = vtot; + +} +""" + +PPOLY_MODULE = cupy.RawModule( + code=PPOLY_KERNEL, options=('-std=c++11',), + name_expressions=( + [f'eval_ppoly<{type_name}>' for type_name in TYPES] + + [f'eval_ppoly_nd<{type_name}>' for type_name in TYPES] + + [f'fix_continuity<{type_name}>' for type_name in TYPES] + + [f'integrate<{type_name}>' for type_name in TYPES])) + +BPOLY_KERNEL = r""" +#include +#include + +template +__device__ T eval_bpoly1( + const double s, const T* coef, const long long ci, const long long cj, + const long long c_dims_0, const long long c_strides_0, + const long long c_strides_1) { + + const long long k = c_dims_0 - 1; + const double s1 = 1 - s; + T res; + + const long long i0 = 0 * c_strides_0 + ci * c_strides_1 + cj; + const long long i1 = 1 * c_strides_0 + ci * c_strides_1 + cj; + const long long i2 = 2 * c_strides_0 + ci * c_strides_1 + cj; + const long long i3 = 3 * c_strides_0 + ci * c_strides_1 + cj; + + if(k == 0) { + res = coef[i0]; + } else if(k == 1) { + res = coef[i0] * s1 + coef[i1] * 
s; + } else if(k == 2) { + res = coef[i0] * s1 * s1 + coef[i1] * 2.0 * s1 * s + coef[i2] * s * s; + } else if(k == 3) { + res = (coef[i0] * s1 * s1 * s1 + coef[i1] * 3.0 * s1 * s1 * s + + coef[i2] * 3.0 * s1 * s * s + coef[i3] * s * s * s); + } else { + T comb = 1; + res = 0; + for(int j = 0; j < k + 1; j++) { + const long long idx = j * c_strides_0 + ci * c_strides_1 + cj; + res += (comb * pow(s, ((double) j)) * pow(s1, ((double) k) - j) * + coef[idx]); + comb *= 1.0 * (k - j) / (j + 1.0); + } + } + + return res; +} + +template +__device__ T eval_bpoly1_deriv( + const double s, const T* coef, const long long ci, const long long cj, + int dx, T* wrk, const long long c_dims_0, const long long c_strides_0, + const long long c_strides_1, const long long wrk_dims_0, + const long long wrk_strides_0, const long long wrk_strides_1) { + + T res, term; + double comb, poch; + + const long long k = c_dims_0 - 1; + + if(dx == 0) { + res = eval_bpoly1(s, coef, ci, cj, c_dims_0, c_strides_0, + c_strides_1); + } else { + poch = 1.0; + for(int a = 0; a < dx; a++) { + poch *= k - a; + } + + term = 0; + for(int a = 0; a < k - dx + 1; a++) { + term = 0; + comb = 1; + for(int j = 0; j < dx + 1; j++) { + const long long idx = (c_strides_0 * (j + a) + + c_strides_1 * ci + cj); + term += coef[idx] * pow(-1.0, ((double) (j + dx))) * comb; + comb *= 1.0 * (dx - j) / (j + 1); + } + wrk[a] = term * poch; + } + + res = eval_bpoly1(s, wrk, 0, 0, wrk_dims_0, wrk_strides_0, + wrk_strides_1); + } + return res; +} + +template +__global__ void eval_bpoly( + const T* coef, const double* breakpoints, const double* x, + const long long* intervals, int dx, T* wrk, const long long* c_dims, + const long long* c_strides, const long long* wrk_dims, + const long long* wrk_strides, int num_x, T* out) { + + int idx = blockDim.x * blockIdx.x + threadIdx.x; + + if(idx >= num_x) { + return; + } + + double xp = *&x[idx]; + long long interval = *&intervals[idx]; + const int num_c = *&c_dims[2]; + + const long long 
c_dims_0 = *&c_dims[0]; + const long long c_strides_0 = *&c_strides[0]; + const long long c_strides_1 = *&c_strides[1]; + + const long long wrk_dims_0 = *&wrk_dims[0]; + const long long wrk_strides_0 = *&wrk_strides[0]; + const long long wrk_strides_1 = *&wrk_strides[1]; + + if(interval < 0) { + for(int j = 0; j < num_c; j++) { + out[num_c * idx + j] = CUDART_NAN; + } + return; + } + + const double ds = breakpoints[interval + 1] - breakpoints[interval]; + const double ds_dx = pow(ds, ((double) dx)); + T* off_wrk = wrk + idx * (c_dims_0 - dx); + + for(int j = 0; j < num_c; j++) { + T res; + const double s = (xp - breakpoints[interval]) / ds; + if(dx == 0) { + res = eval_bpoly1( + s, coef, interval, ((long long) (j)), c_dims_0, c_strides_0, + c_strides_1); + } else { + res = eval_bpoly1_deriv( + s, coef, interval, ((long long) (j)), dx, + off_wrk, c_dims_0, c_strides_0, c_strides_1, + wrk_dims_0, wrk_strides_0, wrk_strides_1) / ds_dx; + } + out[num_c * idx + j] = res; + } + +} +""" + +BPOLY_MODULE = cupy.RawModule( + code=BPOLY_KERNEL, options=('-std=c++11',), + name_expressions=( + [f'eval_bpoly<{type_name}>' for type_name in TYPES])) + + +def _get_module_func(module, func_name, *template_args): + def _get_typename(dtype): + typename = get_typename(dtype) + if dtype.kind == 'c': + typename = 'thrust::' + typename + return typename + args_dtypes = [_get_typename(arg.dtype) for arg in template_args] + template = ', '.join(args_dtypes) + kernel_name = f'{func_name}<{template}>' if template_args else func_name + kernel = module.get_function(kernel_name) + return kernel + + +def _ppoly_evaluate(c, x, xp, dx, extrapolate, out): + """ + Evaluate a piecewise polynomial. + + Parameters + ---------- + c : ndarray, shape (k, m, n) + Coefficients local polynomials of order `k-1` in `m` intervals. + There are `n` polynomials in each interval. + Coefficient of highest order-term comes first. + x : ndarray, shape (m+1,) + Breakpoints of polynomials. 
+ xp : ndarray, shape (r,) + Points to evaluate the piecewise polynomial at. + dx : int + Order of derivative to evaluate. The derivative is evaluated + piecewise and may have discontinuities. + extrapolate : bool + Whether to extrapolate to out-of-bounds points based on first + and last intervals, or to return NaNs. + out : ndarray, shape (r, n) + Value of each polynomial at each of the input points. + This argument is modified in-place. + """ + # Determine if the breakpoints are in ascending order or descending one + ascending = x[-1] >= x[0] + + intervals = cupy.empty(xp.shape, dtype=cupy.int64) + interval_kernel = INTERVAL_MODULE.get_function( + 'find_breakpoint_position_1d') + interval_kernel(((xp.shape[0] + 128 - 1) // 128,), (128,), + (x, xp, intervals, extrapolate, xp.shape[0], x.shape[0], + ascending)) + + # Compute coefficient displacement stride (in elements) + c_shape = cupy.asarray(c.shape, dtype=cupy.int64) + c_strides = cupy.asarray(c.strides, dtype=cupy.int64) // c.itemsize + + ppoly_kernel = _get_module_func(PPOLY_MODULE, 'eval_ppoly', c) + ppoly_kernel(((xp.shape[0] + 128 - 1) // 128,), (128,), + (c, x, xp, intervals, dx, c_shape, c_strides, + xp.shape[0], out)) + + +def _ndppoly_evaluate(c, xs, ks, xp, dx, extrapolate, out): + """ + Evaluate a piecewise tensor-product polynomial. + + Parameters + ---------- + c : ndarray, shape (k_1*...*k_d, m_1*...*m_d, n) + Coefficients local polynomials of order `k-1` in + `m_1`, ..., `m_d` intervals. There are `n` polynomials + in each interval. + xs : d-tuple of ndarray of shape (m_d+1,) each + Breakpoints of polynomials + ks : ndarray of int, shape (d,) + Orders of polynomials in each dimension + xp : ndarray, shape (r, d) + Points to evaluate the piecewise polynomial at. + dx : ndarray of int, shape (d,) + Orders of derivative to evaluate. The derivative is evaluated + piecewise and may have discontinuities. 
+ extrapolate : int, optional + Whether to extrapolate to out-of-bounds points based on first + and last intervals, or to return NaNs. + out : ndarray, shape (r, n) + Value of each polynomial at each of the input points. + For points outside the span ``x[0] ... x[-1]``, + ``nan`` is returned. + This argument is modified in-place. + """ + num_samples = xp.shape[0] + total_xp = xp.size + ndims = len(xs) + num_ks = ks.size + + # Compose all n-dimensional breakpoints into a single array + xs_sizes = cupy.asarray([x.size for x in xs], dtype=cupy.int64) + xs_offsets = cupy.cumsum(xs_sizes) + xs_offsets = cupy.r_[0, xs_offsets[:-1]] + xs_complete = cupy.r_[xs] + + xs_sizes_m1 = xs_sizes - 1 + xs_strides = cupy.cumprod(xs_sizes_m1[:0:-1]) + xs_strides = cupy.r_[xs_strides[::-1], 1] + + # Map each element on the input to their corresponding dimension + intervals = cupy.empty(xp.shape, dtype=cupy.int64) + dim_seq = cupy.arange(ndims, dtype=cupy.int64) + xp_dims = cupy.broadcast_to( + cupy.expand_dims(dim_seq, 0), (num_samples, ndims)) + xp_dims = xp_dims.copy() + + # Compute n-dimensional intervals + interval_kernel = INTERVAL_MODULE.get_function( + 'find_breakpoint_position_nd') + interval_kernel(((total_xp + 128 - 1) // 128,), (128,), + (xs_complete, xp, intervals, extrapolate, total_xp, + xp_dims, xs_sizes, xs_offsets)) + + # Compute coefficient displacement stride (in elements) + c_shape = cupy.asarray(c.shape, dtype=cupy.int64) + c_strides = cupy.asarray(c.strides, dtype=cupy.int64) // c.itemsize + c2 = cupy.zeros((num_samples * c.shape[0], 1, 1), dtype=_get_dtype(c)) + + # Compute order strides + ks_strides = cupy.cumprod(cupy.r_[1, ks]) + ks_strides = ks_strides[:-1] + + ppoly_kernel = _get_module_func(PPOLY_MODULE, 'eval_ppoly_nd', c) + ppoly_kernel(((num_samples + 128 - 1) // 128,), (128,), + (c, xs_complete, xp, intervals, dx, ks, c2, c_shape, + c_strides, xs_strides, xs_offsets, ks_strides, num_samples, + ndims, num_ks, out)) + + +def _fix_continuity(c, x, order): 
+ """ + Make a piecewise polynomial continuously differentiable to given order. + + Parameters + ---------- + c : ndarray, shape (k, m, n) + Coefficients local polynomials of order `k-1` in `m` intervals. + There are `n` polynomials in each interval. + Coefficient of highest order-term comes first. + + Coefficients c[-order-1:] are modified in-place. + x : ndarray, shape (m+1,) + Breakpoints of polynomials + order : int + Order up to which enforce piecewise differentiability. + """ + # Compute coefficient displacement stride (in elements) + c_shape = cupy.asarray(c.shape, dtype=cupy.int64) + c_strides = cupy.asarray(c.strides, dtype=cupy.int64) // c.itemsize + + continuity_kernel = _get_module_func(PPOLY_MODULE, 'fix_continuity', c) + continuity_kernel((1,), (1,), + (c, x, order, c_shape, c_strides, x.shape[0])) + + +def _integrate(c, x, a, b, extrapolate, out): + """ + Compute integral over a piecewise polynomial. + + Parameters + ---------- + c : ndarray, shape (k, m, n) + Coefficients local polynomials of order `k-1` in `m` intervals. + x : ndarray, shape (m+1,) + Breakpoints of polynomials + a : double + Start point of integration. + b : double + End point of integration. + extrapolate : bool, optional + Whether to extrapolate to out-of-bounds points based on first + and last intervals, or to return NaNs. + out : ndarray, shape (n,) + Integral of the piecewise polynomial, assuming the polynomial + is zero outside the range (x[0], x[-1]). + This argument is modified in-place. 
+ """ + # Determine if the breakpoints are in ascending order or descending one + ascending = x[-1] >= x[0] + + a = cupy.asarray([a], dtype=cupy.float64) + b = cupy.asarray([b], dtype=cupy.float64) + + start_interval = cupy.empty(a.shape, dtype=cupy.int64) + end_interval = cupy.empty(b.shape, dtype=cupy.int64) + + interval_kernel = INTERVAL_MODULE.get_function( + 'find_breakpoint_position_1d') + interval_kernel(((a.shape[0] + 128 - 1) // 128,), (128,), + (x, a, start_interval, extrapolate, a.shape[0], x.shape[0], + ascending)) + interval_kernel(((b.shape[0] + 128 - 1) // 128,), (128,), + (x, b, end_interval, extrapolate, b.shape[0], x.shape[0], + ascending)) + + # Compute coefficient displacement stride (in elements) + c_shape = cupy.asarray(c.shape, dtype=cupy.int64) + c_strides = cupy.asarray(c.strides, dtype=cupy.int64) // c.itemsize + + int_kernel = _get_module_func(PPOLY_MODULE, 'integrate', c) + int_kernel(((c.shape[2] + 128 - 1) // 128,), (128,), + (c, x, a, b, start_interval, end_interval, c_shape, c_strides, + ascending, out)) + + +def _bpoly_evaluate(c, x, xp, dx, extrapolate, out): + """ + Evaluate a Bernstein polynomial. + + Parameters + ---------- + c : ndarray, shape (k, m, n) + Coefficients local polynomials of order `k-1` in `m` intervals. + There are `n` polynomials in each interval. + Coefficient of highest order-term comes first. + x : ndarray, shape (m+1,) + Breakpoints of polynomials. + xp : ndarray, shape (r,) + Points to evaluate the piecewise polynomial at. + dx : int + Order of derivative to evaluate. The derivative is evaluated + piecewise and may have discontinuities. + extrapolate : bool + Whether to extrapolate to out-of-bounds points based on first + and last intervals, or to return NaNs. + out : ndarray, shape (r, n) + Value of each polynomial at each of the input points. + This argument is modified in-place. 
+ """ + # Determine if the breakpoints are in ascending order or descending one + ascending = x[-1] >= x[0] + + intervals = cupy.empty(xp.shape, dtype=cupy.int64) + interval_kernel = INTERVAL_MODULE.get_function( + 'find_breakpoint_position_1d') + interval_kernel(((xp.shape[0] + 128 - 1) // 128,), (128,), + (x, xp, intervals, extrapolate, xp.shape[0], x.shape[0], + ascending)) + + # Compute coefficient displacement stride (in elements) + c_shape = cupy.asarray(c.shape, dtype=cupy.int64) + c_strides = cupy.asarray(c.strides, dtype=cupy.int64) // c.itemsize + + wrk = cupy.empty((xp.shape[0] * (c.shape[0] - dx), 1, 1), + dtype=_get_dtype(c)) + wrk_shape = cupy.asarray([c.shape[0] - dx, 1, 1], dtype=cupy.int64) + wrk_strides = cupy.asarray(wrk.strides, dtype=cupy.int64) // wrk.itemsize + + bpoly_kernel = _get_module_func(BPOLY_MODULE, 'eval_bpoly', c) + bpoly_kernel(((xp.shape[0] + 128 - 1) // 128,), (128,), + (c, x, xp, intervals, dx, wrk, c_shape, c_strides, wrk_shape, + wrk_strides, xp.shape[0], out)) + + +def _ndim_coords_from_arrays(points, ndim=None): + """ + Convert a tuple of coordinate arrays to a (..., ndim)-shaped array. 
+ """ + + if isinstance(points, tuple) and len(points) == 1: + # handle argument tuple + points = cupy.asarray(points[0]) + if isinstance(points, tuple): + p = cupy.broadcast_arrays(*[cupy.asarray(x) for x in points]) + p = [cupy.expand_dims(x, -1) for x in p] + points = cupy.concatenate(p, axis=-1) + else: + points = cupy.asarray(points) + if points.ndim == 1: + if ndim is None: + points = points.reshape(-1, 1) + else: + points = points.reshape(-1, ndim) + return points + + +class _PPolyBase: + """Base class for piecewise polynomials.""" + __slots__ = ('c', 'x', 'extrapolate', 'axis') + + def __init__(self, c, x, extrapolate=None, axis=0): + self.c = cupy.asarray(c) + self.x = cupy.ascontiguousarray(x, dtype=cupy.float64) + + if extrapolate is None: + extrapolate = True + elif extrapolate != 'periodic': + extrapolate = bool(extrapolate) + self.extrapolate = extrapolate + + if self.c.ndim < 2: + raise ValueError("Coefficients array must be at least " + "2-dimensional.") + + if not (0 <= axis < self.c.ndim - 1): + raise ValueError("axis=%s must be between 0 and %s" % + (axis, self.c.ndim-1)) + + self.axis = axis + if axis != 0: + # move the interpolation axis to be the first one in self.c + # More specifically, the target shape for self.c is (k, m, ...), + # and axis !=0 means that we have c.shape (..., k, m, ...) + # ^ + # axis + # So we roll two of them. 
+ self.c = cupy.moveaxis(self.c, axis+1, 0) + self.c = cupy.moveaxis(self.c, axis+1, 0) + + if self.x.ndim != 1: + raise ValueError("x must be 1-dimensional") + if self.x.size < 2: + raise ValueError("at least 2 breakpoints are needed") + if self.c.ndim < 2: + raise ValueError("c must have at least 2 dimensions") + if self.c.shape[0] == 0: + raise ValueError("polynomial must be at least of order 0") + if self.c.shape[1] != self.x.size-1: + raise ValueError("number of coefficients != len(x)-1") + dx = cupy.diff(self.x) + if not (cupy.all(dx >= 0) or cupy.all(dx <= 0)): + raise ValueError("`x` must be strictly increasing or decreasing.") + + dtype = self._get_dtype(self.c.dtype) + self.c = cupy.ascontiguousarray(self.c, dtype=dtype) + + def _get_dtype(self, dtype): + if (cupy.issubdtype(dtype, cupy.complexfloating) + or cupy.issubdtype(self.c.dtype, cupy.complexfloating)): + return cupy.complex128 + else: + return cupy.float64 + + @classmethod + def construct_fast(cls, c, x, extrapolate=None, axis=0): + """ + Construct the piecewise polynomial without making checks. + Takes the same parameters as the constructor. Input arguments + ``c`` and ``x`` must be arrays of the correct shape and type. The + ``c`` array can only be of dtypes float and complex, and ``x`` + array must have dtype float. + """ + self = object.__new__(cls) + self.c = c + self.x = x + self.axis = axis + if extrapolate is None: + extrapolate = True + self.extrapolate = extrapolate + return self + + def _ensure_c_contiguous(self): + """ + c and x may be modified by the user. The Cython code expects + that they are C contiguous. + """ + if not self.x.flags.c_contiguous: + self.x = self.x.copy() + if not self.c.flags.c_contiguous: + self.c = self.c.copy() + + def extend(self, c, x): + """ + Add additional breakpoints and coefficients to the polynomial. + + Parameters + ---------- + c : ndarray, size (k, m, ...) + Additional coefficients for polynomials in intervals. 
Note that + the first additional interval will be formed using one of the + ``self.x`` end points. + x : ndarray, size (m,) + Additional breakpoints. Must be sorted in the same order as + ``self.x`` and either to the right or to the left of the current + breakpoints. + """ + + c = cupy.asarray(c) + x = cupy.asarray(x) + + if c.ndim < 2: + raise ValueError("invalid dimensions for c") + if x.ndim != 1: + raise ValueError("invalid dimensions for x") + if x.shape[0] != c.shape[1]: + raise ValueError("Shapes of x {} and c {} are incompatible" + .format(x.shape, c.shape)) + if c.shape[2:] != self.c.shape[2:] or c.ndim != self.c.ndim: + raise ValueError("Shapes of c {} and self.c {} are incompatible" + .format(c.shape, self.c.shape)) + + if c.size == 0: + return + + dx = cupy.diff(x) + if not (cupy.all(dx >= 0) or cupy.all(dx <= 0)): + raise ValueError("`x` is not sorted.") + + if self.x[-1] >= self.x[0]: + if not x[-1] >= x[0]: + raise ValueError("`x` is in the different order " + "than `self.x`.") + + if x[0] >= self.x[-1]: + action = 'append' + elif x[-1] <= self.x[0]: + action = 'prepend' + else: + raise ValueError("`x` is neither on the left or on the right " + "from `self.x`.") + else: + if not x[-1] <= x[0]: + raise ValueError("`x` is in the different order " + "than `self.x`.") + + if x[0] <= self.x[-1]: + action = 'append' + elif x[-1] >= self.x[0]: + action = 'prepend' + else: + raise ValueError("`x` is neither on the left or on the right " + "from `self.x`.") + + dtype = self._get_dtype(c.dtype) + + k2 = max(c.shape[0], self.c.shape[0]) + c2 = cupy.zeros( + (k2, self.c.shape[1] + c.shape[1]) + self.c.shape[2:], + dtype=dtype) + + if action == 'append': + c2[k2 - self.c.shape[0]:, :self.c.shape[1]] = self.c + c2[k2 - c.shape[0]:, self.c.shape[1]:] = c + self.x = cupy.r_[self.x, x] + elif action == 'prepend': + c2[k2 - self.c.shape[0]:, :c.shape[1]] = c + c2[k2 - c.shape[0]:, c.shape[1]:] = self.c + self.x = cupy.r_[x, self.x] + + self.c = c2 + + def 
__call__(self, x, nu=0, extrapolate=None): + """ + Evaluate the piecewise polynomial or its derivative. + + Parameters + ---------- + x : array_like + Points to evaluate the interpolant at. + nu : int, optional + Order of derivative to evaluate. Must be non-negative. + extrapolate : {bool, 'periodic', None}, optional + If bool, determines whether to extrapolate to out-of-bounds points + based on first and last intervals, or to return NaNs. + If 'periodic', periodic extrapolation is used. + If None (default), use `self.extrapolate`. + + Returns + ------- + y : array_like + Interpolated values. Shape is determined by replacing + the interpolation axis in the original array with the shape of x. + + Notes + ----- + Derivatives are evaluated piecewise for each polynomial + segment, even if the polynomial is not differentiable at the + breakpoints. The polynomial intervals are considered half-open, + ``[a, b)``, except for the last interval which is closed + ``[a, b]``. + """ + if extrapolate is None: + extrapolate = self.extrapolate + x = cupy.asarray(x) + x_shape, x_ndim = x.shape, x.ndim + x = cupy.ascontiguousarray(x.ravel(), dtype=cupy.float64) + + # With periodic extrapolation we map x to the segment + # [self.x[0], self.x[-1]]. 
+ if extrapolate == 'periodic': + x = self.x[0] + (x - self.x[0]) % (self.x[-1] - self.x[0]) + extrapolate = False + + out = cupy.empty((len(x), int(np.prod(self.c.shape[2:]))), + dtype=self.c.dtype) + self._ensure_c_contiguous() + self._evaluate(x, nu, extrapolate, out) + out = out.reshape(x_shape + self.c.shape[2:]) + if self.axis != 0: + # transpose to move the calculated values to the interpolation axis + dims = list(range(out.ndim)) + dims = (dims[x_ndim:x_ndim + self.axis] + dims[:x_ndim] + + dims[x_ndim + self.axis:]) + out = out.transpose(dims) + return out + + +class PPoly(_PPolyBase): + """ + Piecewise polynomial in terms of coefficients and breakpoints + The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the + local power basis:: + + S = sum(c[m, i] * (xp - x[i]) ** (k - m) for m in range(k + 1)) + + where ``k`` is the degree of the polynomial. + + Parameters + ---------- + c : ndarray, shape (k, m, ...) + Polynomial coefficients, order `k` and `m` intervals. + x : ndarray, shape (m+1,) + Polynomial breakpoints. Must be sorted in either increasing or + decreasing order. + extrapolate : bool or 'periodic', optional + If bool, determines whether to extrapolate to out-of-bounds points + based on first and last intervals, or to return NaNs. If 'periodic', + periodic extrapolation is used. Default is True. + axis : int, optional + Interpolation axis. Default is zero. + + Attributes + ---------- + x : ndarray + Breakpoints. + c : ndarray + Coefficients of the polynomials. They are reshaped + to a 3-D array with the last dimension representing + the trailing dimensions of the original coefficient array. + axis : int + Interpolation axis. + + See also + -------- + BPoly : piecewise polynomials in the Bernstein basis + + Notes + ----- + High-order polynomials in the power basis can be numerically + unstable. Precision problems can start to appear for orders + larger than 20-30. + + .. 
seealso:: :class:`scipy.interpolate.BSpline` + """ + + def _evaluate(self, x, nu, extrapolate, out): + _ppoly_evaluate(self.c.reshape(self.c.shape[0], self.c.shape[1], -1), + self.x, x, nu, bool(extrapolate), out) + + def derivative(self, nu=1): + """ + Construct a new piecewise polynomial representing the derivative. + + Parameters + ---------- + nu : int, optional + Order of derivative to evaluate. Default is 1, i.e., compute the + first derivative. If negative, the antiderivative is returned. + + Returns + ------- + pp : PPoly + Piecewise polynomial of order k2 = k - n representing the + derivative of this polynomial. + + Notes + ----- + Derivatives are evaluated piecewise for each polynomial + segment, even if the polynomial is not differentiable at the + breakpoints. The polynomial intervals are considered half-open, + ``[a, b)``, except for the last interval which is closed + ``[a, b]``. + """ + if nu < 0: + return self.antiderivative(-nu) + + # reduce order + if nu == 0: + c2 = self.c.copy() + else: + c2 = self.c[:-nu, :].copy() + + if c2.shape[0] == 0: + # derivative of order 0 is zero + c2 = cupy.zeros((1,) + c2.shape[1:], dtype=c2.dtype) + + # multiply by the correct rising factorials + factor = spec.poch(cupy.arange(c2.shape[0], 0, -1), nu) + c2 *= factor[(slice(None),) + (None,)*(c2.ndim-1)] + + # construct a compatible polynomial + return self.construct_fast(c2, self.x, self.extrapolate, self.axis) + + def antiderivative(self, nu=1): + """ + Construct a new piecewise polynomial representing the antiderivative. + Antiderivative is also the indefinite integral of the function, + and derivative is its inverse operation. + + Parameters + ---------- + nu : int, optional + Order of antiderivative to evaluate. Default is 1, i.e., compute + the first integral. If negative, the derivative is returned. + + Returns + ------- + pp : PPoly + Piecewise polynomial of order k2 = k + n representing + the antiderivative of this polynomial. 
+ + Notes + ----- + The antiderivative returned by this function is continuous and + continuously differentiable to order n-1, up to floating point + rounding error. + + If antiderivative is computed and ``self.extrapolate='periodic'``, + it will be set to False for the returned instance. This is done because + the antiderivative is no longer periodic and its correct evaluation + outside of the initially given x interval is difficult. + """ + if nu <= 0: + return self.derivative(-nu) + + c = cupy.zeros( + (self.c.shape[0] + nu, self.c.shape[1]) + self.c.shape[2:], + dtype=self.c.dtype) + c[:-nu] = self.c + + # divide by the correct rising factorials + factor = spec.poch(cupy.arange(self.c.shape[0], 0, -1), nu) + c[:-nu] /= factor[(slice(None),) + (None,) * (c.ndim-1)] + + # fix continuity of added degrees of freedom + self._ensure_c_contiguous() + _fix_continuity(c.reshape(c.shape[0], c.shape[1], -1), + self.x, nu - 1) + + if self.extrapolate == 'periodic': + extrapolate = False + else: + extrapolate = self.extrapolate + + # construct a compatible polynomial + return self.construct_fast(c, self.x, extrapolate, self.axis) + + def integrate(self, a, b, extrapolate=None): + """ + Compute a definite integral over a piecewise polynomial. + + Parameters + ---------- + a : float + Lower integration bound + b : float + Upper integration bound + extrapolate : {bool, 'periodic', None}, optional + If bool, determines whether to extrapolate to out-of-bounds points + based on first and last intervals, or to return NaNs. + If 'periodic', periodic extrapolation is used. + If None (default), use `self.extrapolate`. 
+ + Returns + ------- + ig : array_like + Definite integral of the piecewise polynomial over [a, b] + """ + if extrapolate is None: + extrapolate = self.extrapolate + + # Swap integration bounds if needed + sign = 1 + if b < a: + a, b = b, a + sign = -1 + + range_int = cupy.empty( + (int(np.prod(self.c.shape[2:])),), dtype=self.c.dtype) + self._ensure_c_contiguous() + + # Compute the integral. + if extrapolate == 'periodic': + # Split the integral into the part over period (can be several + # of them) and the remaining part. + + xs, xe = self.x[0], self.x[-1] + period = xe - xs + interval = b - a + n_periods, left = divmod(interval, period) + + if n_periods > 0: + _integrate( + self.c.reshape(self.c.shape[0], self.c.shape[1], -1), + self.x, xs, xe, False, out=range_int) + range_int *= n_periods + else: + range_int.fill(0) + + # Map a to [xs, xe], b is always a + left. + a = xs + (a - xs) % period + b = a + left + + # If b <= xe then we need to integrate over [a, b], otherwise + # over [a, xe] and from xs to what is remained. + remainder_int = cupy.empty_like(range_int) + if b <= xe: + _integrate( + self.c.reshape(self.c.shape[0], self.c.shape[1], -1), + self.x, a, b, False, out=remainder_int) + range_int += remainder_int + else: + _integrate( + self.c.reshape(self.c.shape[0], self.c.shape[1], -1), + self.x, a, xe, False, out=remainder_int) + range_int += remainder_int + + _integrate( + self.c.reshape(self.c.shape[0], self.c.shape[1], -1), + self.x, xs, xs + left + a - xe, False, out=remainder_int) + range_int += remainder_int + else: + _integrate( + self.c.reshape(self.c.shape[0], self.c.shape[1], -1), + self.x, a, b, bool(extrapolate), out=range_int) + + # Return + range_int *= sign + return range_int.reshape(self.c.shape[2:]) + + def solve(self, y=0., discontinuity=True, extrapolate=None): + """ + Find real solutions of the equation ``pp(x) == y``. + + Parameters + ---------- + y : float, optional + Right-hand side. Default is zero. 
+ discontinuity : bool, optional + Whether to report sign changes across discontinuities at + breakpoints as roots. + extrapolate : {bool, 'periodic', None}, optional + If bool, determines whether to return roots from the polynomial + extrapolated based on first and last intervals, 'periodic' works + the same as False. If None (default), use `self.extrapolate`. + + Returns + ------- + roots : ndarray + Roots of the polynomial(s). + If the PPoly object describes multiple polynomials, the + return value is an object array whose each element is an + ndarray containing the roots. + + Notes + ----- + This routine works only on real-valued polynomials. + If the piecewise polynomial contains sections that are + identically zero, the root list will contain the start point + of the corresponding interval, followed by a ``nan`` value. + If the polynomial is discontinuous across a breakpoint, and + there is a sign change across the breakpoint, this is reported + if the `discont` parameter is True. + + At the moment, there is not an actual implementation. + """ + raise NotImplementedError( + 'At the moment there is not a GPU implementation for solve') + + def roots(self, discontinuity=True, extrapolate=None): + """ + Find real roots of the piecewise polynomial. + + Parameters + ---------- + discontinuity : bool, optional + Whether to report sign changes across discontinuities at + breakpoints as roots. + extrapolate : {bool, 'periodic', None}, optional + If bool, determines whether to return roots from the polynomial + extrapolated based on first and last intervals, 'periodic' works + the same as False. If None (default), use `self.extrapolate`. + + Returns + ------- + roots : ndarray + Roots of the polynomial(s). + If the PPoly object describes multiple polynomials, the + return value is an object array whose each element is an + ndarray containing the roots. 
+ + See Also + -------- + PPoly.solve + """ + return self.solve(0, discontinuity, extrapolate) + + @classmethod + def from_spline(cls, tck, extrapolate=None): + """ + Construct a piecewise polynomial from a spline + + Parameters + ---------- + tck + A spline, as a (knots, coefficients, degree) tuple or + a BSpline object. + extrapolate : bool or 'periodic', optional + If bool, determines whether to extrapolate to out-of-bounds points + based on first and last intervals, or to return NaNs. + If 'periodic', periodic extrapolation is used. Default is True. + """ + if isinstance(tck, BSpline): + t, c, k = tck.tck + if extrapolate is None: + extrapolate = tck.extrapolate + else: + t, c, k = tck + + spl = BSpline(t, c, k, extrapolate=extrapolate) + cvals = cupy.empty((k + 1, len(t) - 1), dtype=c.dtype) + for m in range(k, -1, -1): + y = spl(t[:-1], nu=m) + cvals[k - m, :] = y / spec.gamma(m + 1) + + return cls.construct_fast(cvals, t, extrapolate) + + @classmethod + def from_bernstein_basis(cls, bp, extrapolate=None): + """ + Construct a piecewise polynomial in the power basis + from a polynomial in Bernstein basis. + + Parameters + ---------- + bp : BPoly + A Bernstein basis polynomial, as created by BPoly + extrapolate : bool or 'periodic', optional + If bool, determines whether to extrapolate to out-of-bounds points + based on first and last intervals, or to return NaNs. + If 'periodic', periodic extrapolation is used. Default is True. + """ + if not isinstance(bp, BPoly): + raise TypeError(".from_bernstein_basis only accepts BPoly " + "instances. Got %s instead." 
% type(bp)) + + dx = cupy.diff(bp.x) + k = bp.c.shape[0] - 1 # polynomial order + + rest = (None,)*(bp.c.ndim-2) + + c = cupy.zeros_like(bp.c) + for a in range(k+1): + factor = (-1)**a * _comb(k, a) * bp.c[a] + for s in range(a, k+1): + val = _comb(k-a, s-a) * (-1)**s + c[k-s] += factor * val / dx[(slice(None),)+rest]**s + + if extrapolate is None: + extrapolate = bp.extrapolate + + return cls.construct_fast(c, bp.x, extrapolate, bp.axis) + + +class BPoly(_PPolyBase): + """ + Piecewise polynomial in terms of coefficients and breakpoints. + + The polynomial between ``x[i]`` and ``x[i + 1]`` is written in the + + Bernstein polynomial basis:: + + S = sum(c[a, i] * b(a, k; x) for a in range(k+1)), + + where ``k`` is the degree of the polynomial, and:: + + b(a, k; x) = binom(k, a) * t**a * (1 - t)**(k - a), + + with ``t = (x - x[i]) / (x[i+1] - x[i])`` and ``binom`` is the binomial + coefficient. + + Parameters + ---------- + c : ndarray, shape (k, m, ...) + Polynomial coefficients, order `k` and `m` intervals + x : ndarray, shape (m+1,) + Polynomial breakpoints. Must be sorted in either increasing or + decreasing order. + extrapolate : bool, optional + If bool, determines whether to extrapolate to out-of-bounds points + based on first and last intervals, or to return NaNs. If 'periodic', + periodic extrapolation is used. Default is True. + axis : int, optional + Interpolation axis. Default is zero. + + Attributes + ---------- + x : ndarray + Breakpoints. + c : ndarray + Coefficients of the polynomials. They are reshaped + to a 3-D array with the last dimension representing + the trailing dimensions of the original coefficient array. + axis : int + Interpolation axis. + + See also + -------- + PPoly : piecewise polynomials in the power basis + + Notes + ----- + Properties of Bernstein polynomials are well documented in the literature, + see for example [1]_ [2]_ [3]_. + + References + ---------- + .. [1] https://en.wikipedia.org/wiki/Bernstein_polynomial + .. 
[2] Kenneth I. Joy, Bernstein polynomials, + http://www.idav.ucdavis.edu/education/CAGDNotes/Bernstein-Polynomials.pdf + .. [3] E. H. Doha, A. H. Bhrawy, and M. A. Saker, Boundary Value Problems, + vol 2011, article ID 829546, + `10.1155/2011/829543 `_. + + Examples + -------- + >>> from cupyx.scipy.interpolate import BPoly + >>> x = [0, 1] + >>> c = [[1], [2], [3]] + >>> bp = BPoly(c, x) + + This creates a 2nd order polynomial + + .. math:: + + B(x) = 1 \\times b_{0, 2}(x) + 2 \\times b_{1, 2}(x) + + 3 \\times b_{2, 2}(x) \\\\ + = 1 \\times (1-x)^2 + 2 \\times 2 x (1 - x) + 3 \\times x^2 + """ + + def _evaluate(self, x, nu, extrapolate, out): + # check derivative order + if nu < 0: + raise NotImplementedError( + "Cannot do antiderivatives in the B-basis yet.") + + _bpoly_evaluate( + self.c.reshape(self.c.shape[0], self.c.shape[1], -1), + self.x, x, nu, bool(extrapolate), out) + + def derivative(self, nu=1): + """ + Construct a new piecewise polynomial representing the derivative. + + Parameters + ---------- + nu : int, optional + Order of derivative to evaluate. Default is 1, i.e., compute the + first derivative. If negative, the antiderivative is returned. + + Returns + ------- + bp : BPoly + Piecewise polynomial of order k - nu representing the derivative of + this polynomial. 
+ """ + if nu < 0: + return self.antiderivative(-nu) + + if nu > 1: + bp = self + for k in range(nu): + bp = bp.derivative() + return bp + + # reduce order + if nu == 0: + c2 = self.c.copy() + else: + # For a polynomial + # B(x) = \sum_{a=0}^{k} c_a b_{a, k}(x), + # we use the fact that + # b'_{a, k} = k ( b_{a-1, k-1} - b_{a, k-1} ), + # which leads to + # B'(x) = \sum_{a=0}^{k-1} (c_{a+1} - c_a) b_{a, k-1} + # + # finally, for an interval [y, y + dy] with dy != 1, + # we need to correct for an extra power of dy + + rest = (None,) * (self.c.ndim-2) + + k = self.c.shape[0] - 1 + dx = cupy.diff(self.x)[(None, slice(None))+rest] + c2 = k * cupy.diff(self.c, axis=0) / dx + + if c2.shape[0] == 0: + # derivative of order 0 is zero + c2 = cupy.zeros((1,) + c2.shape[1:], dtype=c2.dtype) + + # construct a compatible polynomial + return self.construct_fast(c2, self.x, self.extrapolate, self.axis) + + def antiderivative(self, nu=1): + """ + Construct a new piecewise polynomial representing the antiderivative. + + Parameters + ---------- + nu : int, optional + Order of antiderivative to evaluate. Default is 1, i.e., compute + the first integral. If negative, the derivative is returned. + + Returns + ------- + bp : BPoly + Piecewise polynomial of order k + nu representing the + antiderivative of this polynomial. + + Notes + ----- + If antiderivative is computed and ``self.extrapolate='periodic'``, + it will be set to False for the returned instance. This is done because + the antiderivative is no longer periodic and its correct evaluation + outside of the initially given x interval is difficult. + """ + if nu <= 0: + return self.derivative(-nu) + + if nu > 1: + bp = self + for k in range(nu): + bp = bp.antiderivative() + return bp + + # Construct the indefinite integrals on individual intervals + c, x = self.c, self.x + k = c.shape[0] + c2 = cupy.zeros((k+1,) + c.shape[1:], dtype=c.dtype) + + c2[1:, ...] 
= cupy.cumsum(c, axis=0) / k + delta = x[1:] - x[:-1] + c2 *= delta[(None, slice(None)) + (None,)*(c.ndim-2)] + + # Now fix continuity: on the very first interval, take the integration + # constant to be zero; on an interval [x_j, x_{j+1}) with j>0, + # the integration constant is then equal to the jump of the `bp` + # at x_j. + # The latter is given by the coefficient of B_{n+1, n+1} + # *on the previous interval* (other B. polynomials are zero at the + # breakpoint). Finally, use the fact that BPs form a partition of + # unity. + c2[:, 1:] += cupy.cumsum(c2[k, :], axis=0)[:-1] + + if self.extrapolate == 'periodic': + extrapolate = False + else: + extrapolate = self.extrapolate + + return self.construct_fast(c2, x, extrapolate, axis=self.axis) + + def integrate(self, a, b, extrapolate=None): + """ + Compute a definite integral over a piecewise polynomial. + + Parameters + ---------- + a : float + Lower integration bound + b : float + Upper integration bound + extrapolate : {bool, 'periodic', None}, optional + Whether to extrapolate to out-of-bounds points based on first + and last intervals, or to return NaNs. If 'periodic', periodic + extrapolation is used. If None (default), use `self.extrapolate`. + + Returns + ------- + array_like + Definite integral of the piecewise polynomial over [a, b] + """ + # XXX: can probably use instead the fact that + # \int_0^{1} B_{j, n}(x) \dx = 1/(n+1) + ib = self.antiderivative() + if extrapolate is None: + extrapolate = self.extrapolate + + # ib.extrapolate shouldn't be 'periodic', it is converted to + # False for 'periodic. in antiderivative() call. + if extrapolate != 'periodic': + ib.extrapolate = extrapolate + + if extrapolate == 'periodic': + # Split the integral into the part over period (can be several + # of them) and the remaining part. + + # For simplicity and clarity convert to a <= b case. 
+ if a <= b: + sign = 1 + else: + a, b = b, a + sign = -1 + + xs, xe = self.x[0], self.x[-1] + period = xe - xs + interval = b - a + n_periods, left = divmod(interval, period) + res = n_periods * (ib(xe) - ib(xs)) + + # Map a and b to [xs, xe]. + a = xs + (a - xs) % period + b = a + left + + # If b <= xe then we need to integrate over [a, b], otherwise + # over [a, xe] and from xs to what is remained. + if b <= xe: + res += ib(b) - ib(a) + else: + res += ib(xe) - ib(a) + ib(xs + left + a - xe) - ib(xs) + + return sign * res + else: + return ib(b) - ib(a) + + def extend(self, c, x): + k = max(self.c.shape[0], c.shape[0]) + self.c = self._raise_degree(self.c, k - self.c.shape[0]) + c = self._raise_degree(c, k - c.shape[0]) + return _PPolyBase.extend(self, c, x) + extend.__doc__ = _PPolyBase.extend.__doc__ + + @staticmethod + def _raise_degree(c, d): + r""" + Raise a degree of a polynomial in the Bernstein basis. + + Given the coefficients of a polynomial degree `k`, return (the + coefficients of) the equivalent polynomial of degree `k+d`. + + Parameters + ---------- + c : array_like + coefficient array, 1-D + d : integer + + Returns + ------- + array + coefficient array, 1-D array of length `c.shape[0] + d` + + Notes + ----- + This uses the fact that a Bernstein polynomial `b_{a, k}` can be + identically represented as a linear combination of polynomials of + a higher degree `k+d`: + + .. math:: b_{a, k} = comb(k, a) \sum_{j=0}^{d} b_{a+j, k+d} \ + comb(d, j) / comb(k+d, a+j) + """ + if d == 0: + return c + + k = c.shape[0] - 1 + out = cupy.zeros((c.shape[0] + d,) + c.shape[1:], dtype=c.dtype) + + for a in range(c.shape[0]): + f = c[a] * _comb(k, a) + for j in range(d + 1): + out[a + j] += f * _comb(d, j) / _comb(k + d, a + j) + return out + + @classmethod + def from_power_basis(cls, pp, extrapolate=None): + """ + Construct a piecewise polynomial in Bernstein basis + from a power basis polynomial. 
+ + Parameters + ---------- + pp : PPoly + A piecewise polynomial in the power basis + extrapolate : bool or 'periodic', optional + If bool, determines whether to extrapolate to out-of-bounds points + based on first and last intervals, or to return NaNs. + If 'periodic', periodic extrapolation is used. Default is True. + """ + if not isinstance(pp, PPoly): + raise TypeError(".from_power_basis only accepts PPoly instances. " + "Got %s instead." % type(pp)) + + dx = cupy.diff(pp.x) + k = pp.c.shape[0] - 1 # polynomial order + + rest = (None,)*(pp.c.ndim-2) + + c = cupy.zeros_like(pp.c) + for a in range(k+1): + factor = pp.c[a] / _comb(k, k-a) * dx[(slice(None),)+rest]**(k-a) + for j in range(k-a, k+1): + c[j] += factor * _comb(j, k-a) + + if extrapolate is None: + extrapolate = pp.extrapolate + + return cls.construct_fast(c, pp.x, extrapolate, pp.axis) + + @classmethod + def from_derivatives(cls, xi, yi, orders=None, extrapolate=None): + """ + Construct a piecewise polynomial in the Bernstein basis, + compatible with the specified values and derivatives at breakpoints. + + Parameters + ---------- + xi : array_like + sorted 1-D array of x-coordinates + yi : array_like or list of array_likes + ``yi[i][j]`` is the ``j`` th derivative known at ``xi[i]`` + orders : None or int or array_like of ints. Default: None. + Specifies the degree of local polynomials. If not None, some + derivatives are ignored. + extrapolate : bool or 'periodic', optional + If bool, determines whether to extrapolate to out-of-bounds points + based on first and last intervals, or to return NaNs. + If 'periodic', periodic extrapolation is used. Default is True. + + Notes + ----- + If ``k`` derivatives are specified at a breakpoint ``x``, the + constructed polynomial is exactly ``k`` times continuously + differentiable at ``x``, unless the ``order`` is provided explicitly. + In the latter case, the smoothness of the polynomial at + the breakpoint is controlled by the ``order``. 
+ + Deduces the number of derivatives to match at each end + from ``order`` and the number of derivatives available. If + possible it uses the same number of derivatives from + each end; if the number is odd it tries to take the + extra one from y2. In any case if not enough derivatives + are available at one end or another it draws enough to + make up the total from the other end. + + If the order is too high and not enough derivatives are available, + an exception is raised. + + Examples + -------- + >>> from cupyx.scipy.interpolate import BPoly + >>> BPoly.from_derivatives([0, 1], [[1, 2], [3, 4]]) + + Creates a polynomial `f(x)` of degree 3, defined on `[0, 1]` + such that `f(0) = 1, df/dx(0) = 2, f(1) = 3, df/dx(1) = 4` + + >>> BPoly.from_derivatives([0, 1, 2], [[0, 1], [0], [2]]) + + Creates a piecewise polynomial `f(x)`, such that + `f(0) = f(1) = 0`, `f(2) = 2`, and `df/dx(0) = 1`. + Based on the number of derivatives provided, the order of the + local polynomials is 2 on `[0, 1]` and 1 on `[1, 2]`. + Notice that no restriction is imposed on the derivatives at + ``x = 1`` and ``x = 2``. + + Indeed, the explicit form of the polynomial is:: + + f(x) = | x * (1 - x), 0 <= x < 1 + | 2 * (x - 1), 1 <= x <= 2 + + So that f'(1-0) = -1 and f'(1+0) = 2 + """ + xi = cupy.asarray(xi) + if len(xi) != len(yi): + raise ValueError("xi and yi need to have the same length") + if cupy.any(xi[1:] - xi[:1] <= 0): + raise ValueError("x coordinates are not in increasing order") + + # number of intervals + m = len(xi) - 1 + + # global poly order is k-1, local orders are <=k and can vary + try: + k = max(len(yi[i]) + len(yi[i+1]) for i in range(m)) + except TypeError as e: + raise ValueError( + "Using a 1-D array for y? Please .reshape(-1, 1)." 
+            ) from e
+
+        if orders is None:
+            orders = [None] * m
+        else:
+            if isinstance(orders, (int, cupy.integer)):
+                orders = [orders] * m
+            k = max(k, max(orders))
+
+            if any(o <= 0 for o in orders):
+                raise ValueError("Orders must be positive.")
+
+        c = []
+        for i in range(m):
+            y1, y2 = yi[i], yi[i+1]
+            if orders[i] is None:
+                n1, n2 = len(y1), len(y2)
+            else:
+                n = orders[i]+1
+                n1 = min(n//2, len(y1))
+                n2 = min(n - n1, len(y2))
+                n1 = min(n - n2, len(y1))  # re-cap by derivatives at the *left* end
+                if n1+n2 != n:
+                    mesg = ("Point %g has %d derivatives, point %g"
+                            " has %d derivatives, but order %d requested" % (
+                                xi[i], len(y1), xi[i+1], len(y2), orders[i]))
+                    raise ValueError(mesg)
+
+                if not (n1 <= len(y1) and n2 <= len(y2)):
+                    raise ValueError("`order` input incompatible with"
+                                     " length y1 or y2.")
+
+            b = BPoly._construct_from_derivatives(xi[i], xi[i+1],
+                                                  y1[:n1], y2[:n2])
+            if len(b) < k:
+                b = BPoly._raise_degree(b, k - len(b))
+            c.append(b)
+
+        c = cupy.asarray(c)
+        return cls(c.swapaxes(0, 1), xi, extrapolate)
+
+    @staticmethod
+    def _construct_from_derivatives(xa, xb, ya, yb):
+        r"""
+        Compute the coefficients of a polynomial in the Bernstein basis
+        given the values and derivatives at the edges.
+
+        Return the coefficients of a polynomial in the Bernstein basis
+        defined on ``[xa, xb]`` and having the values and derivatives at the
+        endpoints `xa` and `xb` as specified by `ya` and `yb`.
+
+        The polynomial constructed is of the minimal possible degree, i.e.,
+        if the lengths of `ya` and `yb` are `na` and `nb`, the degree
+        of the polynomial is ``na + nb - 1``.
+
+        Parameters
+        ----------
+        xa : float
+            Left-hand end point of the interval
+        xb : float
+            Right-hand end point of the interval
+        ya : array_like
+            Derivatives at `xa`. `ya[0]` is the value of the function, and
+            `ya[i]` for ``i > 0`` is the value of the ``i``th derivative.
+        yb : array_like
+            Derivatives at `xb`.
+ + Returns + ------- + array + coefficient array of a polynomial having specified derivatives + + Notes + ----- + This uses several facts from life of Bernstein basis functions. + First of all, + + .. math:: b'_{a, n} = n (b_{a-1, n-1} - b_{a, n-1}) + + If B(x) is a linear combination of the form + + .. math:: B(x) = \sum_{a=0}^{n} c_a b_{a, n}, + + then :math: B'(x) = n \sum_{a=0}^{n-1} (c_{a+1} - c_{a}) b_{a, n-1}. + Iterating the latter one, one finds for the q-th derivative + + .. math:: B^{q}(x) = n!/(n-q)! \sum_{a=0}^{n-q} Q_a b_{a, n-q}, + + with + + .. math:: Q_a = \sum_{j=0}^{q} (-)^{j+q} comb(q, j) c_{j+a} + + This way, only `a=0` contributes to :math: `B^{q}(x = xa)`, and + `c_q` are found one by one by iterating `q = 0, ..., na`. + + At ``x = xb`` it's the same with ``a = n - q``. + """ + ya, yb = cupy.asarray(ya), cupy.asarray(yb) + if ya.shape[1:] != yb.shape[1:]: + raise ValueError('Shapes of ya {} and yb {} are incompatible' + .format(ya.shape, yb.shape)) + + dta, dtb = ya.dtype, yb.dtype + if (cupy.issubdtype(dta, cupy.complexfloating) or + cupy.issubdtype(dtb, cupy.complexfloating)): + dt = cupy.complex128 + else: + dt = cupy.float64 + + na, nb = len(ya), len(yb) + n = na + nb + + c = cupy.empty((na+nb,) + ya.shape[1:], dtype=dt) + + # compute coefficients of a polynomial degree na+nb-1 + # walk left-to-right + for q in range(0, na): + c[q] = ya[q] / spec.poch(n - q, q) * (xb - xa)**q + for j in range(0, q): + c[q] -= (-1)**(j+q) * _comb(q, j) * c[j] + + # now walk right-to-left + for q in range(0, nb): + c[-q-1] = yb[q] / spec.poch(n - q, q) * (-1)**q * (xb - xa)**q + for j in range(0, q): + c[-q-1] -= (-1)**(j+1) * _comb(q, j+1) * c[-q+j] + + return c + + +class NdPPoly: + """ + Piecewise tensor product polynomial + + The value at point ``xp = (x', y', z', ...)`` is evaluated by first + computing the interval indices `i` such that:: + + x[0][i[0]] <= x' < x[0][i[0]+1] + x[1][i[1]] <= y' < x[1][i[1]+1] + ... 
+ + and then computing:: + + S = sum(c[k0-m0-1,...,kn-mn-1,i[0],...,i[n]] + * (xp[0] - x[0][i[0]])**m0 + * ... + * (xp[n] - x[n][i[n]])**mn + for m0 in range(k[0]+1) + ... + for mn in range(k[n]+1)) + + where ``k[j]`` is the degree of the polynomial in dimension j. This + representation is the piecewise multivariate power basis. + + Parameters + ---------- + c : ndarray, shape (k0, ..., kn, m0, ..., mn, ...) + Polynomial coefficients, with polynomial order `kj` and + `mj+1` intervals for each dimension `j`. + x : ndim-tuple of ndarrays, shapes (mj+1,) + Polynomial breakpoints for each dimension. These must be + sorted in increasing order. + extrapolate : bool, optional + Whether to extrapolate to out-of-bounds points based on first + and last intervals, or to return NaNs. Default: True. + + Attributes + ---------- + x : tuple of ndarrays + Breakpoints. + c : ndarray + Coefficients of the polynomials. + + See also + -------- + PPoly : piecewise polynomials in 1D + + Notes + ----- + High-order polynomials in the power basis can be numerically + unstable. 
+ """ + + def __init__(self, c, x, extrapolate=None): + self.x = tuple(cupy.ascontiguousarray( + v, dtype=cupy.float64) for v in x) + self.c = cupy.asarray(c) + if extrapolate is None: + extrapolate = True + self.extrapolate = bool(extrapolate) + + ndim = len(self.x) + if any(v.ndim != 1 for v in self.x): + raise ValueError("x arrays must all be 1-dimensional") + if any(v.size < 2 for v in self.x): + raise ValueError("x arrays must all contain at least 2 points") + if c.ndim < 2*ndim: + raise ValueError("c must have at least 2*len(x) dimensions") + if any(cupy.any(v[1:] - v[:-1] < 0) for v in self.x): + raise ValueError("x-coordinates are not in increasing order") + if any(a != b.size - 1 for a, b in zip(c.shape[ndim:2*ndim], self.x)): + raise ValueError("x and c do not agree on the number of intervals") + + dtype = self._get_dtype(self.c.dtype) + self.c = cupy.ascontiguousarray(self.c, dtype=dtype) + + @classmethod + def construct_fast(cls, c, x, extrapolate=None): + """ + Construct the piecewise polynomial without making checks. + + Takes the same parameters as the constructor. Input arguments + ``c`` and ``x`` must be arrays of the correct shape and type. The + ``c`` array can only be of dtypes float and complex, and ``x`` + array must have dtype float. + """ + self = object.__new__(cls) + self.c = c + self.x = x + if extrapolate is None: + extrapolate = True + self.extrapolate = extrapolate + return self + + def _get_dtype(self, dtype): + if (cupy.issubdtype(dtype, cupy.complexfloating) + or cupy.issubdtype(self.c.dtype, cupy.complexfloating)): + return cupy.complex128 + else: + return cupy.float64 + + def _ensure_c_contiguous(self): + if not self.c.flags.c_contiguous: + self.c = self.c.copy() + if not isinstance(self.x, tuple): + self.x = tuple(self.x) + + def __call__(self, x, nu=None, extrapolate=None): + """ + Evaluate the piecewise polynomial or its derivative + + Parameters + ---------- + x : array-like + Points to evaluate the interpolant at. 
+ nu : tuple, optional + Orders of derivatives to evaluate. Each must be non-negative. + extrapolate : bool, optional + Whether to extrapolate to out-of-bounds points based on first + and last intervals, or to return NaNs. + + Returns + ------- + y : array-like + Interpolated values. Shape is determined by replacing + the interpolation axis in the original array with the shape of x. + + Notes + ----- + Derivatives are evaluated piecewise for each polynomial + segment, even if the polynomial is not differentiable at the + breakpoints. The polynomial intervals are considered half-open, + ``[a, b)``, except for the last interval which is closed + ``[a, b]``. + """ + if extrapolate is None: + extrapolate = self.extrapolate + else: + extrapolate = bool(extrapolate) + + ndim = len(self.x) + + x = _ndim_coords_from_arrays(x) + x_shape = x.shape + x = cupy.ascontiguousarray(x.reshape(-1, x.shape[-1]), + dtype=cupy.float64) + + if nu is None: + nu = cupy.zeros((ndim,), dtype=cupy.int64) + else: + nu = cupy.asarray(nu, dtype=cupy.int64) + if nu.ndim != 1 or nu.shape[0] != ndim: + raise ValueError("invalid number of derivative orders nu") + + dim1 = int(np.prod(self.c.shape[:ndim])) + dim2 = int(np.prod(self.c.shape[ndim:2*ndim])) + dim3 = int(np.prod(self.c.shape[2*ndim:])) + ks = cupy.asarray(self.c.shape[:ndim], dtype=cupy.int64) + + out = cupy.empty((x.shape[0], dim3), dtype=self.c.dtype) + self._ensure_c_contiguous() + + _ndppoly_evaluate( + self.c.reshape(dim1, dim2, dim3), self.x, ks, x, nu, + bool(extrapolate), out) + + return out.reshape(x_shape[:-1] + self.c.shape[2*ndim:]) + + def _derivative_inplace(self, nu, axis): + """ + Compute 1-D derivative along a selected dimension in-place + May result to non-contiguous c array. 
+ """ + if nu < 0: + return self._antiderivative_inplace(-nu, axis) + + ndim = len(self.x) + axis = axis % ndim + + # reduce order + if nu == 0: + # noop + return + else: + sl = [slice(None)]*ndim + sl[axis] = slice(None, -nu, None) + c2 = self.c[tuple(sl)] + + if c2.shape[axis] == 0: + # derivative of order 0 is zero + shp = list(c2.shape) + shp[axis] = 1 + c2 = cupy.zeros(shp, dtype=c2.dtype) + + # multiply by the correct rising factorials + factor = spec.poch(cupy.arange(c2.shape[axis], 0, -1), nu) + sl = [None] * c2.ndim + sl[axis] = slice(None) + c2 *= factor[tuple(sl)] + + self.c = c2 + + def _antiderivative_inplace(self, nu, axis): + """ + Compute 1-D antiderivative along a selected dimension + May result to non-contiguous c array. + """ + if nu <= 0: + return self._derivative_inplace(-nu, axis) + + ndim = len(self.x) + axis = axis % ndim + + perm = list(range(ndim)) + perm[0], perm[axis] = perm[axis], perm[0] + perm = perm + list(range(ndim, self.c.ndim)) + + c = self.c.transpose(perm) + + c2 = cupy.zeros((c.shape[0] + nu,) + c.shape[1:], + dtype=c.dtype) + c2[:-nu] = c + + # divide by the correct rising factorials + factor = spec.poch(cupy.arange(c.shape[0], 0, -1), nu) + c2[:-nu] /= factor[(slice(None),) + (None,)*(c.ndim-1)] + + # fix continuity of added degrees of freedom + perm2 = list(range(c2.ndim)) + perm2[1], perm2[ndim+axis] = perm2[ndim+axis], perm2[1] + + c2 = c2.transpose(perm2) + c2 = c2.copy() + _fix_continuity( + c2.reshape(c2.shape[0], c2.shape[1], -1), self.x[axis], nu - 1) + + c2 = c2.transpose(perm2) + c2 = c2.transpose(perm) + + # Done + self.c = c2 + + def derivative(self, nu): + """ + Construct a new piecewise polynomial representing the derivative. + + Parameters + ---------- + nu : ndim-tuple of int + Order of derivatives to evaluate for each dimension. + If negative, the antiderivative is returned. 
+ + Returns + ------- + pp : NdPPoly + Piecewise polynomial of orders (k[0] - nu[0], ..., k[n] - nu[n]) + representing the derivative of this polynomial. + + Notes + ----- + Derivatives are evaluated piecewise for each polynomial + segment, even if the polynomial is not differentiable at the + breakpoints. The polynomial intervals in each dimension are + considered half-open, ``[a, b)``, except for the last interval + which is closed ``[a, b]``. + """ + p = self.construct_fast(self.c.copy(), self.x, self.extrapolate) + + for axis, n in enumerate(nu): + p._derivative_inplace(n, axis) + + p._ensure_c_contiguous() + return p + + def antiderivative(self, nu): + """ + Construct a new piecewise polynomial representing the antiderivative. + Antiderivative is also the indefinite integral of the function, + and derivative is its inverse operation. + + Parameters + ---------- + nu : ndim-tuple of int + Order of derivatives to evaluate for each dimension. + If negative, the derivative is returned. + + Returns + ------- + pp : PPoly + Piecewise polynomial of order k2 = k + n representing + the antiderivative of this polynomial. + + Notes + ----- + The antiderivative returned by this function is continuous and + continuously differentiable to order n-1, up to floating point + rounding error. + """ + p = self.construct_fast(self.c.copy(), self.x, self.extrapolate) + + for axis, n in enumerate(nu): + p._antiderivative_inplace(n, axis) + + p._ensure_c_contiguous() + return p + + def integrate_1d(self, a, b, axis, extrapolate=None): + r""" + Compute NdPPoly representation for one dimensional definite integral + The result is a piecewise polynomial representing the integral: + + .. math:: + p(y, z, ...) = \int_a^b dx\, p(x, y, z, ...) + + where the dimension integrated over is specified with the + `axis` parameter. + + Parameters + ---------- + a, b : float + Lower and upper bound for integration. 
+ axis : int + Dimension over which to compute the 1-D integrals + extrapolate : bool, optional + Whether to extrapolate to out-of-bounds points based on first + and last intervals, or to return NaNs. + + Returns + ------- + ig : NdPPoly or array-like + Definite integral of the piecewise polynomial over [a, b]. + If the polynomial was 1D, an array is returned, + otherwise, an NdPPoly object. + """ + if extrapolate is None: + extrapolate = self.extrapolate + else: + extrapolate = bool(extrapolate) + + ndim = len(self.x) + axis = int(axis) % ndim + + # reuse 1-D integration routines + c = self.c + swap = list(range(c.ndim)) + swap.insert(0, swap[axis]) + del swap[axis + 1] + swap.insert(1, swap[ndim + axis]) + del swap[ndim + axis + 1] + + c = c.transpose(swap) + p = PPoly.construct_fast(c.reshape(c.shape[0], c.shape[1], -1), + self.x[axis], + extrapolate=extrapolate) + out = p.integrate(a, b, extrapolate=extrapolate) + + # Construct result + if ndim == 1: + return out.reshape(c.shape[2:]) + else: + c = out.reshape(c.shape[2:]) + x = self.x[:axis] + self.x[axis+1:] + return self.construct_fast(c, x, extrapolate=extrapolate) + + def integrate(self, ranges, extrapolate=None): + """ + Compute a definite integral over a piecewise polynomial. + + Parameters + ---------- + ranges : ndim-tuple of 2-tuples float + Sequence of lower and upper bounds for each dimension, + ``[(a[0], b[0]), ..., (a[ndim-1], b[ndim-1])]`` + extrapolate : bool, optional + Whether to extrapolate to out-of-bounds points based on first + and last intervals, or to return NaNs. + + Returns + ------- + ig : array_like + Definite integral of the piecewise polynomial over + [a[0], b[0]] x ... 
x [a[ndim-1], b[ndim-1]] + """ + + ndim = len(self.x) + + if extrapolate is None: + extrapolate = self.extrapolate + else: + extrapolate = bool(extrapolate) + + if not hasattr(ranges, '__len__') or len(ranges) != ndim: + raise ValueError("Range not a sequence of correct length") + + self._ensure_c_contiguous() + + # Reuse 1D integration routine + c = self.c + for n, (a, b) in enumerate(ranges): + swap = list(range(c.ndim)) + swap.insert(1, swap[ndim - n]) + del swap[ndim - n + 1] + + c = c.transpose(swap) + + p = PPoly.construct_fast(c, self.x[n], extrapolate=extrapolate) + out = p.integrate(a, b, extrapolate=extrapolate) + c = out.reshape(c.shape[2:]) + + return c diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/_polyint.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/_polyint.py new file mode 100644 index 0000000000000000000000000000000000000000..e510a975e120a13422f9d644eceb134393b9ec68 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/_polyint.py @@ -0,0 +1,527 @@ +import cupy +from cupyx.scipy._lib._util import _asarray_validated, float_factorial + + +def _isscalar(x): + """Check whether x is if a scalar type, or 0-dim""" + return cupy.isscalar(x) or hasattr(x, 'shape') and x.shape == () + + +class _Interpolator1D: + """Common features in univariate interpolation. + + Deal with input data type and interpolation axis rolling. The + actual interpolator can assume the y-data is of shape (n, r) where + `n` is the number of x-points, and `r` the number of variables, + and use self.dtype as the y-data type. + + Attributes + ---------- + _y_axis : Axis along which the interpolation goes in the + original array + _y_extra_shape : Additional shape of the input arrays, excluding + the interpolation axis + dtype : Dtype of the y-data arrays. 
It can be set via _set_dtype, + which forces it to be float or complex + + Methods + ------- + __call__ + _prepare_x + _finish_y + _reshape_y + _reshape_yi + _set_yi + _set_dtype + _evaluate + + """ + + def __init__(self, xi=None, yi=None, axis=None): + self._y_axis = axis + self._y_extra_shape = None + self.dtype = None + if yi is not None: + self._set_yi(yi, xi=xi, axis=axis) + + def __call__(self, x): + """Evaluate the interpolant + + Parameters + ---------- + x : cupy.ndarray + The points to evaluate the interpolant + + Returns + ------- + y : cupy.ndarray + Interpolated values. Shape is determined by replacing + the interpolation axis in the original array with the shape of x + + Notes + ----- + Input values `x` must be convertible to `float` values like `int` + or `float`. + + """ + x, x_shape = self._prepare_x(x) + y = self._evaluate(x) + return self._finish_y(y, x_shape) + + def _evaluate(self, x): + """ + Actually evaluate the value of the interpolator + """ + raise NotImplementedError() + + def _prepare_x(self, x): + """ + Reshape input array to 1-D + """ + x = _asarray_validated(x, check_finite=False, as_inexact=True) + x_shape = x.shape + return x.ravel(), x_shape + + def _finish_y(self, y, x_shape): + """ + Reshape interpolated y back to an N-D array similar to initial y + """ + y = y.reshape(x_shape + self._y_extra_shape) + if self._y_axis != 0 and x_shape != (): + nx = len(x_shape) + ny = len(self._y_extra_shape) + s = (list(range(nx, nx + self._y_axis)) + + list(range(nx)) + list(range(nx + self._y_axis, nx + ny))) + y = y.transpose(s) + return y + + def _reshape_yi(self, yi, check=False): + """ + Reshape the updated yi to a 1-D array + """ + yi = cupy.moveaxis(yi, self._y_axis, 0) + if check and yi.shape[1:] != self._y_extra_shape: + ok_shape = "%r + (N,) + %r" % (self._y_extra_shape[-self._y_axis:], + self._y_extra_shape[:-self._y_axis]) + raise ValueError("Data must be of shape %s" % ok_shape) + return yi.reshape((yi.shape[0], -1)) + + def 
_set_yi(self, yi, xi=None, axis=None): + if axis is None: + axis = self._y_axis + if axis is None: + raise ValueError("no interpolation axis specified") + + shape = yi.shape + if shape == (): + shape = (1,) + if xi is not None and shape[axis] != len(xi): + raise ValueError("x and y arrays must be equal in length along " + "interpolation axis.") + + self._y_axis = (axis % yi.ndim) + self._y_extra_shape = yi.shape[:self._y_axis]+yi.shape[self._y_axis+1:] + self.dtype = None + self._set_dtype(yi.dtype) + + def _set_dtype(self, dtype, union=False): + if cupy.issubdtype(dtype, cupy.complexfloating) \ + or cupy.issubdtype(self.dtype, cupy.complexfloating): + self.dtype = cupy.complex128 + else: + if not union or self.dtype != cupy.complex128: + self.dtype = cupy.float64 + + +class _Interpolator1DWithDerivatives(_Interpolator1D): + + def derivatives(self, x, der=None): + """Evaluate many derivatives of the polynomial at the point x. + + The function produce an array of all derivative values at + the point x. + + Parameters + ---------- + x : cupy.ndarray + Point or points at which to evaluate the derivatives + der : int or None, optional + How many derivatives to extract; None for all potentially + nonzero derivatives (that is a number equal to the number + of points). This number includes the function value as 0th + derivative + + Returns + ------- + d : cupy.ndarray + Array with derivatives; d[j] contains the jth derivative. 
+ Shape of d[j] is determined by replacing the interpolation + axis in the original array with the shape of x + + """ + x, x_shape = self._prepare_x(x) + y = self._evaluate_derivatives(x, der) + + y = y.reshape((y.shape[0],) + x_shape + self._y_extra_shape) + if self._y_axis != 0 and x_shape != (): + nx = len(x_shape) + ny = len(self._y_extra_shape) + s = ([0] + list(range(nx+1, nx + self._y_axis+1)) + + list(range(1, nx+1)) + + list(range(nx+1+self._y_axis, nx+ny+1))) + y = y.transpose(s) + return y + + def derivative(self, x, der=1): + """Evaluate one derivative of the polynomial at the point x + + Parameters + ---------- + x : cupy.ndarray + Point or points at which to evaluate the derivatives + der : integer, optional + Which derivative to extract. This number includes the + function value as 0th derivative + + Returns + ------- + d : cupy.ndarray + Derivative interpolated at the x-points. Shape of d is + determined by replacing the interpolation axis in the + original array with the shape of x + + Notes + ----- + This is computed by evaluating all derivatives up to the desired + one (using self.derivatives()) and then discarding the rest. + + """ + x, x_shape = self._prepare_x(x) + y = self._evaluate_derivatives(x, der+1) + return self._finish_y(y[der], x_shape) + + +class BarycentricInterpolator(_Interpolator1D): + """The interpolating polynomial for a set of points. + + Constructs a polynomial that passes through a given set of points. + Allows evaluation of the polynomial, efficient changing of the y + values to be interpolated, and updating by adding more x values. + For reasons of numerical stability, this function does not compute + the coefficients of the polynomial. + The value `yi` need to be provided before the function is + evaluated, but none of the preprocessing depends on them, + so rapid updates are possible. 
+ + Parameters + ---------- + xi : cupy.ndarray + 1-D array of x-coordinates of the points the polynomial should + pass through + yi : cupy.ndarray, optional + The y-coordinates of the points the polynomial should pass through. + If None, the y values will be supplied later via the `set_y` method + axis : int, optional + Axis in the yi array corresponding to the x-coordinate values + + See Also + -------- + scipy.interpolate.BarycentricInterpolator + + """ + + def __init__(self, xi, yi=None, axis=0): + _Interpolator1D.__init__(self, xi, yi, axis) + + self.xi = xi.astype(cupy.float64) + self.set_yi(yi) + self.n = len(self.xi) + + self._inv_capacity = 4.0 / (cupy.max(self.xi) - cupy.min(self.xi)) + permute = cupy.random.permutation(self.n) + inv_permute = cupy.zeros(self.n, dtype=cupy.int32) + inv_permute[permute] = cupy.arange(self.n) + + self.wi = cupy.zeros(self.n) + for i in range(self.n): + dist = self._inv_capacity * (self.xi[i] - self.xi[permute]) + dist[inv_permute[i]] = 1.0 + self.wi[i] = 1.0 / cupy.prod(dist) + + def set_yi(self, yi, axis=None): + """Update the y values to be interpolated. + + The barycentric interpolation algorithm requires the calculation + of weights, but these depend only on the xi. The yi can be changed + at any time. + + Parameters + ---------- + yi : cupy.ndarray + The y-coordinates of the points the polynomial should pass + through. If None, the y values will be supplied later. + axis : int, optional + Axis in the yi array corresponding to the x-coordinate values + + """ + + if yi is None: + self.yi = None + return + self._set_yi(yi, xi=self.xi, axis=axis) + self.yi = self._reshape_yi(yi) + self.n, self.r = self.yi.shape + + def add_xi(self, xi, yi=None): + """Add more x values to the set to be interpolated. + + The barycentric interpolation algorithm allows easy updating + by adding more points for the polynomial to pass through. 
+ + Parameters + ---------- + xi : cupy.ndarray + The x-coordinates of the points that the polynomial should + pass through + yi : cupy.ndarray, optional + The y-coordinates of the points the polynomial should pass + through. Should have shape ``(xi.size, R)``; if R > 1 then + the polynomial is vector-valued + If `yi` is not given, the y values will be supplied later. + `yi` should be given if and only if the interpolator has y + values specified + + """ + + if yi is not None: + if self.yi is None: + raise ValueError("No previous yi value to update!") + yi = self._reshape_yi(yi, check=True) + self.yi = cupy.vstack((self.yi, yi)) + else: + if self.yi is not None: + raise ValueError("No update to yi provided!") + old_n = self.n + self.xi = cupy.concatenate((self.xi, xi)) + self.n = len(self.xi) + self.wi **= -1 + old_wi = self.wi + self.wi = cupy.zeros(self.n) + self.wi[:old_n] = old_wi + for j in range(old_n, self.n): + self.wi[:j] *= self._inv_capacity * (self.xi[j] - self.xi[:j]) + self.wi[j] = cupy.prod( + self._inv_capacity * (self.xi[:j] - self.xi[j]) + ) + self.wi **= -1 + + def __call__(self, x): + """Evaluate the interpolating polynomial at the points x. + + Parameters + ---------- + x : cupy.ndarray + Points to evaluate the interpolant at + + Returns + ------- + y : cupy.ndarray + Interpolated values. Shape is determined by replacing the + interpolation axis in the original array with the shape of x + + Notes + ----- + Currently the code computes an outer product between x and the + weights, that is, it constructs an intermediate array of size + N by len(x), where N is the degree of the polynomial. 
+ + """ + + return super().__call__(x) + + def _evaluate(self, x): + if x.size == 0: + p = cupy.zeros((0, self.r), dtype=self.dtype) + else: + c = x[..., cupy.newaxis] - self.xi + z = c == 0 + c[z] = 1 + c = self.wi / c + p = cupy.dot(c, self.yi) / cupy.sum(c, axis=-1)[..., cupy.newaxis] + r = cupy.nonzero(z) + if len(r) == 1: # evaluation at a scalar + if len(r[0]) > 0: # equals one of the points + p = self.yi[r[0][0]] + else: + p[r[:-1]] = self.yi[r[-1]] + return p + + +def barycentric_interpolate(xi, yi, x, axis=0): + """Convenience function for polynomial interpolation. + + Constructs a polynomial that passes through a given + set of points, then evaluates the polynomial. For + reasons of numerical stability, this function does + not compute the coefficients of the polynomial. + + Parameters + ---------- + xi : cupy.ndarray + 1-D array of coordinates of the points the polynomial + should pass through + yi : cupy.ndarray + y-coordinates of the points the polynomial should pass + through + x : scalar or cupy.ndarray + Points to evaluate the interpolator at + axis : int, optional + Axis in the yi array corresponding to the x-coordinate + values + + Returns + ------- + y : scalar or cupy.ndarray + Interpolated values. Shape is determined by replacing + the interpolation axis in the original array with the + shape x + + See Also + -------- + scipy.interpolate.barycentric_interpolate + + """ + + return BarycentricInterpolator(xi, yi, axis=axis)(x) + + +class KroghInterpolator(_Interpolator1DWithDerivatives): + """Interpolating polynomial for a set of points. + + The polynomial passes through all the pairs (xi,yi). One may + additionally specify a number of derivatives at each point xi; + this is done by repeating the value xi and specifying the + derivatives as successive yi values + Allows evaluation of the polynomial and all its derivatives. 
+    For reasons of numerical stability, this function does not compute
+    the coefficients of the polynomial, although they can be obtained
+    by evaluating all the derivatives.
+
+    Parameters
+    ----------
+    xi : cupy.ndarray, length N
+        x-coordinate, must be sorted in increasing order
+    yi : cupy.ndarray
+        y-coordinate, when a xi occurs two or more times in a row,
+        the corresponding yi's represent derivative values
+    axis : int, optional
+        Axis in the yi array corresponding to the x-coordinate values.
+
+    """
+
+    def __init__(self, xi, yi, axis=0):
+        _Interpolator1DWithDerivatives.__init__(self, xi, yi, axis)
+
+        self.xi = xi.astype(cupy.float64)
+        self.yi = self._reshape_yi(yi)
+        self.n, self.r = self.yi.shape
+
+        c = cupy.zeros((self.n+1, self.r), dtype=self.dtype)
+        c[0] = self.yi[0]
+        Vk = cupy.zeros((self.n, self.r), dtype=self.dtype)
+        for k in range(1, self.n):
+            s = 0
+            while s <= k and xi[k-s] == xi[k]:
+                s += 1
+            s -= 1
+            Vk[0] = self.yi[k]/float_factorial(s)
+            for i in range(k-s):
+                if xi[i] == xi[k]:
+                    raise ValueError("Elements of `xi` can't be equal.")
+                if s == 0:
+                    Vk[i+1] = (c[i]-Vk[i])/(xi[i]-xi[k])
+                else:
+                    Vk[i+1] = (Vk[i+1]-Vk[i])/(xi[i]-xi[k])
+            c[k] = Vk[k-s]
+        self.c = c
+
+    def _evaluate(self, x):
+        pi = 1
+        p = cupy.zeros((len(x), self.r), dtype=self.dtype)
+        p += self.c[0, cupy.newaxis, :]
+        for k in range(1, self.n):
+            w = x - self.xi[k-1]
+            pi = w*pi
+            p += pi[:, cupy.newaxis] * self.c[k]
+        return p
+
+    def _evaluate_derivatives(self, x, der=None):
+        n = self.n
+        r = self.r
+
+        if der is None:
+            der = self.n
+        pi = cupy.zeros((n, len(x)))
+        w = cupy.zeros((n, len(x)))
+        pi[0] = 1
+        p = cupy.zeros((len(x), self.r), dtype=self.dtype)
+        p += self.c[0, cupy.newaxis, :]
+
+        for k in range(1, n):
+            w[k-1] = x - self.xi[k-1]
+            pi[k] = w[k-1] * pi[k-1]
+            p += pi[k, :, cupy.newaxis] * self.c[k]
+
+        cn = cupy.zeros((max(der, n+1), len(x), r), dtype=self.dtype)
+        cn[:n+1, :, :] += self.c[:n+1, cupy.newaxis, :]
+        cn[0] = p
+        for k in range(1, n):
+
for i in range(1, n-k+1): + pi[i] = w[k+i-1]*pi[i-1] + pi[i] + cn[k] = cn[k] + pi[i, :, cupy.newaxis]*cn[k+i] + cn[k] *= float_factorial(k) + + cn[n, :, :] = 0 + return cn[:der] + + +def krogh_interpolate(xi, yi, x, der=0, axis=0): + """Convenience function for polynomial interpolation + + Parameters + ---------- + xi : cupy.ndarray + x-coordinate + yi : cupy.ndarray + y-coordinates, of shape ``(xi.size, R)``. Interpreted as + vectors of length R, or scalars if R=1 + x : cupy.ndarray + Point or points at which to evaluate the derivatives + der : int or list, optional + How many derivatives to extract; None for all potentially + nonzero derivatives (that is a number equal to the number + of points), or a list of derivatives to extract. This number + includes the function value as 0th derivative + axis : int, optional + Axis in the yi array corresponding to the x-coordinate values + + Returns + ------- + d : cupy.ndarray + If the interpolator's values are R-D then the + returned array will be the number of derivatives by N by R. + If `x` is a scalar, the middle dimension will be dropped; if + the `yi` are scalars then the last dimension will be dropped + + See Also + -------- + scipy.interpolate.krogh_interpolate + + """ + P = KroghInterpolator(xi, yi, axis=axis) + if der == 0: + return P(x) + elif _isscalar(der): + return P.derivative(x, der=der) + else: + return P.derivatives(x, der=cupy.amax(der)+1)[der] diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/_rbfinterp.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/_rbfinterp.py new file mode 100644 index 0000000000000000000000000000000000000000..e3eb775e2578514c8ca6646a2a9d1185060e1f03 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/_rbfinterp.py @@ -0,0 +1,790 @@ +"""Module for RBF interpolation.""" +import math +import warnings +from itertools import combinations_with_replacement + +import cupy as cp + + +# Define the kernel functions. 
+
+kernel_definitions = """
+static __device__ double linear(double r)
+{
+    return -r;
+}
+
+static __device__ float linear_f(float r)
+{
+    return -r;
+}
+
+
+static __device__ double cubic(double r)
+{
+    return r*r*r;
+}
+
+static __device__ float cubic_f(float r)
+{
+    return r*r*r;
+}
+
+
+static __device__ double thin_plate_spline(double r)
+{
+    if (r == 0.0) {
+        return 0.0;
+    }
+    else {
+        return r*r*log(r);
+    }
+}
+
+static __device__ float thin_plate_spline_f(float r)
+{
+    if (r == 0.0) {
+        return 0.0;
+    }
+    else {
+        return r*r*log(r);
+    }
+}
+
+
+static __device__ double multiquadric(double r)
+{
+    return -sqrt(r*r + 1);
+}
+
+static __device__ float multiquadric_f(float r)
+{
+    return -sqrt(r*r + 1);
+}
+
+
+static __device__ double inverse_multiquadric(double r)
+{
+    return 1.0 / sqrt(r*r + 1);
+}
+
+static __device__ float inverse_multiquadric_f(float r)
+{
+    return 1.0 / sqrt(r*r + 1);
+}
+
+
+static __device__ double inverse_quadratic(double r)
+{
+    return 1.0 / (r*r + 1);
+}
+
+static __device__ float inverse_quadratic_f(float r)
+{
+    return 1.0 / (r*r + 1);
+}
+
+
+static __device__ double gaussian(double r)
+{
+    return exp(-r*r);
+}
+
+static __device__ float gaussian_f(float r)
+{
+    return exp(-r*r);
+}
+
+
+static __device__ double quintic(double r)
+{
+    double r2 = r*r;
+    return -r2*r2*r;
+}
+
+static __device__ float quintic_f(float r)
+{
+    float r2 = r*r;
+    return -r2*r2*r;
+}
+
+"""
+
+linear = cp._core.create_ufunc(
+    'cupyx_scipy_interpolate_linear',
+    (('f->f', 'out0 = linear_f(in0)'),
+     'd->d'),
+    'out0 = linear(in0)',
+    preamble=kernel_definitions,
+    doc="""Linear kernel function.
+
+    ``-r``
+    """,
+)
+
+cubic = cp._core.create_ufunc(
+    'cupyx_scipy_interpolate_cubic',
+    (('f->f', 'out0 = cubic_f(in0)'),
+     'd->d'),
+    'out0 = cubic(in0)',
+    preamble=kernel_definitions,
+    doc="""Cubic kernel function.
+ + ``r**3`` + """, +) + +thin_plate_spline = cp._core.create_ufunc( + 'cupyx_scipy_interpolate_thin_plate_spline', + (('f->f', 'out0 = thin_plate_spline_f(in0)'), + 'd->d'), + 'out0 = thin_plate_spline(in0)', + preamble=kernel_definitions, + doc="""Thin-plate spline kernel function. + + ``r**2 * log(r) if r != 0 else 0`` + """, +) + + +multiquadric = cp._core.create_ufunc( + 'cupyx_scipy_interpolate_multiquadric', + (('f->f', 'out0 = multiquadric_f(in0)'), + 'd->d'), + 'out0 = multiquadric(in0)', + preamble=kernel_definitions, + doc="""Multiquadric kernel function. + + ``-sqrt(r**2 + 1)`` + """, +) + + +inverse_multiquadric = cp._core.create_ufunc( + 'cupyx_scipy_interpolate_inverse_multiquadric', + (('f->f', 'out0 = inverse_multiquadric_f(in0)'), + 'd->d'), + 'out0 = inverse_multiquadric(in0)', + preamble=kernel_definitions, + doc="""Inverse multiquadric kernel function. + + ``1 / sqrt(r**2 + 1)`` + """, +) + + +inverse_quadratic = cp._core.create_ufunc( + 'cupyx_scipy_interpolate_inverse_quadratic', + (('f->f', 'out0 = inverse_quadratic_f(in0)'), + 'd->d'), + 'out0 = inverse_quadratic(in0)', + preamble=kernel_definitions, + doc="""Inverse quadratic kernel function. + + ``1 / (r**2 + 1)`` + """, +) + + +gaussian = cp._core.create_ufunc( + 'cupyx_scipy_interpolate_gaussian', + (('f->f', 'out0 = gaussian_f(in0)'), + 'd->d'), + 'out0 = gaussian(in0)', + preamble=kernel_definitions, + doc="""Gaussian kernel function. + + ``exp(-r**2)`` + """, +) + + +quintic = cp._core.create_ufunc( + 'cupyx_scipy_interpolate_quintic', + (('f->f', 'out0 = quintic_f(in0)'), + 'd->d'), + 'out0 = quintic(in0)', + preamble=kernel_definitions, + doc="""Quintic kernel function. 
+ + ``-r**5`` + """, +) + + +NAME_TO_FUNC = { + "linear": linear, + "thin_plate_spline": thin_plate_spline, + "cubic": cubic, + "quintic": quintic, + "multiquadric": multiquadric, + "inverse_multiquadric": inverse_multiquadric, + "inverse_quadratic": inverse_quadratic, + "gaussian": gaussian +} + + +def kernel_matrix(x, kernel_func, out): + """Evaluate RBFs, with centers at `x`, at `x`.""" + delta = x[None, :, :] - x[:, None, :] + out[...] = kernel_func(cp.linalg.norm(delta, axis=-1)) +# The above is equivalent to the original semi-scalar version: +# for j in range(i+1): +# out[i, j] = kernel_func(cp.linalg.norm(x[i] - x[j])) +# out[j, i] = out[i, j] + + +def polynomial_matrix(x, powers, out): + """Evaluate monomials, with exponents from `powers`, at `x`.""" + pwr = x[:, None, :] ** powers[None, :, :] + cp.prod(pwr, axis=-1, out=out) +# The above is equivalent to the following loop +# for i in range(x.shape[0]): +# for j in range(powers.shape[0]): +# out[i, j] = cp.prod(x[i]**powers[j]) + + +def _build_system(y, d, smoothing, kernel, epsilon, powers): + """Build the system used to solve for the RBF interpolant coefficients. + + Parameters + ---------- + y : (P, N) float ndarray + Data point coordinates. + d : (P, S) float ndarray + Data values at `y`. + smoothing : (P,) float ndarray + Smoothing parameter for each data point. + kernel : str + Name of the RBF. + epsilon : float + Shape parameter. + powers : (R, N) int ndarray + The exponents for each monomial in the polynomial. + + Returns + ------- + lhs : (P + R, P + R) float ndarray + Left-hand side matrix. + rhs : (P + R, S) float ndarray + Right-hand side matrix. + shift : (N,) float ndarray + Domain shift used to create the polynomial matrix. + scale : (N,) float ndarray + Domain scaling used to create the polynomial matrix. 
+ + """ + p = d.shape[0] + s = d.shape[1] + r = powers.shape[0] + kernel_func = NAME_TO_FUNC[kernel] + + # Shift and scale the polynomial domain to be between -1 and 1 + mins = cp.min(y, axis=0) + maxs = cp.max(y, axis=0) + shift = (maxs + mins)/2 + scale = (maxs - mins)/2 + # The scale may be zero if there is a single point or all the points have + # the same value for some dimension. Avoid division by zero by replacing + # zeros with ones. + scale[scale == 0.0] = 1.0 + + yeps = y * epsilon + yhat = (y - shift)/scale + + # Transpose to make the array fortran contiguous. This is required for + # dgesv to not make a copy of lhs. + lhs = cp.empty((p + r, p + r), dtype=float).T + kernel_matrix(yeps, kernel_func, lhs[:p, :p]) + polynomial_matrix(yhat, powers, lhs[:p, p:]) + lhs[p:, :p] = lhs[:p, p:].T + lhs[p:, p:] = 0.0 + for i in range(p): + lhs[i, i] += smoothing[i] + + # Transpose to make the array fortran contiguous. + rhs = cp.empty((s, p + r), dtype=float).T + rhs[:p] = d + rhs[p:] = 0.0 + + return lhs, rhs, shift, scale + + +def _build_evaluation_coefficients(x, y, kernel, epsilon, powers, + shift, scale): + """Construct the coefficients needed to evaluate + the RBF. + + Parameters + ---------- + x : (Q, N) float ndarray + Evaluation point coordinates. + y : (P, N) float ndarray + Data point coordinates. + kernel : str + Name of the RBF. + epsilon : float + Shape parameter. + powers : (R, N) int ndarray + The exponents for each monomial in the polynomial. + shift : (N,) float ndarray + Shifts the polynomial domain for numerical stability. + scale : (N,) float ndarray + Scales the polynomial domain for numerical stability. + + Returns + ------- + (Q, P + R) float ndarray + + """ + q = x.shape[0] + p = y.shape[0] + r = powers.shape[0] + kernel_func = NAME_TO_FUNC[kernel] + + yeps = y*epsilon + xeps = x*epsilon + xhat = (x - shift)/scale + + vec = cp.empty((q, p + r), dtype=float) + + # Evaluate RBFs, with centers at `y`, at the point `x`. 
+ delta = xeps[:, None, :] - yeps[None, :, :] + vec[:, :p] = kernel_func(cp.linalg.norm(delta, axis=-1)) + + # Evaluate monomials, with exponents from `powers`, at the point `x`. + pwr = xhat[:, None, :]**powers[None, :, :] + vec[:, p:] = cp.prod(pwr, axis=-1) +# for i in range(q): +# polynomial_vector(xhat[i], powers, vec[i, p:]) + + return vec + + +############################################################################### + +# These RBFs are implemented. +_AVAILABLE = { + "linear", + "thin_plate_spline", + "cubic", + "quintic", + "multiquadric", + "inverse_multiquadric", + "inverse_quadratic", + "gaussian" +} + + +# The shape parameter does not need to be specified when using these RBFs. +_SCALE_INVARIANT = {"linear", "thin_plate_spline", "cubic", "quintic"} + + +# For RBFs that are conditionally positive definite of order m, the interpolant +# should include polynomial terms with degree >= m - 1. Define the minimum +# degrees here. These values are from Chapter 8 of Fasshauer's "Meshfree +# Approximation Methods with MATLAB". The RBFs that are not in this dictionary +# are positive definite and do not need polynomial terms. +_NAME_TO_MIN_DEGREE = { + "multiquadric": 0, + "linear": 0, + "thin_plate_spline": 1, + "cubic": 1, + "quintic": 2 +} + + +try: + _comb = math.comb +except AttributeError: + # Naive combination for Python 3.7 + def _comb(n, k): + return math.factorial(n) // (math.factorial(n - k) * math.factorial(k)) + + +def _monomial_powers(ndim, degree): + """Return the powers for each monomial in a polynomial. + + Parameters + ---------- + ndim : int + Number of variables in the polynomial. + degree : int + Degree of the polynomial. + + Returns + ------- + (nmonos, ndim) int ndarray + Array where each row contains the powers for each variable in a + monomial. 
+ + """ + nmonos = _comb(degree + ndim, ndim) + out = cp.zeros((nmonos, ndim), dtype=int) + count = 0 + for deg in range(degree + 1): + for mono in combinations_with_replacement(range(ndim), deg): + # `mono` is a tuple of variables in the current monomial with + # multiplicity indicating power (e.g., (0, 1, 1) represents x*y**2) + for var in mono: + out[count, var] += 1 + + count += 1 + + return out + + +def _build_and_solve_system(y, d, smoothing, kernel, epsilon, powers): + """Build and solve the RBF interpolation system of equations. + + Parameters + ---------- + y : (P, N) float ndarray + Data point coordinates. + d : (P, S) float ndarray + Data values at `y`. + smoothing : (P,) float ndarray + Smoothing parameter for each data point. + kernel : str + Name of the RBF. + epsilon : float + Shape parameter. + powers : (R, N) int ndarray + The exponents for each monomial in the polynomial. + + Returns + ------- + coeffs : (P + R, S) float ndarray + Coefficients for each RBF and monomial. + shift : (N,) float ndarray + Domain shift used to create the polynomial matrix. + scale : (N,) float ndarray + Domain scaling used to create the polynomial matrix. + + """ + lhs, rhs, shift, scale = _build_system( + y, d, smoothing, kernel, epsilon, powers + ) + coeffs = cp.linalg.solve(lhs, rhs) + return shift, scale, coeffs + + +class RBFInterpolator: + """Radial basis function (RBF) interpolation in N dimensions. + + Parameters + ---------- + y : (P, N) array_like + Data point coordinates. + d : (P, ...) array_like + Data values at `y`. + neighbors : int, optional + If specified, the value of the interpolant at each evaluation point + will be computed using only this many nearest data points. All the data + points are used by default. + smoothing : float or (P,) array_like, optional + Smoothing parameter. The interpolant perfectly fits the data when this + is set to 0. For large values, the interpolant approaches a least + squares fit of a polynomial with the specified degree. 
Default is 0. + kernel : str, optional + Type of RBF. This should be one of + + - 'linear' : ``-r`` + - 'thin_plate_spline' : ``r**2 * log(r)`` + - 'cubic' : ``r**3`` + - 'quintic' : ``-r**5`` + - 'multiquadric' : ``-sqrt(1 + r**2)`` + - 'inverse_multiquadric' : ``1/sqrt(1 + r**2)`` + - 'inverse_quadratic' : ``1/(1 + r**2)`` + - 'gaussian' : ``exp(-r**2)`` + + Default is 'thin_plate_spline'. + epsilon : float, optional + Shape parameter that scales the input to the RBF. If `kernel` is + 'linear', 'thin_plate_spline', 'cubic', or 'quintic', this defaults to + 1 and can be ignored because it has the same effect as scaling the + smoothing parameter. Otherwise, this must be specified. + degree : int, optional + Degree of the added polynomial. For some RBFs the interpolant may not + be well-posed if the polynomial degree is too small. Those RBFs and + their corresponding minimum degrees are + + - 'multiquadric' : 0 + - 'linear' : 0 + - 'thin_plate_spline' : 1 + - 'cubic' : 1 + - 'quintic' : 2 + + The default value is the minimum degree for `kernel` or 0 if there is + no minimum degree. Set this to -1 for no added polynomial. + + Notes + ----- + An RBF is a scalar valued function in N-dimensional space whose value at + :math:`x` can be expressed in terms of :math:`r=||x - c||`, where :math:`c` + is the center of the RBF. + + An RBF interpolant for the vector of data values :math:`d`, which are from + locations :math:`y`, is a linear combination of RBFs centered at :math:`y` + plus a polynomial with a specified degree. The RBF interpolant is written + as + + .. math:: + f(x) = K(x, y) a + P(x) b, + + where :math:`K(x, y)` is a matrix of RBFs with centers at :math:`y` + evaluated at the points :math:`x`, and :math:`P(x)` is a matrix of + monomials, which span polynomials with the specified degree, evaluated at + :math:`x`. The coefficients :math:`a` and :math:`b` are the solution to the + linear equations + + .. math:: + (K(y, y) + \\lambda I) a + P(y) b = d + + and + + .. 
math:: + P(y)^T a = 0, + + where :math:`\\lambda` is a non-negative smoothing parameter that controls + how well we want to fit the data. The data are fit exactly when the + smoothing parameter is 0. + + The above system is uniquely solvable if the following requirements are + met: + + - :math:`P(y)` must have full column rank. :math:`P(y)` always has full + column rank when `degree` is -1 or 0. When `degree` is 1, + :math:`P(y)` has full column rank if the data point locations are not + all collinear (N=2), coplanar (N=3), etc. + - If `kernel` is 'multiquadric', 'linear', 'thin_plate_spline', + 'cubic', or 'quintic', then `degree` must not be lower than the + minimum value listed above. + - If `smoothing` is 0, then each data point location must be distinct. + + When using an RBF that is not scale invariant ('multiquadric', + 'inverse_multiquadric', 'inverse_quadratic', or 'gaussian'), an appropriate + shape parameter must be chosen (e.g., through cross validation). Smaller + values for the shape parameter correspond to wider RBFs. The problem can + become ill-conditioned or singular when the shape parameter is too small. + + The memory required to solve for the RBF interpolation coefficients + increases quadratically with the number of data points, which can become + impractical when interpolating more than about a thousand data points. + To overcome memory limitations for large interpolation problems, the + `neighbors` argument can be specified to compute an RBF interpolant for + each evaluation point using only the nearest data points. 
+ + See Also + -------- + scipy.interpolate.RBFInterpolator + + """ + + def __init__(self, y, d, + neighbors=None, + smoothing=0.0, + kernel="thin_plate_spline", + epsilon=None, + degree=None): + y = cp.asarray(y, dtype=float, order="C") + if y.ndim != 2: + raise ValueError("`y` must be a 2-dimensional array.") + + ny, ndim = y.shape + + d_dtype = complex if cp.iscomplexobj(d) else float + d = cp.asarray(d, dtype=d_dtype, order="C") + if d.shape[0] != ny: + raise ValueError( + f"Expected the first axis of `d` to have length {ny}." + ) + + d_shape = d.shape[1:] + d = d.reshape((ny, -1)) + # If `d` is complex, convert it to a float array with twice as many + # columns. Otherwise, the LHS matrix would need to be converted to + # complex and take up 2x more memory than necessary. + d = d.view(float) + + isscalar = cp.isscalar(smoothing) or smoothing.shape == () + if isscalar: + smoothing = cp.full(ny, smoothing, dtype=float) + else: + smoothing = cp.asarray(smoothing, dtype=float, order="C") + if smoothing.shape != (ny,): + raise ValueError( + "Expected `smoothing` to be a scalar or have shape " + f"({ny},)." + ) + + kernel = kernel.lower() + if kernel not in _AVAILABLE: + raise ValueError(f"`kernel` must be one of {_AVAILABLE}.") + + if epsilon is None: + if kernel in _SCALE_INVARIANT: + epsilon = 1.0 + else: + raise ValueError( + "`epsilon` must be specified if `kernel` is not one of " + f"{_SCALE_INVARIANT}." + ) + else: + epsilon = float(epsilon) + + min_degree = _NAME_TO_MIN_DEGREE.get(kernel, -1) + if degree is None: + degree = max(min_degree, 0) + else: + degree = int(degree) + if degree < -1: + raise ValueError("`degree` must be at least -1.") + elif -1 < degree < min_degree: + warnings.warn( + f"`degree` should not be below {min_degree} when `kernel` " + f"is '{kernel}'. 
The interpolant may not be uniquely " + "solvable, and the smoothing parameter may have an " + "unintuitive effect.", + UserWarning + ) + + if neighbors is None: + nobs = ny + else: + raise NotImplementedError("neighbors is not implemented yet") + # Make sure the number of nearest neighbors used for interpolation + # does not exceed the number of observations. + neighbors = int(min(neighbors, ny)) + nobs = neighbors + + powers = _monomial_powers(ndim, degree) + # The polynomial matrix must have full column rank in order for the + # interpolant to be well-posed, which is not possible if there are + # fewer observations than monomials. + if powers.shape[0] > nobs: + raise ValueError( + f"At least {powers.shape[0]} data points are required when " + f"`degree` is {degree} and the number of dimensions is {ndim}." + ) + + if neighbors is None: + shift, scale, coeffs = _build_and_solve_system( + y, d, smoothing, kernel, epsilon, powers + ) + + # Make these attributes private since they do not always exist. + self._shift = shift + self._scale = scale + self._coeffs = coeffs + + else: + raise NotImplementedError + # self._tree = KDTree(y) + + self.y = y + self.d = d + self.d_shape = d_shape + self.d_dtype = d_dtype + self.neighbors = neighbors + self.smoothing = smoothing + self.kernel = kernel + self.epsilon = epsilon + self.powers = powers + + def _chunk_evaluator(self, x, y, shift, scale, coeffs, + memory_budget=1000000): + """ + Evaluate the interpolation. + + Parameters + ---------- + x : (Q, N) float ndarray + array of points on which to evaluate + y: (P, N) float ndarray + array of points on which we know function values + shift: (N, ) ndarray + Domain shift used to create the polynomial matrix. + scale : (N,) float ndarray + Domain scaling used to create the polynomial matrix. 
+ coeffs: (P+R, S) float ndarray + Coefficients in front of basis functions + + Returns + ------- + (Q, S) float ndarray + Interpolated array + """ + nx, ndim = x.shape + nnei = len(y) + + # in each chunk we consume the same space we already occupy + chunksize = memory_budget // ((self.powers.shape[0] + nnei)) + 1 + if chunksize <= nx: + out = cp.empty((nx, self.d.shape[1]), dtype=float) + for i in range(0, nx, chunksize): + vec = _build_evaluation_coefficients( + x[i:i + chunksize, :], + y, + self.kernel, + self.epsilon, + self.powers, + shift, + scale) + out[i:i + chunksize, :] = cp.dot(vec, coeffs) + else: + vec = _build_evaluation_coefficients( + x, + y, + self.kernel, + self.epsilon, + self.powers, + shift, + scale) + out = cp.dot(vec, coeffs) + + return out + + def __call__(self, x): + """Evaluate the interpolant at `x`. + + Parameters + ---------- + x : (Q, N) array_like + Evaluation point coordinates. + + Returns + ------- + (Q, ...) ndarray + Values of the interpolant at `x`. + + """ + x = cp.asarray(x, dtype=float, order="C") + if x.ndim != 2: + raise ValueError("`x` must be a 2-dimensional array.") + + nx, ndim = x.shape + if ndim != self.y.shape[1]: + raise ValueError("Expected the second axis of `x` to have length " + f"{self.y.shape[1]}.") + + # Our memory budget for storing RBF coefficients is + # based on how many floats in memory we already occupy + # If this number is below 1e6 we just use 1e6 + # This memory budget is used to decide how we chunk + # the inputs + memory_budget = max(x.size + self.y.size + self.d.size, 1000000) + + if self.neighbors is None: + out = self._chunk_evaluator( + x, + self.y, + self._shift, + self._scale, + self._coeffs, memory_budget=memory_budget) + else: + raise NotImplementedError # XXX: needs KDTree + + out = out.view(self.d_dtype) + out = out.reshape((nx, ) + self.d_shape) + return out diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/_rgi.py 
b/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/_rgi.py new file mode 100644 index 0000000000000000000000000000000000000000..35d210e09749d9d98c5eb6609aa856b499bcd1c0 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/interpolate/_rgi.py @@ -0,0 +1,622 @@ +__all__ = ['RegularGridInterpolator', 'interpn'] + +import itertools +import cupy as cp +from cupyx.scipy.interpolate._bspline2 import make_interp_spline +from cupyx.scipy.interpolate._cubic import PchipInterpolator + + +def _ndim_coords_from_arrays(points, ndim=None): + """ + Convert a tuple of coordinate arrays to a (..., ndim)-shaped array. + """ + if isinstance(points, tuple) and len(points) == 1: + # handle argument tuple + points = points[0] + if isinstance(points, tuple): + p = cp.broadcast_arrays(*points) + n = len(p) + for j in range(1, n): + if p[j].shape != p[0].shape: + raise ValueError( + "coordinate arrays do not have the same shape") + points = cp.empty(p[0].shape + (len(points),), dtype=float) + for j, item in enumerate(p): + points[..., j] = item + else: + points = cp.asanyarray(points) + if points.ndim == 1: + if ndim is None: + points = points.reshape(-1, 1) + else: + points = points.reshape(-1, ndim) + return points + + +def _check_points(points): + descending_dimensions = [] + grid = [] + for i, p in enumerate(points): + # early make points float + # see https://github.com/scipy/scipy/pull/17230 + p = cp.asarray(p, dtype=float) + if not cp.all(p[1:] > p[:-1]): + if cp.all(p[1:] < p[:-1]): + # input is descending, so make it ascending + descending_dimensions.append(i) + p = cp.flip(p) + p = cp.ascontiguousarray(p) + else: + raise ValueError( + "The points in dimension %d must be strictly " + "ascending or descending" % i) + grid.append(p) + return tuple(grid), tuple(descending_dimensions) + + +def _check_dimensionality(points, values): + if len(points) > values.ndim: + raise ValueError("There are %d point arrays, but values has %d " + "dimensions" % (len(points), 
values.ndim)) + for i, p in enumerate(points): + if not cp.asarray(p).ndim == 1: + raise ValueError("The points in dimension %d must be " + "1-dimensional" % i) + if not values.shape[i] == len(p): + raise ValueError("There are %d points and %d values in " + "dimension %d" % (len(p), values.shape[i], i)) + + +class RegularGridInterpolator: + """ + Interpolation on a regular or rectilinear grid in arbitrary dimensions. + + The data must be defined on a rectilinear grid; that is, a rectangular + grid with even or uneven spacing. Linear and nearest-neighbor + interpolations are supported. After setting up the interpolator object, + the interpolation method may be chosen at each evaluation. + + Parameters + ---------- + points : tuple of ndarray of float, with shapes (m1, ), ..., (mn, ) + The points defining the regular grid in n dimensions. The points in + each dimension (i.e. every elements of the points tuple) must be + strictly ascending or descending. + + values : ndarray, shape (m1, ..., mn, ...) + The data on the regular grid in n dimensions. Complex data can be + acceptable. + + method : str, optional + The method of interpolation to perform. Supported are "linear", + "nearest", "slinear", "cubic", "quintic" and "pchip". + This parameter will become the default for the object's + ``__call__`` method. Default is "linear". + + bounds_error : bool, optional + If True, when interpolated values are requested outside of the + domain of the input data, a ValueError is raised. + If False, then `fill_value` is used. + Default is True. + + fill_value : float or None, optional + The value to use for points outside of the interpolation domain. + If None, values outside the domain are extrapolated. + Default is ``cp.nan``. + + Notes + ----- + Contrary to scipy's `LinearNDInterpolator` and `NearestNDInterpolator`, + this class avoids expensive triangulation of the input data by taking + advantage of the regular grid structure. 
+ + In other words, this class assumes that the data is defined on a + *rectilinear* grid. + + If the input data is such that dimensions have incommensurate + units and differ by many orders of magnitude, the interpolant may have + numerical artifacts. Consider rescaling the data before interpolating. + + Examples + -------- + **Evaluate a function on the points of a 3-D grid** + + As a first example, we evaluate a simple example function on the points of + a 3-D grid: + + >>> from cupyx.scipy.interpolate import RegularGridInterpolator + >>> import cupy as cp + >>> def f(x, y, z): + ... return 2 * x**3 + 3 * y**2 - z + >>> x = cp.linspace(1, 4, 11) + >>> y = cp.linspace(4, 7, 22) + >>> z = cp.linspace(7, 9, 33) + >>> xg, yg ,zg = cp.meshgrid(x, y, z, indexing='ij', sparse=True) + >>> data = f(xg, yg, zg) + + ``data`` is now a 3-D array with ``data[i, j, k] = f(x[i], y[j], z[k])``. + Next, define an interpolating function from this data: + + >>> interp = RegularGridInterpolator((x, y, z), data) + + Evaluate the interpolating function at the two points + ``(x,y,z) = (2.1, 6.2, 8.3)`` and ``(3.3, 5.2, 7.1)``: + + >>> pts = cp.array([[2.1, 6.2, 8.3], + ... [3.3, 5.2, 7.1]]) + >>> interp(pts) + array([ 125.80469388, 146.30069388]) + + which is indeed a close approximation to + + >>> f(2.1, 6.2, 8.3), f(3.3, 5.2, 7.1) + (125.54200000000002, 145.894) + + **Interpolate and extrapolate a 2D dataset** + + As a second example, we interpolate and extrapolate a 2D data set: + + >>> x, y = cp.array([-2, 0, 4]), cp.array([-2, 0, 2, 5]) + >>> def ff(x, y): + ... return x**2 + y**2 + + >>> xg, yg = cp.meshgrid(x, y, indexing='ij') + >>> data = ff(xg, yg) + >>> interp = RegularGridInterpolator((x, y), data, + ... bounds_error=False, fill_value=None) + + >>> import matplotlib.pyplot as plt + >>> fig = plt.figure() + >>> ax = fig.add_subplot(projection='3d') + >>> ax.scatter(xg.ravel().get(), yg.ravel().get(), data.ravel().get(), + ... 
s=60, c='k', label='data') + + Evaluate and plot the interpolator on a finer grid + + >>> xx = cp.linspace(-4, 9, 31) + >>> yy = cp.linspace(-4, 9, 31) + >>> X, Y = cp.meshgrid(xx, yy, indexing='ij') + + >>> # interpolator + >>> ax.plot_wireframe(X.get(), Y.get(), interp((X, Y)).get(), + rstride=3, cstride=3, alpha=0.4, color='m', + label='linear interp') + + >>> # ground truth + >>> ax.plot_wireframe(X.get(), Y.get(), ff(X, Y).get(), + rstride=3, cstride=3, + ... alpha=0.4, label='ground truth') + >>> plt.legend() + >>> plt.show() + + See Also + -------- + interpn : a convenience function which wraps `RegularGridInterpolator` + + scipy.ndimage.map_coordinates : interpolation on grids with equal spacing + (suitable for e.g., N-D image resampling) + + References + ---------- + [1] Python package *regulargrid* by Johannes Buchner, see + https://pypi.python.org/pypi/regulargrid/ + [2] Wikipedia, "Trilinear interpolation", + https://en.wikipedia.org/wiki/Trilinear_interpolation + [3] Weiser, Alan, and Sergio E. Zarantonello. "A note on piecewise + linear and multilinear table interpolation in many dimensions." + MATH. COMPUT. 50.181 (1988): 189-196. 
+ https://www.ams.org/journals/mcom/1988-50-181/S0025-5718-1988-0917826-0/S0025-5718-1988-0917826-0.pdf + """ + # this class is based on code originally programmed by Johannes Buchner, + # see https://github.com/JohannesBuchner/regulargrid + + _SPLINE_DEGREE_MAP = {"slinear": 1, "cubic": 3, "quintic": 5, 'pchip': 3} + _SPLINE_METHODS = list(_SPLINE_DEGREE_MAP.keys()) + _ALL_METHODS = ["linear", "nearest"] + _SPLINE_METHODS + + def __init__(self, points, values, method="linear", bounds_error=True, + fill_value=cp.nan): + if method not in self._ALL_METHODS: + raise ValueError("Method '%s' is not defined" % method) + elif method in self._SPLINE_METHODS: + self._validate_grid_dimensions(points, method) + + self.method = method + self.bounds_error = bounds_error + self.grid, self._descending_dimensions = _check_points(points) + self.values = self._check_values(values) + self._check_dimensionality(self.grid, self.values) + self.fill_value = self._check_fill_value(self.values, fill_value) + if self._descending_dimensions: + self.values = cp.flip(values, axis=self._descending_dimensions) + + def _check_dimensionality(self, grid, values): + _check_dimensionality(grid, values) + + def _validate_grid_dimensions(self, points, method): + k = self._SPLINE_DEGREE_MAP[method] + for i, point in enumerate(points): + ndim = len(cp.atleast_1d(point)) + if ndim <= k: + raise ValueError(f"There are {ndim} points in dimension {i}," + f" but method {method} requires at least " + f" {k+1} points per dimension.") + + def _check_points(self, points): + return _check_points(points) + + def _check_values(self, values): + if not cp.issubdtype(values.dtype, cp.inexact): + values = values.astype(float) + + return values + + def _check_fill_value(self, values, fill_value): + if fill_value is not None: + fill_value_dtype = cp.asarray(fill_value).dtype + if (hasattr(values, 'dtype') and + not cp.can_cast(fill_value_dtype, values.dtype, + casting='same_kind')): + raise ValueError("fill_value must be 
either 'None' or " + "of a type compatible with values") + return fill_value + + def __call__(self, xi, method=None): + """ + Interpolation at coordinates. + + Parameters + ---------- + xi : cupy.ndarray of shape (..., ndim) + The coordinates to evaluate the interpolator at. + + method : str, optional + The method of interpolation to perform. Supported are "linear" and + "nearest". Default is the method chosen when the interpolator was + created. + + Returns + ------- + values_x : cupy.ndarray, shape xi.shape[:-1] + values.shape[ndim:] + Interpolated values at `xi`. See notes for behaviour when + ``xi.ndim == 1``. + + Notes + ----- + In the case that ``xi.ndim == 1`` a new axis is inserted into + the 0 position of the returned array, values_x, so its shape is + instead ``(1,) + values.shape[ndim:]``. + + Examples + -------- + Here we define a nearest-neighbor interpolator of a simple function + + >>> import cupy as cp + >>> x, y = cp.array([0, 1, 2]), cp.array([1, 3, 7]) + >>> def f(x, y): + ... 
return x**2 + y**2 + >>> data = f(*cp.meshgrid(x, y, indexing='ij', sparse=True)) + >>> from cupyx.scipy.interpolate import RegularGridInterpolator + >>> interp = RegularGridInterpolator((x, y), data, method='nearest') + + By construction, the interpolator uses the nearest-neighbor + interpolation + + >>> interp([[1.5, 1.3], [0.3, 4.5]]) + array([2., 9.]) + + We can however evaluate the linear interpolant by overriding the + `method` parameter + + >>> interp([[1.5, 1.3], [0.3, 4.5]], method='linear') + array([ 4.7, 24.3]) + """ + is_method_changed = self.method != method + method = self.method if method is None else method + if method not in self._ALL_METHODS: + raise ValueError("Method '%s' is not defined" % method) + + xi, xi_shape, ndim, nans, out_of_bounds = self._prepare_xi(xi) + + if method == "linear": + indices, norm_distances = self._find_indices(xi.T) + result = self._evaluate_linear(indices, norm_distances) + elif method == "nearest": + indices, norm_distances = self._find_indices(xi.T) + result = self._evaluate_nearest(indices, norm_distances) + elif method in self._SPLINE_METHODS: + if is_method_changed: + self._validate_grid_dimensions(self.grid, method) + result = self._evaluate_spline(xi, method) + + if not self.bounds_error and self.fill_value is not None: + result[out_of_bounds] = self.fill_value + + if nans.ndim < result.ndim: + nans = nans[..., None] + result = cp.where(nans, cp.nan, result) + return result.reshape(xi_shape[:-1] + self.values.shape[ndim:]) + + def _prepare_xi(self, xi): + ndim = len(self.grid) + xi = _ndim_coords_from_arrays(xi, ndim=ndim) + if xi.shape[-1] != len(self.grid): + raise ValueError("The requested sample points xi have dimension " + f"{xi.shape[-1]} but this " + f"RegularGridInterpolator has dimension {ndim}") + + xi_shape = xi.shape + xi = xi.reshape(-1, xi_shape[-1]) + xi = cp.asarray(xi, dtype=float) + + # find nans in input + is_nans = cp.isnan(xi).T + nans = is_nans[0].copy() + for is_nan in is_nans[1:]: + 
cp.logical_or(nans, is_nan, nans) + + if self.bounds_error: + for i, p in enumerate(xi.T): + if not cp.logical_and(cp.all(self.grid[i][0] <= p), + cp.all(p <= self.grid[i][-1])): + raise ValueError("One of the requested xi is out of bounds" + " in dimension %d" % i) + out_of_bounds = None + else: + out_of_bounds = self._find_out_of_bounds(xi.T) + + return xi, xi_shape, ndim, nans, out_of_bounds + + def _evaluate_linear(self, indices, norm_distances): + # slice for broadcasting over trailing dimensions in self.values + vslice = (slice(None),) + (None,)*(self.values.ndim - len(indices)) + + # Compute shifting up front before zipping everything together + shift_norm_distances = [1 - yi for yi in norm_distances] + shift_indices = [i + 1 for i in indices] + + # The formula for linear interpolation in 2d takes the form: + # values = self.values[(i0, i1)] * (1 - y0) * (1 - y1) + \ + # self.values[(i0, i1 + 1)] * (1 - y0) * y1 + \ + # self.values[(i0 + 1, i1)] * y0 * (1 - y1) + \ + # self.values[(i0 + 1, i1 + 1)] * y0 * y1 + # We pair i with 1 - yi (zipped1) and i + 1 with yi (zipped2) + zipped1 = zip(indices, shift_norm_distances) + zipped2 = zip(shift_indices, norm_distances) + + # Take all products of zipped1 and zipped2 and iterate over them + # to get the terms in the above formula. This corresponds to iterating + # over the vertices of a hypercube. 
+ hypercube = itertools.product(*zip(zipped1, zipped2)) + value = cp.array([0.]) + for h in hypercube: + edge_indices, weights = zip(*h) + term = cp.asarray(self.values[edge_indices]) + for w in weights: + term *= w[vslice] + value = value + term # cannot use += because broadcasting + return value + + def _evaluate_nearest(self, indices, norm_distances): + idx_res = [cp.where(yi <= .5, i, i + 1) + for i, yi in zip(indices, norm_distances)] + return self.values[tuple(idx_res)] + + def _evaluate_spline(self, xi, method): + # ensure xi is 2D list of points to evaluate (`m` is the number of + # points and `n` is the number of interpolation dimensions, + # ``n == len(self.grid)``.) + if xi.ndim == 1: + xi = xi.reshape((1, xi.size)) + m, n = xi.shape + + # Reorder the axes: n-dimensional process iterates over the + # interpolation axes from the last axis downwards: E.g. for a 4D grid + # the order of axes is 3, 2, 1, 0. Each 1D interpolation works along + # the 0th axis of its argument array (for 1D routine it's its ``y`` + # array). Thus permute the interpolation axes of `values` *and keep + # trailing dimensions trailing*. + axes = tuple(range(self.values.ndim)) + axx = axes[:n][::-1] + axes[n:] + values = self.values.transpose(axx) + + if method == 'pchip': + _eval_func = self._do_pchip + else: + _eval_func = self._do_spline_fit + k = self._SPLINE_DEGREE_MAP[method] + + # Non-stationary procedure: difficult to vectorize this part entirely + # into numpy-level operations. Unfortunately this requires explicit + # looping over each point in xi. + + # can at least vectorize the first pass across all points in the + # last variable of xi. 
+ last_dim = n - 1 + first_values = _eval_func(self.grid[last_dim], + values, + xi[:, last_dim], + k) + + # the rest of the dimensions have to be on a per point-in-xi basis + shape = (m, *self.values.shape[n:]) + result = cp.empty(shape, dtype=self.values.dtype) + for j in range(m): + # Main process: Apply 1D interpolate in each dimension + # sequentially, starting with the last dimension. + # These are then "folded" into the next dimension in-place. + folded_values = first_values[j, ...] + for i in range(last_dim-1, -1, -1): + # Interpolate for each 1D from the last dimensions. + # This collapses each 1D sequence into a scalar. + folded_values = _eval_func(self.grid[i], + folded_values, + xi[j, i], + k) + result[j, ...] = folded_values + + return result + + @staticmethod + def _do_spline_fit(x, y, pt, k): + local_interp = make_interp_spline(x, y, k=k, axis=0) + values = local_interp(pt) + return values + + @staticmethod + def _do_pchip(x, y, pt, k): + local_interp = PchipInterpolator(x, y, axis=0) + values = local_interp(pt) + return values + + def _find_indices(self, xi): + # find relevant edges between which xi are situated + indices = [] + # compute distance to lower edge in unity units + norm_distances = [] + # iterate through dimensions + for x, grid in zip(xi, self.grid): + i = cp.searchsorted(grid, x) - 1 + cp.clip(i, 0, grid.size - 2, i) + indices.append(i) + + # compute norm_distances, incl length-1 grids, + # where `grid[i+1] == grid[i]` + denom = grid[i + 1] - grid[i] + norm_dist = cp.where(denom != 0, (x - grid[i]) / denom, 0) + norm_distances.append(norm_dist) + + return indices, norm_distances + + def _find_out_of_bounds(self, xi): + # check for out of bounds xi + out_of_bounds = cp.zeros((xi.shape[1]), dtype=bool) + # iterate through dimensions + for x, grid in zip(xi, self.grid): + out_of_bounds += x < grid[0] + out_of_bounds += x > grid[-1] + return out_of_bounds + + +def interpn(points, values, xi, method="linear", bounds_error=True, + 
fill_value=cp.nan): + """ + Multidimensional interpolation on regular or rectilinear grids. + + Strictly speaking, not all regular grids are supported - this function + works on *rectilinear* grids, that is, a rectangular grid with even or + uneven spacing. + + Parameters + ---------- + points : tuple of cupy.ndarray of float, with shapes (m1, ), ..., (mn, ) + The points defining the regular grid in n dimensions. The points in + each dimension (i.e. every elements of the points tuple) must be + strictly ascending or descending. + + values : cupy.ndarray of shape (m1, ..., mn, ...) + The data on the regular grid in n dimensions. Complex data can be + acceptable. + + xi : cupy.ndarray of shape (..., ndim) + The coordinates to sample the gridded data at + + method : str, optional + The method of interpolation to perform. Supported are "linear", + "nearest", "slinear", "cubic", "quintic" and "pchip". + + bounds_error : bool, optional + If True, when interpolated values are requested outside of the + domain of the input data, a ValueError is raised. + If False, then `fill_value` is used. + + fill_value : number, optional + If provided, the value to use for points outside of the + interpolation domain. If None, values outside + the domain are extrapolated. + + Returns + ------- + values_x : ndarray, shape xi.shape[:-1] + values.shape[ndim:] + Interpolated values at `xi`. See notes for behaviour when + ``xi.ndim == 1``. + + Notes + ----- + + In the case that ``xi.ndim == 1`` a new axis is inserted into + the 0 position of the returned array, values_x, so its shape is + instead ``(1,) + values.shape[ndim:]``. + + If the input data is such that input dimensions have incommensurate + units and differ by many orders of magnitude, the interpolant may have + numerical artifacts. Consider rescaling the data before interpolation. 
+ + Examples + -------- + Evaluate a simple example function on the points of a regular 3-D grid: + + >>> import cupy as cp + >>> from cupyx.scipy.interpolate import interpn + >>> def value_func_3d(x, y, z): + ... return 2 * x + 3 * y - z + >>> x = cp.linspace(0, 4, 5) + >>> y = cp.linspace(0, 5, 6) + >>> z = cp.linspace(0, 6, 7) + >>> points = (x, y, z) + >>> values = value_func_3d(*cp.meshgrid(*points, indexing='ij')) + + Evaluate the interpolating function at a point + + >>> point = cp.array([2.21, 3.12, 1.15]) + >>> print(interpn(points, values, point)) + [12.63] + + See Also + -------- + RegularGridInterpolator : interpolation on a regular or rectilinear grid + in arbitrary dimensions (`interpn` wraps this + class). + + cupyx.scipy.ndimage.map_coordinates : interpolation on grids with equal + spacing (suitable for e.g., N-D image + resampling) + """ + # sanity check 'method' kwarg + if method not in ["linear", "nearest", "slinear", "cubic", + "quintic", "pchip"]: + raise ValueError( + "interpn only understands the methods 'linear', 'nearest', " + "'slinear', 'cubic', 'quintic' and 'pchip'. 
" + "You provided {method}.") + + ndim = values.ndim + + # sanity check consistency of input dimensions + if len(points) > ndim: + raise ValueError("There are %d point arrays, but values has %d " + "dimensions" % (len(points), ndim)) + + grid, descending_dimensions = _check_points(points) + _check_dimensionality(grid, values) + + # sanity check requested xi + xi = _ndim_coords_from_arrays(xi, ndim=len(grid)) + if xi.shape[-1] != len(grid): + raise ValueError("The requested sample points xi have dimension " + "%d, but this RegularGridInterpolator has " + "dimension %d" % (xi.shape[-1], len(grid))) + + if bounds_error: + for i, p in enumerate(xi.T): + if not cp.logical_and(cp.all(grid[i][0] <= p), + cp.all(p <= grid[i][-1])): + raise ValueError("One of the requested xi is out of bounds " + "in dimension %d" % i) + + # perform interpolation + if method in ["linear", "nearest", "slinear", "cubic", "quintic", "pchip"]: + interp = RegularGridInterpolator(points, values, method=method, + bounds_error=bounds_error, + fill_value=fill_value) + return interp(xi) diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..17d7a1087ae989ff65bed08e2f82a988b00b1185 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_arraytools.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_arraytools.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1383933c7a8839eef2f5d55e868be321d85e4711 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_arraytools.cpython-310.pyc differ diff --git 
a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_bsplines.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_bsplines.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d17a461231e0f4a1f4e0a4a526ab95a9fc71736e Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_bsplines.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_fir_filter_design.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_fir_filter_design.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..04b4479f633bce3210125683acc989eb77a1b53b Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_fir_filter_design.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_optimize.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_optimize.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9146350f96f27b6849849c7fcede858109192cb2 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_optimize.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_peak_finding.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_peak_finding.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..923121dcd858a7736d0ed1d46ac66d99cee5ac69 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_peak_finding.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_polyutils.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_polyutils.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..45f4b9f454295091e9c146f49e3ddb1643ea5adb Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_polyutils.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_savitzky_golay.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_savitzky_golay.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0c22b749f1de6f7cd346ce78af211f68d3f90d59 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_savitzky_golay.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_spectral.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_spectral.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..792bf9cd9745a5211a97d764c2387abfe5e3e4d9 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_spectral.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_waveforms.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_waveforms.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65e5da69ce3c2c1300c1860f8a76ce4b666b84fe Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_waveforms.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_wavelets.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_wavelets.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d4ec4288691313c730ecfc603f5cf5beacce00b9 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/signal/__pycache__/_wavelets.cpython-310.pyc differ diff --git 
a/vllm/lib/python3.10/site-packages/cupyx/scipy/spatial/__init__.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/spatial/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5e6583966bfe5407722ab3dc28fadaf1adac68f3 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/spatial/__init__.py @@ -0,0 +1 @@ +from cupyx.scipy.spatial.distance import distance_matrix # NOQA diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/spatial/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/spatial/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6824424912041c878f2e4bb81547f1af2cf77b2f Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/spatial/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/spatial/__pycache__/distance.cpython-310.pyc b/vllm/lib/python3.10/site-packages/cupyx/scipy/spatial/__pycache__/distance.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cbbfd8bf7b462fc3fc6c2f5ee887ec4d47189abf Binary files /dev/null and b/vllm/lib/python3.10/site-packages/cupyx/scipy/spatial/__pycache__/distance.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/cupyx/scipy/spatial/distance.py b/vllm/lib/python3.10/site-packages/cupyx/scipy/spatial/distance.py new file mode 100644 index 0000000000000000000000000000000000000000..9e50c48224076778d42c537d19c9e2863f28af84 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/cupyx/scipy/spatial/distance.py @@ -0,0 +1,694 @@ +import cupy + +cuvs_available = False +pylibraft_available = False +try: + from cuvs.distance import pairwise_distance + cuvs_available = True +except ImportError: + try: + # cuVS distance primitives were previously in pylibraft + from pylibraft.distance import pairwise_distance + pylibraft_available = True + except ImportError: + cuvs_available = False + 
pylibraft_available = False + + +def _convert_to_type(X, out_type): + return cupy.ascontiguousarray(X, dtype=out_type) + + +def _validate_pdist_input(X, m, n, metric_info, **kwargs): + # get supported types + types = metric_info.types + # choose best type + typ = types[types.index(X.dtype)] if X.dtype in types else types[0] + # validate data + X = _convert_to_type(X, out_type=typ) + + # validate kwargs + _validate_kwargs = metric_info.validator + if _validate_kwargs: + kwargs = _validate_kwargs(X, m, n, **kwargs) + return X, typ, kwargs + + +class MetricInfo: + + def __init__(self, canonical_name=None, aka=None, + validator=None, types=None): + self.canonical_name_ = canonical_name + self.aka_ = aka + self.validator_ = validator + self.types_ = types + + +_METRIC_INFOS = [ + MetricInfo( + canonical_name="canberra", + aka={'canberra'} + ), + MetricInfo( + canonical_name="chebyshev", + aka={"chebychev", "chebyshev", "cheby", "cheb", "ch"} + ), + MetricInfo( + canonical_name="cityblock", + aka={"cityblock", "cblock", "cb", "c"} + ), + MetricInfo( + canonical_name="correlation", + aka={"correlation", "co"} + ), + MetricInfo( + canonical_name="cosine", + aka={"cosine", "cos"} + ), + MetricInfo( + canonical_name="hamming", + aka={"matching", "hamming", "hamm", "ha", "h"}, + types=["double", "bool"] + ), + MetricInfo( + canonical_name="euclidean", + aka={"euclidean", "euclid", "eu", "e"}, + ), + MetricInfo( + canonical_name="jensenshannon", + aka={"jensenshannon", "js"} + ), + MetricInfo( + canonical_name="minkowski", + aka={"minkowski", "mi", "m", "pnorm"} + ), + MetricInfo( + canonical_name="russellrao", + aka={"russellrao"}, + types=["bool"] + ), + MetricInfo( + canonical_name="sqeuclidean", + aka={"sqeuclidean", "sqe", "sqeuclid"} + ), + MetricInfo( + canonical_name="hellinger", + aka={"hellinger"} + ), + MetricInfo( + canonical_name="kl_divergence", + aka={"kl_divergence", "kl_div", "kld"} + ) + + +] + +_METRICS = {info.canonical_name_: info for info in 
_METRIC_INFOS} +_METRIC_ALIAS = dict((alias, info) + for info in _METRIC_INFOS + for alias in info.aka_) + +_METRICS_NAMES = list(_METRICS.keys()) + + +def check_soft_dependencies(): + if not cuvs_available: + if not pylibraft_available: + raise RuntimeError('cuVS >= 24.12 or pylibraft < ' + '24.12 should be installed to use this feature') + + +def minkowski(u, v, p): + """Compute the Minkowski distance between two 1-D arrays. + + Args: + u (array_like): Input array of size (N,) + v (array_like): Input array of size (N,) + p (float): The order of the norm of the difference + :math:`{\\|u-v\\|}_p`. Note that for :math:`0 < p < 1`, + the triangle inequality only holds with an additional + multiplicative factor, i.e. it is only a quasi-metric. + + Returns: + minkowski (double): The Minkowski distance between vectors `u` and `v`. + """ + + check_soft_dependencies() + + u_order = "F" if cupy.isfortran(u) else "C" + v_order = "F" if cupy.isfortran(v) else "C" + + if u_order != v_order: + raise ValueError('u and v must have the same layout ' + '(u.order=%s, v.order=%s' % (u_order, v_order)) + + output_arr = cupy.empty((1, 1), dtype=u.dtype, order=u_order) + + pairwise_distance(u, v, output_arr, "minkowski", p) + + return output_arr[0, 0] + + +def canberra(u, v): + """Compute the Canberra distance between two 1-D arrays. + + The Canberra distance is defined as + + .. math:: + d(u, v) = \\sum_{i} \\frac{| u_i - v_i |}{|u_i| + |v_i|} + + Args: + u (array_like): Input array of size (N,) + v (array_like): Input array of size (N,) + + Returns: + canberra (double): The Canberra distance between vectors `u` and `v`. 
+ """ + check_soft_dependencies() + + u_order = "F" if cupy.isfortran(u) else "C" + v_order = "F" if cupy.isfortran(v) else "C" + + if u_order != v_order: + raise ValueError('u and v must have the same layout ' + '(u.order=%s, v.order=%s' % (u_order, v_order)) + + output_arr = cupy.empty((1, 1), dtype=u.dtype, order=u_order) + + pairwise_distance(u, v, output_arr, "canberra") + + return output_arr[0, 0] + + +def chebyshev(u, v): + """Compute the Chebyshev distance between two 1-D arrays. + + The Chebyshev distance is defined as + + .. math:: + d(u, v) = \\max_{i} |u_i - v_i| + + Args: + u (array_like): Input array of size (N,) + v (array_like): Input array of size (N,) + + Returns: + chebyshev (double): The Chebyshev distance between vectors `u` and `v`. + """ + check_soft_dependencies() + + u_order = "F" if cupy.isfortran(u) else "C" + v_order = "F" if cupy.isfortran(v) else "C" + + if u_order != v_order: + raise ValueError('u and v must have the same layout ' + '(u.order=%s, v.order=%s' % (u_order, v_order)) + + output_arr = cupy.empty((1, 1), dtype=u.dtype, order=u_order) + + pairwise_distance(u, v, output_arr, "chebyshev") + + return output_arr[0, 0] + + +def cityblock(u, v): + """Compute the City Block (Manhattan) distance between two 1-D arrays. + + The City Block distance is defined as + + .. math:: + d(u, v) = \\sum_{i} |u_i - v_i| + + Args: + u (array_like): Input array of size (N,) + v (array_like): Input array of size (N,) + + Returns: + cityblock (double): The City Block distance between + vectors `u` and `v`. 
+ """ + check_soft_dependencies() + + u_order = "F" if cupy.isfortran(u) else "C" + v_order = "F" if cupy.isfortran(v) else "C" + + if u_order != v_order: + raise ValueError('u and v must have the same layout ' + '(u.order=%s, v.order=%s' % (u_order, v_order)) + + output_arr = cupy.empty((1, 1), dtype=u.dtype, order=u_order) + + pairwise_distance(u, v, output_arr, "cityblock") + + return output_arr[0, 0] + + +def correlation(u, v): + """Compute the correlation distance between two 1-D arrays. + + The correlation distance is defined as + + .. math:: + d(u, v) = 1 - \\frac{(u - \\bar{u}) \\cdot (v - \\bar{v})}{ + \\|(u - \\bar{u})\\|_2 \\|(v - \\bar{v})\\|_2} + + where :math:`\\bar{u}` is the mean of the elements of :math:`u` and + :math:`x \\cdot y` is the dot product. + + Args: + u (array_like): Input array of size (N,) + v (array_like): Input array of size (N,) + + Returns: + correlation (double): The correlation distance between + vectors `u` and `v`. + """ + check_soft_dependencies() + + u_order = "F" if cupy.isfortran(u) else "C" + v_order = "F" if cupy.isfortran(v) else "C" + + if u_order != v_order: + raise ValueError('u and v must have the same layout ' + '(u.order=%s, v.order=%s' % (u_order, v_order)) + + output_arr = cupy.empty((1, 1), dtype=u.dtype, order=u_order) + + pairwise_distance(u, v, output_arr, "correlation") + + return output_arr[0, 0] + + +def cosine(u, v): + """Compute the Cosine distance between two 1-D arrays. + + The Cosine distance is defined as + + .. math:: + d(u, v) = 1 - \\frac{u \\cdot v}{\\|u\\|_2 \\|v\\|_2} + + where :math:`x \\cdot y` is the dot product. + + Args: + u (array_like): Input array of size (N,) + v (array_like): Input array of size (N,) + + Returns: + cosine (double): The Cosine distance between vectors `u` and `v`. 
+ """ + check_soft_dependencies() + + u_order = "F" if cupy.isfortran(u) else "C" + v_order = "F" if cupy.isfortran(v) else "C" + + if u_order != v_order: + raise ValueError('u and v must have the same layout ' + '(u.order=%s, v.order=%s' % (u_order, v_order)) + + output_arr = cupy.empty((1, 1), dtype=u.dtype, order=u_order) + + pairwise_distance(u, v, output_arr, "cosine") + + return output_arr[0, 0] + + +def hamming(u, v): + """Compute the Hamming distance between two 1-D arrays. + + The Hamming distance is defined as the proportion of elements + in both `u` and `v` that are not in the exact same position: + + .. math:: + d(u, v) = \\frac{1}{n} \\sum_{k=0}^n u_i \\neq v_i + + where :math:`x \\neq y` is one if :math:`x` is different from :math:`y` + and zero otherwise. + + Args: + u (array_like): Input array of size (N,) + v (array_like): Input array of size (N,) + + Returns: + hamming (double): The Hamming distance between vectors `u` and `v`. + """ + check_soft_dependencies() + + u_order = "F" if cupy.isfortran(u) else "C" + v_order = "F" if cupy.isfortran(v) else "C" + + if u_order != v_order: + raise ValueError('u and v must have the same layout ' + '(u.order=%s, v.order=%s' % (u_order, v_order)) + + output_arr = cupy.empty((1, 1), dtype=u.dtype, order=u_order) + + pairwise_distance(u, v, output_arr, "hamming") + + return output_arr[0, 0] + + +def euclidean(u, v): + """Compute the Euclidean distance between two 1-D arrays. + + The Euclidean distance is defined as + + .. math:: + d(u, v) = \\left(\\sum_{i} (u_i - v_i)^2\\right)^{\\sfrac{1}{2}} + + Args: + u (array_like): Input array of size (N,) + v (array_like): Input array of size (N,) + + Returns: + euclidean (double): The Euclidean distance between vectors `u` and `v`. 
+ """ + check_soft_dependencies() + + u_order = "F" if cupy.isfortran(u) else "C" + v_order = "F" if cupy.isfortran(v) else "C" + + if u_order != v_order: + raise ValueError('u and v must have the same layout ' + '(u.order=%s, v.order=%s' % (u_order, v_order)) + + output_arr = cupy.empty((1, 1), dtype=u.dtype, order=u_order) + + pairwise_distance(u, v, output_arr, "euclidean") + + return output_arr[0, 0] + + +def jensenshannon(u, v): + """Compute the Jensen-Shannon distance between two 1-D arrays. + + The Jensen-Shannon distance is defined as + + .. math:: + d(u, v) = \\sqrt{\\frac{KL(u \\| m) + KL(v \\| m)}{2}} + + where :math:`KL` is the Kullback-Leibler divergence and :math:`m` is the + pointwise mean of `u` and `v`. + + Args: + u (array_like): Input array of size (N,) + v (array_like): Input array of size (N,) + + Returns: + jensenshannon (double): The Jensen-Shannon distance between + vectors `u` and `v`. + """ + check_soft_dependencies() + + u_order = "F" if cupy.isfortran(u) else "C" + v_order = "F" if cupy.isfortran(v) else "C" + + if u_order != v_order: + raise ValueError('u and v must have the same layout ' + '(u.order=%s, v.order=%s' % (u_order, v_order)) + + output_arr = cupy.empty((1, 1), dtype=u.dtype, order=u_order) + + pairwise_distance(u, v, output_arr, "jensenshannon") + + return output_arr[0, 0] + + +def russellrao(u, v): + """Compute the Russell-Rao distance between two 1-D arrays. + + The Russell-Rao distance is defined as the proportion of elements + in both `u` and `v` that are in the exact same position: + + .. math:: + d(u, v) = \\frac{1}{n} \\sum_{k=0}^n u_i = v_i + + where :math:`x = y` is one if :math:`x` is different from :math:`y` + and zero otherwise. + + Args: + u (array_like): Input array of size (N,) + v (array_like): Input array of size (N,) + + Returns: + hamming (double): The Hamming distance between vectors `u` and `v`. 
+ """ + check_soft_dependencies() + + u_order = "F" if cupy.isfortran(u) else "C" + v_order = "F" if cupy.isfortran(v) else "C" + + if u_order != v_order: + raise ValueError('u and v must have the same layout ' + '(u.order=%s, v.order=%s' % (u_order, v_order)) + + output_arr = cupy.empty((1, 1), dtype=u.dtype, order=u_order) + + pairwise_distance(u, v, output_arr, "russellrao") + + return output_arr[0, 0] + + +def sqeuclidean(u, v): + """Compute the squared Euclidean distance between two 1-D arrays. + + The squared Euclidean distance is defined as + + .. math:: + d(u, v) = \\sum_{i} (u_i - v_i)^2 + + Args: + u (array_like): Input array of size (N,) + v (array_like): Input array of size (N,) + + Returns: + sqeuclidean (double): The squared Euclidean distance between + vectors `u` and `v`. + """ + check_soft_dependencies() + + u_order = "F" if cupy.isfortran(u) else "C" + v_order = "F" if cupy.isfortran(v) else "C" + + if u_order != v_order: + raise ValueError('u and v must have the same layout ' + '(u.order=%s, v.order=%s' % (u_order, v_order)) + + output_arr = cupy.empty((1, 1), dtype=u.dtype, order=u_order) + + pairwise_distance(u, v, output_arr, "sqeuclidean") + + return output_arr[0, 0] + + +def hellinger(u, v): + """Compute the Hellinger distance between two 1-D arrays. + + The Hellinger distance is defined as + + .. math:: + d(u, v) = \\frac{1}{\\sqrt{2}} \\sqrt{ + \\sum_{i} (\\sqrt{u_i} - \\sqrt{v_i})^2} + + Args: + u (array_like): Input array of size (N,) + v (array_like): Input array of size (N,) + + Returns: + hellinger (double): The Hellinger distance between + vectors `u` and `v`. 
+ """ + check_soft_dependencies() + + u_order = "F" if cupy.isfortran(u) else "C" + v_order = "F" if cupy.isfortran(v) else "C" + + if u_order != v_order: + raise ValueError('u and v must have the same layout ' + '(u.order=%s, v.order=%s' % (u_order, v_order)) + + output_arr = cupy.empty((1, 1), dtype=u.dtype, order=u_order) + + pairwise_distance(u, v, output_arr, "hellinger") + + return output_arr[0, 0] + + +def kl_divergence(u, v): + """Compute the Kullback-Leibler divergence between two 1-D arrays. + + The Kullback-Leibler divergence is defined as + + .. math:: + KL(U \\| V) = \\sum_{i} U_i \\log{\\left(\\frac{U_i}{V_i}\\right)} + + Args: + u (array_like): Input array of size (N,) + v (array_like): Input array of size (N,) + + Returns: + kl_divergence (double): The Kullback-Leibler divergence between + vectors `u` and `v`. + """ + check_soft_dependencies() + + u = cupy.asarray(u) + v = cupy.asarray(v) + + u_order = "F" if cupy.isfortran(u) else "C" + v_order = "F" if cupy.isfortran(v) else "C" + + if u_order != v_order: + raise ValueError('u and v must have the same layout ' + '(u.order=%s, v.order=%s' % (u_order, v_order)) + + output_arr = cupy.empty((1, 1), dtype=u.dtype, order=u_order) + pairwise_distance(u, v, output_arr, "kl_divergence") + + return output_arr[0, 0] + + +def cdist(XA, XB, metric='euclidean', out=None, **kwargs): + """Compute distance between each pair of the two collections of inputs. + + Args: + XA (array_like): An :math:`m_A` by :math:`n` array of :math:`m_A` + original observations in an :math:`n`-dimensional space. + Inputs are converted to float type. + XB (array_like): An :math:`m_B` by :math:`n` array of :math:`m_B` + original observations in an :math:`n`-dimensional space. + Inputs are converted to float type. + metric (str, optional): The distance metric to use. 
+ The distance function can be 'canberra', 'chebyshev', + 'cityblock', 'correlation', 'cosine', 'euclidean', 'hamming', + 'hellinger', 'jensenshannon', 'kl_divergence', 'matching', + 'minkowski', 'russellrao', 'sqeuclidean'. + out (cupy.ndarray, optional): The output array. If not None, the + distance matrix Y is stored in this array. + **kwargs (dict, optional): Extra arguments to `metric`: refer to each + metric documentation for a list of all possible arguments. + Some possible arguments: + p (float): The p-norm to apply for Minkowski, weighted and + unweighted. Default: 2.0 + + Returns: + Y (cupy.ndarray): A :math:`m_A` by :math:`m_B` distance matrix is + returned. For each :math:`i` and :math:`j`, the metric + ``dist(u=XA[i], v=XB[j])`` is computed and stored in the + :math:`ij` th entry. + """ + check_soft_dependencies() + + if pylibraft_available or \ + (cuvs_available and XA.dtype not in ['float32', 'float64']): + XA = cupy.asarray(XA, dtype='float32') + + if pylibraft_available or \ + (cuvs_available and XB.dtype not in ['float32', 'float64']): + XB = cupy.asarray(XB, dtype='float32') + + XA_order = "F" if cupy.isfortran(XA) else "C" + XB_order = "F" if cupy.isfortran(XB) else "C" + + if XA_order != XB_order: + raise ValueError('XA and XB must have the same layout ' + '(XA.order=%s, XB.order=%s' % (XA_order, XB_order)) + + s = XA.shape + sB = XB.shape + + if len(s) != 2: + raise ValueError('XA must be a 2-dimensional array.') + if len(sB) != 2: + raise ValueError('XB must be a 2-dimensional array.') + if s[1] != sB[1]: + raise ValueError('XA and XB must have the same number of columns ' + '(i.e. 
feature dimension.)') + + mA = s[0] + mB = sB[0] + + p = kwargs["p"] if "p" in kwargs else 2.0 + + if out is not None: + if (pylibraft_available and out.dtype != 'float32') or \ + (cuvs_available and out.dtype not in ['float32', 'float64']): + out_order = "F" if cupy.isfortran(out) else "C" + if out_order != XA_order: + raise ValueError('out must have same layout as input ' + '(out.order=%s)' % out_order) + out = out.astype('float32', copy=False) + if out.shape != (mA, mB): + cupy.resize(out, (mA, mB)) + + if isinstance(metric, str): + mstr = metric.lower() + metric_info = _METRIC_ALIAS.get(mstr, None) + if metric_info is not None: + output_arr = out if out is not None else cupy.empty((mA, mB), + dtype=XA.dtype, + order=XA_order) + pairwise_distance(XA, XB, output_arr, metric, p) + return output_arr + else: + raise ValueError('Unknown Distance Metric: %s' % mstr) + else: + raise TypeError('2nd argument metric must be a string identifier') + + +def pdist(X, metric='euclidean', *, out=None, **kwargs): + """Compute distance between observations in n-dimensional space. + + Args: + X (array_like): An :math:`m` by :math:`n` array of :math:`m` + original observations in an :math:`n`-dimensional space. + Inputs are converted to float type. + metric (str, optional): The distance metric to use. + The distance function can be 'canberra', 'chebyshev', + 'cityblock', 'correlation', 'cosine', 'euclidean', 'hamming', + 'hellinger', 'jensenshannon', 'kl_divergence', 'matching', + 'minkowski', 'russellrao', 'sqeuclidean'. + out (cupy.ndarray, optional): The output array. If not None, the + distance matrix Y is stored in this array. + **kwargs (dict, optional): Extra arguments to `metric`: refer to each + metric documentation for a list of all possible arguments. + Some possible arguments: + p (float): The p-norm to apply for Minkowski, weighted and + unweighted. Default: 2.0 + + Returns: + Y (cupy.ndarray): + Returns a condensed distance matrix Y. 
For each :math:`i` and + :math:`j` and (where :math:`i < j < m`), where m is the number of + original observations. The metric ``dist(u=X[i], v=X[j])`` is + computed and stored in entry + ``m * i + j - ((i + 2) * (i + 1)) // 2``. + """ + all_dist = cdist(X, X, metric=metric, out=out, **kwargs) + up_idx = cupy.triu_indices_from(all_dist, 1) + return all_dist[up_idx] + + +def distance_matrix(x, y, p=2.0): + """Compute the distance matrix. + + Returns the matrix of all pair-wise distances. + + Args: + x (array_like): Matrix of M vectors in K dimensions. + y (array_like): Matrix of N vectors in K dimensions. + p (float): Which Minkowski p-norm to use (1 <= p <= infinity). + Default=2.0 + Returns: + result (cupy.ndarray): Matrix containing the distance from every + vector in `x` to every vector in `y`, (size M, N). + """ + x = cupy.asarray(x) + m, k = x.shape + y = cupy.asarray(y) + n, kk = y.shape + + if k != kk: + raise ValueError("x contains %d-dimensional vectors but y " + "contains %d-dimensional vectors" % (k, kk)) + + return cdist(x, y, metric="minkowski", p=p)