Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_autocast_to_full_precision_compositeimplicitautograd_dispatch.h +23 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_sparse_backward.h +47 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log2_cuda_dispatch.h +24 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sign_cuda_dispatch.h +24 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_indices_copy_native.h +22 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_backward_data_cuda_dispatch.h +25 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_rowwise_prune_compositeimplicitautograd_dispatch.h +23 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/baddbmm.h +39 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/block_diag_compositeexplicitautograd_dispatch.h +25 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/col2im.h +91 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/expm1_cuda_dispatch.h +26 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_ops.h +28 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft_compositeimplicitautograd_dispatch.h +28 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/frexp_native.h +22 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/from_file.h +43 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/full_like.h +43 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/glu_jvp.h +39 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/layer_norm.h +47 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_meta_dispatch.h +25 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/less_ops.h +83 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_meta.h +27 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq_ops.h +39 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/logcumsumexp_compositeexplicitautograd_dispatch.h +25 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp_native.h +24 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/nested_to_padded_tensor_compositeimplicitautograd_dispatch.h +23 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad3d_backward_cpu_dispatch.h +28 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad3d_backward_meta_dispatch.h +28 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_entr_compositeexplicitautogradnonfunctional_dispatch.h +23 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_legendre_polynomial_p_native.h +27 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d_backward.h +91 -0
- vllm/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/const_vs_enum.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/contains.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/issue232.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/json_schema_test_suite.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/subcomponents.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/unused_registry.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/useless_applicator_schemas.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/useless_keywords.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/jsonschema/benchmarks/nested_schemas.py +56 -0
- vllm/lib/python3.10/site-packages/jsonschema/benchmarks/unused_registry.py +35 -0
- vllm/lib/python3.10/site-packages/mpmath/ctx_base.py +494 -0
- vllm/lib/python3.10/site-packages/mpmath/ctx_fp.py +253 -0
- vllm/lib/python3.10/site-packages/mpmath/ctx_mp.py +1339 -0
- vllm/lib/python3.10/site-packages/mpmath/functions/__init__.py +14 -0
- vllm/lib/python3.10/site-packages/mpmath/functions/__pycache__/bessel.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/mpmath/functions/__pycache__/expintegrals.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/mpmath/functions/__pycache__/hypergeometric.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/mpmath/functions/__pycache__/signals.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/mpmath/functions/bessel.py +1108 -0
- vllm/lib/python3.10/site-packages/mpmath/functions/elliptic.py +1431 -0
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_autocast_to_full_precision_compositeimplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeimplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor _autocast_to_full_precision(const at::Tensor & self, bool cuda_enabled, bool cpu_enabled);
|
| 21 |
+
|
| 22 |
+
} // namespace compositeimplicitautograd
|
| 23 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_embedding_bag_sparse_backward.h
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/_embedding_bag_sparse_backward_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::_embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
|
| 26 |
+
inline at::Tensor _embedding_bag_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) {
|
| 27 |
+
return at::_ops::_embedding_bag_sparse_backward::call(grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
|
| 28 |
+
}
|
| 29 |
+
namespace symint {
|
| 30 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 31 |
+
at::Tensor _embedding_bag_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, int64_t num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) {
|
| 32 |
+
return at::_ops::_embedding_bag_sparse_backward::call(grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
|
| 33 |
+
}
|
| 34 |
+
}
|
| 35 |
+
|
| 36 |
+
// aten::_embedding_bag_sparse_backward(Tensor grad, Tensor indices, Tensor offsets, Tensor offset2bag, Tensor bag_size, SymInt num_weights, bool scale_grad_by_freq, int mode, Tensor? per_sample_weights, int padding_idx=-1) -> Tensor
|
| 37 |
+
inline at::Tensor _embedding_bag_sparse_backward_symint(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) {
|
| 38 |
+
return at::_ops::_embedding_bag_sparse_backward::call(grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
|
| 39 |
+
}
|
| 40 |
+
namespace symint {
|
| 41 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 42 |
+
at::Tensor _embedding_bag_sparse_backward(const at::Tensor & grad, const at::Tensor & indices, const at::Tensor & offsets, const at::Tensor & offset2bag, const at::Tensor & bag_size, c10::SymInt num_weights, bool scale_grad_by_freq, int64_t mode, const c10::optional<at::Tensor> & per_sample_weights, int64_t padding_idx=-1) {
|
| 43 |
+
return at::_ops::_embedding_bag_sparse_backward::call(grad, indices, offsets, offset2bag, bag_size, num_weights, scale_grad_by_freq, mode, per_sample_weights, padding_idx);
|
| 44 |
+
}
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
}
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_log2_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cuda {
|
| 19 |
+
|
| 20 |
+
TORCH_API ::std::vector<at::Tensor> _foreach_log2(at::TensorList self);
|
| 21 |
+
TORCH_API void _foreach_log2_(at::TensorList self);
|
| 22 |
+
|
| 23 |
+
} // namespace cuda
|
| 24 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_sign_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cuda {
|
| 19 |
+
|
| 20 |
+
TORCH_API ::std::vector<at::Tensor> _foreach_sign(at::TensorList self);
|
| 21 |
+
TORCH_API void _foreach_sign_(at::TensorList self);
|
| 22 |
+
|
| 23 |
+
} // namespace cuda
|
| 24 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_indices_copy_native.h
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor & _indices_copy_out(const at::Tensor & self, at::Tensor & out);
|
| 20 |
+
TORCH_API at::Tensor _indices_copy(const at::Tensor & self);
|
| 21 |
+
} // namespace native
|
| 22 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_log_softmax_backward_data_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cuda {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor _log_softmax_backward_data(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype);
|
| 21 |
+
TORCH_API at::Tensor & _log_softmax_backward_data_out(at::Tensor & out, const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype);
|
| 22 |
+
TORCH_API at::Tensor & _log_softmax_backward_data_outf(const at::Tensor & grad_output, const at::Tensor & output, int64_t dim, at::ScalarType input_dtype, at::Tensor & out);
|
| 23 |
+
|
| 24 |
+
} // namespace cuda
|
| 25 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_rowwise_prune_compositeimplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeimplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API ::std::tuple<at::Tensor,at::Tensor> _rowwise_prune(const at::Tensor & weight, const at::Tensor & mask, at::ScalarType compressed_indices_dtype);
|
| 21 |
+
|
| 22 |
+
} // namespace compositeimplicitautograd
|
| 23 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/baddbmm.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/baddbmm_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::baddbmm(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1) -> Tensor
|
| 26 |
+
inline at::Tensor baddbmm(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
|
| 27 |
+
return at::_ops::baddbmm::call(self, batch1, batch2, beta, alpha);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
// aten::baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
|
| 31 |
+
inline at::Tensor & baddbmm_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta=1, const at::Scalar & alpha=1) {
|
| 32 |
+
return at::_ops::baddbmm_out::call(self, batch1, batch2, beta, alpha, out);
|
| 33 |
+
}
|
| 34 |
+
// aten::baddbmm.out(Tensor self, Tensor batch1, Tensor batch2, *, Scalar beta=1, Scalar alpha=1, Tensor(a!) out) -> Tensor(a!)
|
| 35 |
+
inline at::Tensor & baddbmm_outf(const at::Tensor & self, const at::Tensor & batch1, const at::Tensor & batch2, const at::Scalar & beta, const at::Scalar & alpha, at::Tensor & out) {
|
| 36 |
+
return at::_ops::baddbmm_out::call(self, batch1, batch2, beta, alpha, out);
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
}
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/block_diag_compositeexplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor block_diag(at::TensorList tensors);
|
| 21 |
+
TORCH_API at::Tensor & block_diag_out(at::Tensor & out, at::TensorList tensors);
|
| 22 |
+
TORCH_API at::Tensor & block_diag_outf(at::TensorList tensors, at::Tensor & out);
|
| 23 |
+
|
| 24 |
+
} // namespace compositeexplicitautograd
|
| 25 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/col2im.h
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/col2im_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
|
| 26 |
+
inline at::Tensor & col2im_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
|
| 27 |
+
return at::_ops::col2im_out::call(self, c10::fromIntArrayRefSlow(output_size), kernel_size, dilation, padding, stride, out);
|
| 28 |
+
}
|
| 29 |
+
namespace symint {
|
| 30 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 31 |
+
at::Tensor & col2im_out(at::Tensor & out, const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
|
| 32 |
+
return at::_ops::col2im_out::call(self, c10::fromIntArrayRefSlow(output_size), kernel_size, dilation, padding, stride, out);
|
| 33 |
+
}
|
| 34 |
+
}
|
| 35 |
+
|
| 36 |
+
// aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
|
| 37 |
+
inline at::Tensor & col2im_outf(const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
|
| 38 |
+
return at::_ops::col2im_out::call(self, c10::fromIntArrayRefSlow(output_size), kernel_size, dilation, padding, stride, out);
|
| 39 |
+
}
|
| 40 |
+
namespace symint {
|
| 41 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 42 |
+
at::Tensor & col2im_outf(const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
|
| 43 |
+
return at::_ops::col2im_out::call(self, c10::fromIntArrayRefSlow(output_size), kernel_size, dilation, padding, stride, out);
|
| 44 |
+
}
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
// aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
|
| 48 |
+
inline at::Tensor & col2im_symint_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
|
| 49 |
+
return at::_ops::col2im_out::call(self, output_size, kernel_size, dilation, padding, stride, out);
|
| 50 |
+
}
|
| 51 |
+
namespace symint {
|
| 52 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 53 |
+
at::Tensor & col2im_out(at::Tensor & out, const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
|
| 54 |
+
return at::_ops::col2im_out::call(self, output_size, kernel_size, dilation, padding, stride, out);
|
| 55 |
+
}
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
// aten::col2im.out(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride, *, Tensor(a!) out) -> Tensor(a!)
|
| 59 |
+
inline at::Tensor & col2im_symint_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
|
| 60 |
+
return at::_ops::col2im_out::call(self, output_size, kernel_size, dilation, padding, stride, out);
|
| 61 |
+
}
|
| 62 |
+
namespace symint {
|
| 63 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 64 |
+
at::Tensor & col2im_outf(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride, at::Tensor & out) {
|
| 65 |
+
return at::_ops::col2im_out::call(self, output_size, kernel_size, dilation, padding, stride, out);
|
| 66 |
+
}
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
// aten::col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
|
| 70 |
+
inline at::Tensor col2im(const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
|
| 71 |
+
return at::_ops::col2im::call(self, c10::fromIntArrayRefSlow(output_size), kernel_size, dilation, padding, stride);
|
| 72 |
+
}
|
| 73 |
+
namespace symint {
|
| 74 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 75 |
+
at::Tensor col2im(const at::Tensor & self, at::IntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
|
| 76 |
+
return at::_ops::col2im::call(self, c10::fromIntArrayRefSlow(output_size), kernel_size, dilation, padding, stride);
|
| 77 |
+
}
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
// aten::col2im(Tensor self, SymInt[2] output_size, int[2] kernel_size, int[2] dilation, int[2] padding, int[2] stride) -> Tensor
|
| 81 |
+
inline at::Tensor col2im_symint(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
|
| 82 |
+
return at::_ops::col2im::call(self, output_size, kernel_size, dilation, padding, stride);
|
| 83 |
+
}
|
| 84 |
+
namespace symint {
|
| 85 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 86 |
+
at::Tensor col2im(const at::Tensor & self, c10::SymIntArrayRef output_size, at::IntArrayRef kernel_size, at::IntArrayRef dilation, at::IntArrayRef padding, at::IntArrayRef stride) {
|
| 87 |
+
return at::_ops::col2im::call(self, output_size, kernel_size, dilation, padding, stride);
|
| 88 |
+
}
|
| 89 |
+
}
|
| 90 |
+
|
| 91 |
+
}
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/expm1_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cuda {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor expm1(const at::Tensor & self);
|
| 21 |
+
TORCH_API at::Tensor & expm1_out(at::Tensor & out, const at::Tensor & self);
|
| 22 |
+
TORCH_API at::Tensor & expm1_outf(const at::Tensor & self, at::Tensor & out);
|
| 23 |
+
TORCH_API at::Tensor & expm1_(at::Tensor & self);
|
| 24 |
+
|
| 25 |
+
} // namespace cuda
|
| 26 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fake_quantize_per_channel_affine_ops.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API fake_quantize_per_channel_affine {
|
| 18 |
+
using schema = at::Tensor (const at::Tensor &, const at::Tensor &, const at::Tensor &, int64_t, int64_t, int64_t);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::fake_quantize_per_channel_affine")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "fake_quantize_per_channel_affine(Tensor self, Tensor scale, Tensor zero_point, int axis, int quant_min, int quant_max) -> Tensor")
|
| 24 |
+
static at::Tensor call(const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max);
|
| 25 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & scale, const at::Tensor & zero_point, int64_t axis, int64_t quant_min, int64_t quant_max);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
}} // namespace at::_ops
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/fft_rfft_compositeimplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeimplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor fft_rfft(const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt);
|
| 21 |
+
TORCH_API at::Tensor fft_rfft_symint(const at::Tensor & self, c10::optional<c10::SymInt> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt);
|
| 22 |
+
TORCH_API at::Tensor & fft_rfft_out(at::Tensor & out, const at::Tensor & self, c10::optional<int64_t> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt);
|
| 23 |
+
TORCH_API at::Tensor & fft_rfft_outf(const at::Tensor & self, c10::optional<int64_t> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out);
|
| 24 |
+
TORCH_API at::Tensor & fft_rfft_symint_out(at::Tensor & out, const at::Tensor & self, c10::optional<c10::SymInt> n=c10::nullopt, int64_t dim=-1, c10::optional<c10::string_view> norm=c10::nullopt);
|
| 25 |
+
TORCH_API at::Tensor & fft_rfft_symint_outf(const at::Tensor & self, c10::optional<c10::SymInt> n, int64_t dim, c10::optional<c10::string_view> norm, at::Tensor & out);
|
| 26 |
+
|
| 27 |
+
} // namespace compositeimplicitautograd
|
| 28 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/frexp_native.h
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API ::std::tuple<at::Tensor,at::Tensor> frexp(const at::Tensor & self);
|
| 20 |
+
TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> frexp_out(const at::Tensor & self, at::Tensor & mantissa, at::Tensor & exponent);
|
| 21 |
+
} // namespace native
|
| 22 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/from_file.h
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/from_file_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
|
| 26 |
+
inline at::Tensor from_file(c10::string_view filename, c10::optional<bool> shared=c10::nullopt, c10::optional<int64_t> size=0, at::TensorOptions options={}) {
|
| 27 |
+
return at::_ops::from_file::call(filename, shared, size, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt());
|
| 28 |
+
}
|
| 29 |
+
// aten::from_file(str filename, bool? shared=None, int? size=0, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None) -> Tensor
|
| 30 |
+
inline at::Tensor from_file(c10::string_view filename, c10::optional<bool> shared, c10::optional<int64_t> size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory) {
|
| 31 |
+
return at::_ops::from_file::call(filename, shared, size, dtype, layout, device, pin_memory);
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
// aten::from_file.out(str filename, bool? shared=None, int? size=0, *, Tensor(a!) out) -> Tensor(a!)
|
| 35 |
+
inline at::Tensor & from_file_out(at::Tensor & out, c10::string_view filename, c10::optional<bool> shared=c10::nullopt, c10::optional<int64_t> size=0) {
|
| 36 |
+
return at::_ops::from_file_out::call(filename, shared, size, out);
|
| 37 |
+
}
|
| 38 |
+
// aten::from_file.out(str filename, bool? shared=None, int? size=0, *, Tensor(a!) out) -> Tensor(a!)
|
| 39 |
+
inline at::Tensor & from_file_outf(c10::string_view filename, c10::optional<bool> shared, c10::optional<int64_t> size, at::Tensor & out) {
|
| 40 |
+
return at::_ops::from_file_out::call(filename, shared, size, out);
|
| 41 |
+
}
|
| 42 |
+
|
| 43 |
+
}
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/full_like.h
ADDED
|
@@ -0,0 +1,43 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/full_like_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
|
| 26 |
+
inline at::Tensor full_like(const at::Tensor & self, const at::Scalar & fill_value, at::TensorOptions options={}, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
|
| 27 |
+
return at::_ops::full_like::call(self, fill_value, optTypeMetaToScalarType(options.dtype_opt()), options.layout_opt(), options.device_opt(), options.pinned_memory_opt(), c10::impl::check_tensor_options_and_extract_memory_format(options, memory_format));
|
| 28 |
+
}
|
| 29 |
+
// aten::full_like(Tensor self, Scalar fill_value, *, ScalarType? dtype=None, Layout? layout=None, Device? device=None, bool? pin_memory=None, MemoryFormat? memory_format=None) -> Tensor
|
| 30 |
+
inline at::Tensor full_like(const at::Tensor & self, const at::Scalar & fill_value, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory, c10::optional<at::MemoryFormat> memory_format) {
|
| 31 |
+
return at::_ops::full_like::call(self, fill_value, dtype, layout, device, pin_memory, memory_format);
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
// aten::full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
|
| 35 |
+
inline at::Tensor & full_like_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & fill_value, c10::optional<at::MemoryFormat> memory_format=c10::nullopt) {
|
| 36 |
+
return at::_ops::full_like_out::call(self, fill_value, memory_format, out);
|
| 37 |
+
}
|
| 38 |
+
// aten::full_like.out(Tensor self, Scalar fill_value, *, MemoryFormat? memory_format=None, Tensor(a!) out) -> Tensor(a!)
|
| 39 |
+
inline at::Tensor & full_like_outf(const at::Tensor & self, const at::Scalar & fill_value, c10::optional<at::MemoryFormat> memory_format, at::Tensor & out) {
|
| 40 |
+
return at::_ops::full_like_out::call(self, fill_value, memory_format, out);
|
| 41 |
+
}
|
| 42 |
+
|
| 43 |
+
}
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/glu_jvp.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/glu_jvp_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::glu_jvp(Tensor glu, Tensor x, Tensor dx, int dim) -> Tensor
|
| 26 |
+
inline at::Tensor glu_jvp(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim) {
|
| 27 |
+
return at::_ops::glu_jvp::call(glu, x, dx, dim);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
// aten::glu_jvp.out(Tensor glu, Tensor x, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!)
|
| 31 |
+
inline at::Tensor & glu_jvp_out(at::Tensor & out, const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim) {
|
| 32 |
+
return at::_ops::glu_jvp_out::call(glu, x, dx, dim, out);
|
| 33 |
+
}
|
| 34 |
+
// aten::glu_jvp.out(Tensor glu, Tensor x, Tensor dx, int dim, *, Tensor(a!) out) -> Tensor(a!)
|
| 35 |
+
inline at::Tensor & glu_jvp_outf(const at::Tensor & glu, const at::Tensor & x, const at::Tensor & dx, int64_t dim, at::Tensor & out) {
|
| 36 |
+
return at::_ops::glu_jvp_out::call(glu, x, dx, dim, out);
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
}
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/layer_norm.h
ADDED
|
@@ -0,0 +1,47 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/layer_norm_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor
|
| 26 |
+
inline at::Tensor layer_norm(const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight={}, const c10::optional<at::Tensor> & bias={}, double eps=1e-05, bool cudnn_enable=true) {
|
| 27 |
+
return at::_ops::layer_norm::call(input, c10::fromIntArrayRefSlow(normalized_shape), weight, bias, eps, cudnn_enable);
|
| 28 |
+
}
|
| 29 |
+
namespace symint {
|
| 30 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 31 |
+
at::Tensor layer_norm(const at::Tensor & input, at::IntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight={}, const c10::optional<at::Tensor> & bias={}, double eps=1e-05, bool cudnn_enable=true) {
|
| 32 |
+
return at::_ops::layer_norm::call(input, c10::fromIntArrayRefSlow(normalized_shape), weight, bias, eps, cudnn_enable);
|
| 33 |
+
}
|
| 34 |
+
}
|
| 35 |
+
|
| 36 |
+
// aten::layer_norm(Tensor input, SymInt[] normalized_shape, Tensor? weight=None, Tensor? bias=None, float eps=1e-05, bool cudnn_enable=True) -> Tensor
|
| 37 |
+
inline at::Tensor layer_norm_symint(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight={}, const c10::optional<at::Tensor> & bias={}, double eps=1e-05, bool cudnn_enable=true) {
|
| 38 |
+
return at::_ops::layer_norm::call(input, normalized_shape, weight, bias, eps, cudnn_enable);
|
| 39 |
+
}
|
| 40 |
+
namespace symint {
|
| 41 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 42 |
+
at::Tensor layer_norm(const at::Tensor & input, c10::SymIntArrayRef normalized_shape, const c10::optional<at::Tensor> & weight={}, const c10::optional<at::Tensor> & bias={}, double eps=1e-05, bool cudnn_enable=true) {
|
| 43 |
+
return at::_ops::layer_norm::call(input, normalized_shape, weight, bias, eps, cudnn_enable);
|
| 44 |
+
}
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
}
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/leaky_relu_backward_meta_dispatch.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace meta {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor leaky_relu_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result);
|
| 21 |
+
TORCH_API at::Tensor & leaky_relu_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result);
|
| 22 |
+
TORCH_API at::Tensor & leaky_relu_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, const at::Scalar & negative_slope, bool self_is_result, at::Tensor & grad_input);
|
| 23 |
+
|
| 24 |
+
} // namespace meta
|
| 25 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/less_ops.h
ADDED
|
@@ -0,0 +1,83 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API less_Scalar_out {
|
| 18 |
+
using schema = at::Tensor & (const at::Tensor &, const at::Scalar &, at::Tensor &);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::less")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar_out")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "less.Scalar_out(Tensor self, Scalar other, *, Tensor(a!) out) -> Tensor(a!)")
|
| 24 |
+
static at::Tensor & call(const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
|
| 25 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other, at::Tensor & out);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API less_Scalar {
|
| 29 |
+
using schema = at::Tensor (const at::Tensor &, const at::Scalar &);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::less")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "less.Scalar(Tensor self, Scalar other) -> Tensor")
|
| 35 |
+
static at::Tensor call(const at::Tensor & self, const at::Scalar & other);
|
| 36 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Scalar & other);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
struct TORCH_API less_Tensor_out {
|
| 40 |
+
using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::Tensor &);
|
| 41 |
+
using ptr_schema = schema*;
|
| 42 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 43 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::less")
|
| 44 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor_out")
|
| 45 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "less.Tensor_out(Tensor self, Tensor other, *, Tensor(a!) out) -> Tensor(a!)")
|
| 46 |
+
static at::Tensor & call(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
|
| 47 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
|
| 48 |
+
};
|
| 49 |
+
|
| 50 |
+
struct TORCH_API less_Tensor {
|
| 51 |
+
using schema = at::Tensor (const at::Tensor &, const at::Tensor &);
|
| 52 |
+
using ptr_schema = schema*;
|
| 53 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 54 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::less")
|
| 55 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
|
| 56 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "less.Tensor(Tensor self, Tensor other) -> Tensor")
|
| 57 |
+
static at::Tensor call(const at::Tensor & self, const at::Tensor & other);
|
| 58 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & other);
|
| 59 |
+
};
|
| 60 |
+
|
| 61 |
+
struct TORCH_API less__Scalar {
|
| 62 |
+
using schema = at::Tensor & (at::Tensor &, const at::Scalar &);
|
| 63 |
+
using ptr_schema = schema*;
|
| 64 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 65 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::less_")
|
| 66 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Scalar")
|
| 67 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "less_.Scalar(Tensor(a!) self, Scalar other) -> Tensor(a!)")
|
| 68 |
+
static at::Tensor & call(at::Tensor & self, const at::Scalar & other);
|
| 69 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Scalar & other);
|
| 70 |
+
};
|
| 71 |
+
|
| 72 |
+
struct TORCH_API less__Tensor {
|
| 73 |
+
using schema = at::Tensor & (at::Tensor &, const at::Tensor &);
|
| 74 |
+
using ptr_schema = schema*;
|
| 75 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 76 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::less_")
|
| 77 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "Tensor")
|
| 78 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "less_.Tensor(Tensor(a!) self, Tensor other) -> Tensor(a!)")
|
| 79 |
+
static at::Tensor & call(at::Tensor & self, const at::Tensor & other);
|
| 80 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, at::Tensor & self, const at::Tensor & other);
|
| 81 |
+
};
|
| 82 |
+
|
| 83 |
+
}} // namespace at::_ops
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_ex_meta.h
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeMetaFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/TensorIterator.h>
|
| 13 |
+
#include <ATen/TensorMeta.h>
|
| 14 |
+
#include <tuple>
|
| 15 |
+
#include <vector>
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace meta {
|
| 19 |
+
|
| 20 |
+
struct TORCH_API structured_linalg_ldl_factor_ex : public at::impl::MetaBase {
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
void meta(const at::Tensor & self, bool hermitian, bool check_errors);
|
| 24 |
+
};
|
| 25 |
+
|
| 26 |
+
} // namespace native
|
| 27 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_lstsq_ops.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API linalg_lstsq {
|
| 18 |
+
using schema = ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> (const at::Tensor &, const at::Tensor &, c10::optional<double>, c10::optional<c10::string_view>);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_lstsq")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_lstsq(Tensor self, Tensor b, float? rcond=None, *, str? driver=None) -> (Tensor solution, Tensor residuals, Tensor rank, Tensor singular_values)")
|
| 24 |
+
static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> call(const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond, c10::optional<c10::string_view> driver);
|
| 25 |
+
static ::std::tuple<at::Tensor,at::Tensor,at::Tensor,at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond, c10::optional<c10::string_view> driver);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API linalg_lstsq_out {
|
| 29 |
+
using schema = ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> (const at::Tensor &, const at::Tensor &, c10::optional<double>, c10::optional<c10::string_view>, at::Tensor &, at::Tensor &, at::Tensor &, at::Tensor &);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::linalg_lstsq")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "linalg_lstsq.out(Tensor self, Tensor b, float? rcond=None, *, str? driver=None, Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values) -> (Tensor(a!) solution, Tensor(b!) residuals, Tensor(c!) rank, Tensor(d!) singular_values)")
|
| 35 |
+
static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> call(const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond, c10::optional<c10::string_view> driver, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values);
|
| 36 |
+
static ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &,at::Tensor &> redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & b, c10::optional<double> rcond, c10::optional<c10::string_view> driver, at::Tensor & solution, at::Tensor & residuals, at::Tensor & rank, at::Tensor & singular_values);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
}} // namespace at::_ops
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/logcumsumexp_compositeexplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor logcumsumexp(const at::Tensor & self, int64_t dim);
|
| 21 |
+
TORCH_API at::Tensor & logcumsumexp_out(at::Tensor & out, const at::Tensor & self, int64_t dim);
|
| 22 |
+
TORCH_API at::Tensor & logcumsumexp_outf(const at::Tensor & self, int64_t dim, at::Tensor & out);
|
| 23 |
+
|
| 24 |
+
} // namespace compositeexplicitautograd
|
| 25 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/logsumexp_native.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor logsumexp(const at::Tensor & self, at::IntArrayRef dim, bool keepdim=false);
|
| 20 |
+
TORCH_API at::Tensor & logsumexp_out(const at::Tensor & self, at::IntArrayRef dim, bool keepdim, at::Tensor & out);
|
| 21 |
+
TORCH_API at::Tensor logsumexp(const at::Tensor & self, at::DimnameList dim, bool keepdim=false);
|
| 22 |
+
TORCH_API at::Tensor & logsumexp_out(const at::Tensor & self, at::DimnameList dim, bool keepdim, at::Tensor & out);
|
| 23 |
+
} // namespace native
|
| 24 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/nested_to_padded_tensor_compositeimplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeimplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor nested_to_padded_tensor(const at::Tensor & self, double padding, at::OptionalIntArrayRef output_size=c10::nullopt);
|
| 21 |
+
|
| 22 |
+
} // namespace compositeimplicitautograd
|
| 23 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad3d_backward_cpu_dispatch.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cpu {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor reflection_pad3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding);
|
| 21 |
+
TORCH_API at::Tensor reflection_pad3d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding);
|
| 22 |
+
TORCH_API at::Tensor & reflection_pad3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding);
|
| 23 |
+
TORCH_API at::Tensor & reflection_pad3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input);
|
| 24 |
+
TORCH_API at::Tensor & reflection_pad3d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding);
|
| 25 |
+
TORCH_API at::Tensor & reflection_pad3d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input);
|
| 26 |
+
|
| 27 |
+
} // namespace cpu
|
| 28 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/reflection_pad3d_backward_meta_dispatch.h
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace meta {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor reflection_pad3d_backward(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding);
|
| 21 |
+
TORCH_API at::Tensor reflection_pad3d_backward_symint(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding);
|
| 22 |
+
TORCH_API at::Tensor & reflection_pad3d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding);
|
| 23 |
+
TORCH_API at::Tensor & reflection_pad3d_backward_outf(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef padding, at::Tensor & grad_input);
|
| 24 |
+
TORCH_API at::Tensor & reflection_pad3d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding);
|
| 25 |
+
TORCH_API at::Tensor & reflection_pad3d_backward_symint_outf(const at::Tensor & grad_output, const at::Tensor & self, c10::SymIntArrayRef padding, at::Tensor & grad_input);
|
| 26 |
+
|
| 27 |
+
} // namespace meta
|
| 28 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_entr_compositeexplicitautogradnonfunctional_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautogradnonfunctional {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor special_entr(const at::Tensor & self);
|
| 21 |
+
|
| 22 |
+
} // namespace compositeexplicitautogradnonfunctional
|
| 23 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/special_legendre_polynomial_p_native.h
ADDED
|
@@ -0,0 +1,27 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
#include <ATen/ops/special_legendre_polynomial_p_meta.h>
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
struct TORCH_API structured_special_legendre_polynomial_p_out : public at::meta::structured_special_legendre_polynomial_p {
|
| 20 |
+
void impl(const at::Tensor & x, const at::Tensor & n, const at::Tensor & out);
|
| 21 |
+
};
|
| 22 |
+
TORCH_API at::Tensor special_legendre_polynomial_p(const at::Scalar & x, const at::Tensor & n);
|
| 23 |
+
TORCH_API at::Tensor & special_legendre_polynomial_p_out(const at::Scalar & x, const at::Tensor & n, at::Tensor & out);
|
| 24 |
+
TORCH_API at::Tensor special_legendre_polynomial_p(const at::Tensor & x, const at::Scalar & n);
|
| 25 |
+
TORCH_API at::Tensor & special_legendre_polynomial_p_out(const at::Tensor & x, const at::Scalar & n, at::Tensor & out);
|
| 26 |
+
} // namespace native
|
| 27 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/upsample_linear1d_backward.h
ADDED
|
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/upsample_linear1d_backward_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
|
| 26 |
+
inline at::Tensor & upsample_linear1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
|
| 27 |
+
return at::_ops::upsample_linear1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales, grad_input);
|
| 28 |
+
}
|
| 29 |
+
namespace symint {
|
| 30 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 31 |
+
at::Tensor & upsample_linear1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
|
| 32 |
+
return at::_ops::upsample_linear1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales, grad_input);
|
| 33 |
+
}
|
| 34 |
+
}
|
| 35 |
+
|
| 36 |
+
// aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
|
| 37 |
+
inline at::Tensor & upsample_linear1d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales, at::Tensor & grad_input) {
|
| 38 |
+
return at::_ops::upsample_linear1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales, grad_input);
|
| 39 |
+
}
|
| 40 |
+
namespace symint {
|
| 41 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 42 |
+
at::Tensor & upsample_linear1d_backward_outf(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales, at::Tensor & grad_input) {
|
| 43 |
+
return at::_ops::upsample_linear1d_backward_grad_input::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales, grad_input);
|
| 44 |
+
}
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
// aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
|
| 48 |
+
inline at::Tensor & upsample_linear1d_backward_symint_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
|
| 49 |
+
return at::_ops::upsample_linear1d_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales, grad_input);
|
| 50 |
+
}
|
| 51 |
+
namespace symint {
|
| 52 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 53 |
+
at::Tensor & upsample_linear1d_backward_out(at::Tensor & grad_input, const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
|
| 54 |
+
return at::_ops::upsample_linear1d_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales, grad_input);
|
| 55 |
+
}
|
| 56 |
+
}
|
| 57 |
+
|
| 58 |
+
// aten::upsample_linear1d_backward.grad_input(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None, *, Tensor(a!) grad_input) -> Tensor(a!)
|
| 59 |
+
inline at::Tensor & upsample_linear1d_backward_symint_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales, at::Tensor & grad_input) {
|
| 60 |
+
return at::_ops::upsample_linear1d_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales, grad_input);
|
| 61 |
+
}
|
| 62 |
+
namespace symint {
|
| 63 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 64 |
+
at::Tensor & upsample_linear1d_backward_outf(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales, at::Tensor & grad_input) {
|
| 65 |
+
return at::_ops::upsample_linear1d_backward_grad_input::call(grad_output, output_size, input_size, align_corners, scales, grad_input);
|
| 66 |
+
}
|
| 67 |
+
}
|
| 68 |
+
|
| 69 |
+
// aten::upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None) -> Tensor
|
| 70 |
+
inline at::Tensor upsample_linear1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
|
| 71 |
+
return at::_ops::upsample_linear1d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales);
|
| 72 |
+
}
|
| 73 |
+
namespace symint {
|
| 74 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, int64_t>::value>>
|
| 75 |
+
at::Tensor upsample_linear1d_backward(const at::Tensor & grad_output, at::IntArrayRef output_size, at::IntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
|
| 76 |
+
return at::_ops::upsample_linear1d_backward::call(grad_output, c10::fromIntArrayRefSlow(output_size), c10::fromIntArrayRefSlow(input_size), align_corners, scales);
|
| 77 |
+
}
|
| 78 |
+
}
|
| 79 |
+
|
| 80 |
+
// aten::upsample_linear1d_backward(Tensor grad_output, SymInt[1] output_size, SymInt[3] input_size, bool align_corners, float? scales=None) -> Tensor
|
| 81 |
+
inline at::Tensor upsample_linear1d_backward_symint(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
|
| 82 |
+
return at::_ops::upsample_linear1d_backward::call(grad_output, output_size, input_size, align_corners, scales);
|
| 83 |
+
}
|
| 84 |
+
namespace symint {
|
| 85 |
+
template <typename T, typename = std::enable_if_t<std::is_same<T, c10::SymInt>::value>>
|
| 86 |
+
at::Tensor upsample_linear1d_backward(const at::Tensor & grad_output, c10::SymIntArrayRef output_size, c10::SymIntArrayRef input_size, bool align_corners, c10::optional<double> scales=c10::nullopt) {
|
| 87 |
+
return at::_ops::upsample_linear1d_backward::call(grad_output, output_size, input_size, align_corners, scales);
|
| 88 |
+
}
|
| 89 |
+
}
|
| 90 |
+
|
| 91 |
+
}
|
vllm/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/const_vs_enum.cpython-310.pyc
ADDED
|
Binary file (1.17 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/contains.cpython-310.pyc
ADDED
|
Binary file (1.22 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/issue232.cpython-310.pyc
ADDED
|
Binary file (708 Bytes). View file
|
|
|
vllm/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/json_schema_test_suite.cpython-310.pyc
ADDED
|
Binary file (544 Bytes). View file
|
|
|
vllm/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/subcomponents.cpython-310.pyc
ADDED
|
Binary file (1.58 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/unused_registry.cpython-310.pyc
ADDED
|
Binary file (1.19 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/useless_applicator_schemas.cpython-310.pyc
ADDED
|
Binary file (2.22 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/jsonschema/benchmarks/__pycache__/useless_keywords.cpython-310.pyc
ADDED
|
Binary file (1.39 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/jsonschema/benchmarks/nested_schemas.py
ADDED
|
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Validating highly nested schemas shouldn't cause exponential time blowups.
|
| 3 |
+
|
| 4 |
+
See https://github.com/python-jsonschema/jsonschema/issues/1097.
|
| 5 |
+
"""
|
| 6 |
+
from itertools import cycle
|
| 7 |
+
|
| 8 |
+
from jsonschema.validators import validator_for
|
| 9 |
+
|
| 10 |
+
metaschemaish = {
|
| 11 |
+
"$id": "https://example.com/draft/2020-12/schema/strict",
|
| 12 |
+
"$schema": "https://json-schema.org/draft/2020-12/schema",
|
| 13 |
+
|
| 14 |
+
"$vocabulary": {
|
| 15 |
+
"https://json-schema.org/draft/2020-12/vocab/core": True,
|
| 16 |
+
"https://json-schema.org/draft/2020-12/vocab/applicator": True,
|
| 17 |
+
"https://json-schema.org/draft/2020-12/vocab/unevaluated": True,
|
| 18 |
+
"https://json-schema.org/draft/2020-12/vocab/validation": True,
|
| 19 |
+
"https://json-schema.org/draft/2020-12/vocab/meta-data": True,
|
| 20 |
+
"https://json-schema.org/draft/2020-12/vocab/format-annotation": True,
|
| 21 |
+
"https://json-schema.org/draft/2020-12/vocab/content": True,
|
| 22 |
+
},
|
| 23 |
+
"$dynamicAnchor": "meta",
|
| 24 |
+
|
| 25 |
+
"$ref": "https://json-schema.org/draft/2020-12/schema",
|
| 26 |
+
"unevaluatedProperties": False,
|
| 27 |
+
}
|
| 28 |
+
|
| 29 |
+
|
| 30 |
+
def nested_schema(levels):
|
| 31 |
+
"""
|
| 32 |
+
Produce a schema which validates deeply nested objects and arrays.
|
| 33 |
+
"""
|
| 34 |
+
|
| 35 |
+
names = cycle(["foo", "bar", "baz", "quux", "spam", "eggs"])
|
| 36 |
+
schema = {"type": "object", "properties": {"ham": {"type": "string"}}}
|
| 37 |
+
for _, name in zip(range(levels - 1), names):
|
| 38 |
+
schema = {"type": "object", "properties": {name: schema}}
|
| 39 |
+
return schema
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
validator = validator_for(metaschemaish)(metaschemaish)
|
| 43 |
+
|
| 44 |
+
if __name__ == "__main__":
|
| 45 |
+
from pyperf import Runner
|
| 46 |
+
runner = Runner()
|
| 47 |
+
|
| 48 |
+
not_nested = nested_schema(levels=1)
|
| 49 |
+
runner.bench_func("not nested", lambda: validator.is_valid(not_nested))
|
| 50 |
+
|
| 51 |
+
for levels in range(1, 11, 3):
|
| 52 |
+
schema = nested_schema(levels=levels)
|
| 53 |
+
runner.bench_func(
|
| 54 |
+
f"nested * {levels}",
|
| 55 |
+
lambda schema=schema: validator.is_valid(schema),
|
| 56 |
+
)
|
vllm/lib/python3.10/site-packages/jsonschema/benchmarks/unused_registry.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
An unused schema registry should not cause slower validation.
|
| 3 |
+
|
| 4 |
+
"Unused" here means one where no reference resolution is occurring anyhow.
|
| 5 |
+
|
| 6 |
+
See https://github.com/python-jsonschema/jsonschema/issues/1088.
|
| 7 |
+
"""
|
| 8 |
+
from pyperf import Runner
|
| 9 |
+
from referencing import Registry
|
| 10 |
+
from referencing.jsonschema import DRAFT201909
|
| 11 |
+
|
| 12 |
+
from jsonschema import Draft201909Validator
|
| 13 |
+
|
| 14 |
+
registry = Registry().with_resource(
|
| 15 |
+
"urn:example:foo",
|
| 16 |
+
DRAFT201909.create_resource({}),
|
| 17 |
+
)
|
| 18 |
+
|
| 19 |
+
schema = {"$ref": "https://json-schema.org/draft/2019-09/schema"}
|
| 20 |
+
instance = {"maxLength": 4}
|
| 21 |
+
|
| 22 |
+
no_registry = Draft201909Validator(schema)
|
| 23 |
+
with_useless_registry = Draft201909Validator(schema, registry=registry)
|
| 24 |
+
|
| 25 |
+
if __name__ == "__main__":
|
| 26 |
+
runner = Runner()
|
| 27 |
+
|
| 28 |
+
runner.bench_func(
|
| 29 |
+
"no registry",
|
| 30 |
+
lambda: no_registry.is_valid(instance),
|
| 31 |
+
)
|
| 32 |
+
runner.bench_func(
|
| 33 |
+
"useless registry",
|
| 34 |
+
lambda: with_useless_registry.is_valid(instance),
|
| 35 |
+
)
|
vllm/lib/python3.10/site-packages/mpmath/ctx_base.py
ADDED
|
@@ -0,0 +1,494 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from operator import gt, lt
|
| 2 |
+
|
| 3 |
+
from .libmp.backend import xrange
|
| 4 |
+
|
| 5 |
+
from .functions.functions import SpecialFunctions
|
| 6 |
+
from .functions.rszeta import RSCache
|
| 7 |
+
from .calculus.quadrature import QuadratureMethods
|
| 8 |
+
from .calculus.inverselaplace import LaplaceTransformInversionMethods
|
| 9 |
+
from .calculus.calculus import CalculusMethods
|
| 10 |
+
from .calculus.optimization import OptimizationMethods
|
| 11 |
+
from .calculus.odes import ODEMethods
|
| 12 |
+
from .matrices.matrices import MatrixMethods
|
| 13 |
+
from .matrices.calculus import MatrixCalculusMethods
|
| 14 |
+
from .matrices.linalg import LinearAlgebraMethods
|
| 15 |
+
from .matrices.eigen import Eigen
|
| 16 |
+
from .identification import IdentificationMethods
|
| 17 |
+
from .visualization import VisualizationMethods
|
| 18 |
+
|
| 19 |
+
from . import libmp
|
| 20 |
+
|
| 21 |
+
class Context(object):
|
| 22 |
+
pass
|
| 23 |
+
|
| 24 |
+
class StandardBaseContext(Context,
|
| 25 |
+
SpecialFunctions,
|
| 26 |
+
RSCache,
|
| 27 |
+
QuadratureMethods,
|
| 28 |
+
LaplaceTransformInversionMethods,
|
| 29 |
+
CalculusMethods,
|
| 30 |
+
MatrixMethods,
|
| 31 |
+
MatrixCalculusMethods,
|
| 32 |
+
LinearAlgebraMethods,
|
| 33 |
+
Eigen,
|
| 34 |
+
IdentificationMethods,
|
| 35 |
+
OptimizationMethods,
|
| 36 |
+
ODEMethods,
|
| 37 |
+
VisualizationMethods):
|
| 38 |
+
|
| 39 |
+
NoConvergence = libmp.NoConvergence
|
| 40 |
+
ComplexResult = libmp.ComplexResult
|
| 41 |
+
|
| 42 |
+
def __init__(ctx):
|
| 43 |
+
ctx._aliases = {}
|
| 44 |
+
# Call those that need preinitialization (e.g. for wrappers)
|
| 45 |
+
SpecialFunctions.__init__(ctx)
|
| 46 |
+
RSCache.__init__(ctx)
|
| 47 |
+
QuadratureMethods.__init__(ctx)
|
| 48 |
+
LaplaceTransformInversionMethods.__init__(ctx)
|
| 49 |
+
CalculusMethods.__init__(ctx)
|
| 50 |
+
MatrixMethods.__init__(ctx)
|
| 51 |
+
|
| 52 |
+
def _init_aliases(ctx):
|
| 53 |
+
for alias, value in ctx._aliases.items():
|
| 54 |
+
try:
|
| 55 |
+
setattr(ctx, alias, getattr(ctx, value))
|
| 56 |
+
except AttributeError:
|
| 57 |
+
pass
|
| 58 |
+
|
| 59 |
+
_fixed_precision = False
|
| 60 |
+
|
| 61 |
+
# XXX
|
| 62 |
+
verbose = False
|
| 63 |
+
|
| 64 |
+
def warn(ctx, msg):
|
| 65 |
+
print("Warning:", msg)
|
| 66 |
+
|
| 67 |
+
def bad_domain(ctx, msg):
|
| 68 |
+
raise ValueError(msg)
|
| 69 |
+
|
| 70 |
+
def _re(ctx, x):
|
| 71 |
+
if hasattr(x, "real"):
|
| 72 |
+
return x.real
|
| 73 |
+
return x
|
| 74 |
+
|
| 75 |
+
def _im(ctx, x):
|
| 76 |
+
if hasattr(x, "imag"):
|
| 77 |
+
return x.imag
|
| 78 |
+
return ctx.zero
|
| 79 |
+
|
| 80 |
+
def _as_points(ctx, x):
|
| 81 |
+
return x
|
| 82 |
+
|
| 83 |
+
def fneg(ctx, x, **kwargs):
|
| 84 |
+
return -ctx.convert(x)
|
| 85 |
+
|
| 86 |
+
def fadd(ctx, x, y, **kwargs):
|
| 87 |
+
return ctx.convert(x)+ctx.convert(y)
|
| 88 |
+
|
| 89 |
+
def fsub(ctx, x, y, **kwargs):
|
| 90 |
+
return ctx.convert(x)-ctx.convert(y)
|
| 91 |
+
|
| 92 |
+
def fmul(ctx, x, y, **kwargs):
|
| 93 |
+
return ctx.convert(x)*ctx.convert(y)
|
| 94 |
+
|
| 95 |
+
def fdiv(ctx, x, y, **kwargs):
|
| 96 |
+
return ctx.convert(x)/ctx.convert(y)
|
| 97 |
+
|
| 98 |
+
def fsum(ctx, args, absolute=False, squared=False):
|
| 99 |
+
if absolute:
|
| 100 |
+
if squared:
|
| 101 |
+
return sum((abs(x)**2 for x in args), ctx.zero)
|
| 102 |
+
return sum((abs(x) for x in args), ctx.zero)
|
| 103 |
+
if squared:
|
| 104 |
+
return sum((x**2 for x in args), ctx.zero)
|
| 105 |
+
return sum(args, ctx.zero)
|
| 106 |
+
|
| 107 |
+
def fdot(ctx, xs, ys=None, conjugate=False):
|
| 108 |
+
if ys is not None:
|
| 109 |
+
xs = zip(xs, ys)
|
| 110 |
+
if conjugate:
|
| 111 |
+
cf = ctx.conj
|
| 112 |
+
return sum((x*cf(y) for (x,y) in xs), ctx.zero)
|
| 113 |
+
else:
|
| 114 |
+
return sum((x*y for (x,y) in xs), ctx.zero)
|
| 115 |
+
|
| 116 |
+
def fprod(ctx, args):
|
| 117 |
+
prod = ctx.one
|
| 118 |
+
for arg in args:
|
| 119 |
+
prod *= arg
|
| 120 |
+
return prod
|
| 121 |
+
|
| 122 |
+
def nprint(ctx, x, n=6, **kwargs):
|
| 123 |
+
"""
|
| 124 |
+
Equivalent to ``print(nstr(x, n))``.
|
| 125 |
+
"""
|
| 126 |
+
print(ctx.nstr(x, n, **kwargs))
|
| 127 |
+
|
| 128 |
+
def chop(ctx, x, tol=None):
|
| 129 |
+
"""
|
| 130 |
+
Chops off small real or imaginary parts, or converts
|
| 131 |
+
numbers close to zero to exact zeros. The input can be a
|
| 132 |
+
single number or an iterable::
|
| 133 |
+
|
| 134 |
+
>>> from mpmath import *
|
| 135 |
+
>>> mp.dps = 15; mp.pretty = False
|
| 136 |
+
>>> chop(5+1e-10j, tol=1e-9)
|
| 137 |
+
mpf('5.0')
|
| 138 |
+
>>> nprint(chop([1.0, 1e-20, 3+1e-18j, -4, 2]))
|
| 139 |
+
[1.0, 0.0, 3.0, -4.0, 2.0]
|
| 140 |
+
|
| 141 |
+
The tolerance defaults to ``100*eps``.
|
| 142 |
+
"""
|
| 143 |
+
if tol is None:
|
| 144 |
+
tol = 100*ctx.eps
|
| 145 |
+
try:
|
| 146 |
+
x = ctx.convert(x)
|
| 147 |
+
absx = abs(x)
|
| 148 |
+
if abs(x) < tol:
|
| 149 |
+
return ctx.zero
|
| 150 |
+
if ctx._is_complex_type(x):
|
| 151 |
+
#part_tol = min(tol, absx*tol)
|
| 152 |
+
part_tol = max(tol, absx*tol)
|
| 153 |
+
if abs(x.imag) < part_tol:
|
| 154 |
+
return x.real
|
| 155 |
+
if abs(x.real) < part_tol:
|
| 156 |
+
return ctx.mpc(0, x.imag)
|
| 157 |
+
except TypeError:
|
| 158 |
+
if isinstance(x, ctx.matrix):
|
| 159 |
+
return x.apply(lambda a: ctx.chop(a, tol))
|
| 160 |
+
if hasattr(x, "__iter__"):
|
| 161 |
+
return [ctx.chop(a, tol) for a in x]
|
| 162 |
+
return x
|
| 163 |
+
|
| 164 |
+
def almosteq(ctx, s, t, rel_eps=None, abs_eps=None):
|
| 165 |
+
r"""
|
| 166 |
+
Determine whether the difference between `s` and `t` is smaller
|
| 167 |
+
than a given epsilon, either relatively or absolutely.
|
| 168 |
+
|
| 169 |
+
Both a maximum relative difference and a maximum difference
|
| 170 |
+
('epsilons') may be specified. The absolute difference is
|
| 171 |
+
defined as `|s-t|` and the relative difference is defined
|
| 172 |
+
as `|s-t|/\max(|s|, |t|)`.
|
| 173 |
+
|
| 174 |
+
If only one epsilon is given, both are set to the same value.
|
| 175 |
+
If none is given, both epsilons are set to `2^{-p+m}` where
|
| 176 |
+
`p` is the current working precision and `m` is a small
|
| 177 |
+
integer. The default setting typically allows :func:`~mpmath.almosteq`
|
| 178 |
+
to be used to check for mathematical equality
|
| 179 |
+
in the presence of small rounding errors.
|
| 180 |
+
|
| 181 |
+
**Examples**
|
| 182 |
+
|
| 183 |
+
>>> from mpmath import *
|
| 184 |
+
>>> mp.dps = 15
|
| 185 |
+
>>> almosteq(3.141592653589793, 3.141592653589790)
|
| 186 |
+
True
|
| 187 |
+
>>> almosteq(3.141592653589793, 3.141592653589700)
|
| 188 |
+
False
|
| 189 |
+
>>> almosteq(3.141592653589793, 3.141592653589700, 1e-10)
|
| 190 |
+
True
|
| 191 |
+
>>> almosteq(1e-20, 2e-20)
|
| 192 |
+
True
|
| 193 |
+
>>> almosteq(1e-20, 2e-20, rel_eps=0, abs_eps=0)
|
| 194 |
+
False
|
| 195 |
+
|
| 196 |
+
"""
|
| 197 |
+
t = ctx.convert(t)
|
| 198 |
+
if abs_eps is None and rel_eps is None:
|
| 199 |
+
rel_eps = abs_eps = ctx.ldexp(1, -ctx.prec+4)
|
| 200 |
+
if abs_eps is None:
|
| 201 |
+
abs_eps = rel_eps
|
| 202 |
+
elif rel_eps is None:
|
| 203 |
+
rel_eps = abs_eps
|
| 204 |
+
diff = abs(s-t)
|
| 205 |
+
if diff <= abs_eps:
|
| 206 |
+
return True
|
| 207 |
+
abss = abs(s)
|
| 208 |
+
abst = abs(t)
|
| 209 |
+
if abss < abst:
|
| 210 |
+
err = diff/abst
|
| 211 |
+
else:
|
| 212 |
+
err = diff/abss
|
| 213 |
+
return err <= rel_eps
|
| 214 |
+
|
| 215 |
+
def arange(ctx, *args):
|
| 216 |
+
r"""
|
| 217 |
+
This is a generalized version of Python's :func:`~mpmath.range` function
|
| 218 |
+
that accepts fractional endpoints and step sizes and
|
| 219 |
+
returns a list of ``mpf`` instances. Like :func:`~mpmath.range`,
|
| 220 |
+
:func:`~mpmath.arange` can be called with 1, 2 or 3 arguments:
|
| 221 |
+
|
| 222 |
+
``arange(b)``
|
| 223 |
+
`[0, 1, 2, \ldots, x]`
|
| 224 |
+
``arange(a, b)``
|
| 225 |
+
`[a, a+1, a+2, \ldots, x]`
|
| 226 |
+
``arange(a, b, h)``
|
| 227 |
+
`[a, a+h, a+h, \ldots, x]`
|
| 228 |
+
|
| 229 |
+
where `b-1 \le x < b` (in the third case, `b-h \le x < b`).
|
| 230 |
+
|
| 231 |
+
Like Python's :func:`~mpmath.range`, the endpoint is not included. To
|
| 232 |
+
produce ranges where the endpoint is included, :func:`~mpmath.linspace`
|
| 233 |
+
is more convenient.
|
| 234 |
+
|
| 235 |
+
**Examples**
|
| 236 |
+
|
| 237 |
+
>>> from mpmath import *
|
| 238 |
+
>>> mp.dps = 15; mp.pretty = False
|
| 239 |
+
>>> arange(4)
|
| 240 |
+
[mpf('0.0'), mpf('1.0'), mpf('2.0'), mpf('3.0')]
|
| 241 |
+
>>> arange(1, 2, 0.25)
|
| 242 |
+
[mpf('1.0'), mpf('1.25'), mpf('1.5'), mpf('1.75')]
|
| 243 |
+
>>> arange(1, -1, -0.75)
|
| 244 |
+
[mpf('1.0'), mpf('0.25'), mpf('-0.5')]
|
| 245 |
+
|
| 246 |
+
"""
|
| 247 |
+
if not len(args) <= 3:
|
| 248 |
+
raise TypeError('arange expected at most 3 arguments, got %i'
|
| 249 |
+
% len(args))
|
| 250 |
+
if not len(args) >= 1:
|
| 251 |
+
raise TypeError('arange expected at least 1 argument, got %i'
|
| 252 |
+
% len(args))
|
| 253 |
+
# set default
|
| 254 |
+
a = 0
|
| 255 |
+
dt = 1
|
| 256 |
+
# interpret arguments
|
| 257 |
+
if len(args) == 1:
|
| 258 |
+
b = args[0]
|
| 259 |
+
elif len(args) >= 2:
|
| 260 |
+
a = args[0]
|
| 261 |
+
b = args[1]
|
| 262 |
+
if len(args) == 3:
|
| 263 |
+
dt = args[2]
|
| 264 |
+
a, b, dt = ctx.mpf(a), ctx.mpf(b), ctx.mpf(dt)
|
| 265 |
+
assert a + dt != a, 'dt is too small and would cause an infinite loop'
|
| 266 |
+
# adapt code for sign of dt
|
| 267 |
+
if a > b:
|
| 268 |
+
if dt > 0:
|
| 269 |
+
return []
|
| 270 |
+
op = gt
|
| 271 |
+
else:
|
| 272 |
+
if dt < 0:
|
| 273 |
+
return []
|
| 274 |
+
op = lt
|
| 275 |
+
# create list
|
| 276 |
+
result = []
|
| 277 |
+
i = 0
|
| 278 |
+
t = a
|
| 279 |
+
while 1:
|
| 280 |
+
t = a + dt*i
|
| 281 |
+
i += 1
|
| 282 |
+
if op(t, b):
|
| 283 |
+
result.append(t)
|
| 284 |
+
else:
|
| 285 |
+
break
|
| 286 |
+
return result
|
| 287 |
+
|
| 288 |
+
def linspace(ctx, *args, **kwargs):
|
| 289 |
+
"""
|
| 290 |
+
``linspace(a, b, n)`` returns a list of `n` evenly spaced
|
| 291 |
+
samples from `a` to `b`. The syntax ``linspace(mpi(a,b), n)``
|
| 292 |
+
is also valid.
|
| 293 |
+
|
| 294 |
+
This function is often more convenient than :func:`~mpmath.arange`
|
| 295 |
+
for partitioning an interval into subintervals, since
|
| 296 |
+
the endpoint is included::
|
| 297 |
+
|
| 298 |
+
>>> from mpmath import *
|
| 299 |
+
>>> mp.dps = 15; mp.pretty = False
|
| 300 |
+
>>> linspace(1, 4, 4)
|
| 301 |
+
[mpf('1.0'), mpf('2.0'), mpf('3.0'), mpf('4.0')]
|
| 302 |
+
|
| 303 |
+
You may also provide the keyword argument ``endpoint=False``::
|
| 304 |
+
|
| 305 |
+
>>> linspace(1, 4, 4, endpoint=False)
|
| 306 |
+
[mpf('1.0'), mpf('1.75'), mpf('2.5'), mpf('3.25')]
|
| 307 |
+
|
| 308 |
+
"""
|
| 309 |
+
if len(args) == 3:
|
| 310 |
+
a = ctx.mpf(args[0])
|
| 311 |
+
b = ctx.mpf(args[1])
|
| 312 |
+
n = int(args[2])
|
| 313 |
+
elif len(args) == 2:
|
| 314 |
+
assert hasattr(args[0], '_mpi_')
|
| 315 |
+
a = args[0].a
|
| 316 |
+
b = args[0].b
|
| 317 |
+
n = int(args[1])
|
| 318 |
+
else:
|
| 319 |
+
raise TypeError('linspace expected 2 or 3 arguments, got %i' \
|
| 320 |
+
% len(args))
|
| 321 |
+
if n < 1:
|
| 322 |
+
raise ValueError('n must be greater than 0')
|
| 323 |
+
if not 'endpoint' in kwargs or kwargs['endpoint']:
|
| 324 |
+
if n == 1:
|
| 325 |
+
return [ctx.mpf(a)]
|
| 326 |
+
step = (b - a) / ctx.mpf(n - 1)
|
| 327 |
+
y = [i*step + a for i in xrange(n)]
|
| 328 |
+
y[-1] = b
|
| 329 |
+
else:
|
| 330 |
+
step = (b - a) / ctx.mpf(n)
|
| 331 |
+
y = [i*step + a for i in xrange(n)]
|
| 332 |
+
return y
|
| 333 |
+
|
| 334 |
+
def cos_sin(ctx, z, **kwargs):
|
| 335 |
+
return ctx.cos(z, **kwargs), ctx.sin(z, **kwargs)
|
| 336 |
+
|
| 337 |
+
def cospi_sinpi(ctx, z, **kwargs):
|
| 338 |
+
return ctx.cospi(z, **kwargs), ctx.sinpi(z, **kwargs)
|
| 339 |
+
|
| 340 |
+
def _default_hyper_maxprec(ctx, p):
|
| 341 |
+
return int(1000 * p**0.25 + 4*p)
|
| 342 |
+
|
| 343 |
+
_gcd = staticmethod(libmp.gcd)
|
| 344 |
+
list_primes = staticmethod(libmp.list_primes)
|
| 345 |
+
isprime = staticmethod(libmp.isprime)
|
| 346 |
+
bernfrac = staticmethod(libmp.bernfrac)
|
| 347 |
+
moebius = staticmethod(libmp.moebius)
|
| 348 |
+
_ifac = staticmethod(libmp.ifac)
|
| 349 |
+
_eulernum = staticmethod(libmp.eulernum)
|
| 350 |
+
_stirling1 = staticmethod(libmp.stirling1)
|
| 351 |
+
_stirling2 = staticmethod(libmp.stirling2)
|
| 352 |
+
|
| 353 |
+
def sum_accurately(ctx, terms, check_step=1):
|
| 354 |
+
prec = ctx.prec
|
| 355 |
+
try:
|
| 356 |
+
extraprec = 10
|
| 357 |
+
while 1:
|
| 358 |
+
ctx.prec = prec + extraprec + 5
|
| 359 |
+
max_mag = ctx.ninf
|
| 360 |
+
s = ctx.zero
|
| 361 |
+
k = 0
|
| 362 |
+
for term in terms():
|
| 363 |
+
s += term
|
| 364 |
+
if (not k % check_step) and term:
|
| 365 |
+
term_mag = ctx.mag(term)
|
| 366 |
+
max_mag = max(max_mag, term_mag)
|
| 367 |
+
sum_mag = ctx.mag(s)
|
| 368 |
+
if sum_mag - term_mag > ctx.prec:
|
| 369 |
+
break
|
| 370 |
+
k += 1
|
| 371 |
+
cancellation = max_mag - sum_mag
|
| 372 |
+
if cancellation != cancellation:
|
| 373 |
+
break
|
| 374 |
+
if cancellation < extraprec or ctx._fixed_precision:
|
| 375 |
+
break
|
| 376 |
+
extraprec += min(ctx.prec, cancellation)
|
| 377 |
+
return s
|
| 378 |
+
finally:
|
| 379 |
+
ctx.prec = prec
|
| 380 |
+
|
| 381 |
+
def mul_accurately(ctx, factors, check_step=1):
|
| 382 |
+
prec = ctx.prec
|
| 383 |
+
try:
|
| 384 |
+
extraprec = 10
|
| 385 |
+
while 1:
|
| 386 |
+
ctx.prec = prec + extraprec + 5
|
| 387 |
+
max_mag = ctx.ninf
|
| 388 |
+
one = ctx.one
|
| 389 |
+
s = one
|
| 390 |
+
k = 0
|
| 391 |
+
for factor in factors():
|
| 392 |
+
s *= factor
|
| 393 |
+
term = factor - one
|
| 394 |
+
if (not k % check_step):
|
| 395 |
+
term_mag = ctx.mag(term)
|
| 396 |
+
max_mag = max(max_mag, term_mag)
|
| 397 |
+
sum_mag = ctx.mag(s-one)
|
| 398 |
+
#if sum_mag - term_mag > ctx.prec:
|
| 399 |
+
# break
|
| 400 |
+
if -term_mag > ctx.prec:
|
| 401 |
+
break
|
| 402 |
+
k += 1
|
| 403 |
+
cancellation = max_mag - sum_mag
|
| 404 |
+
if cancellation != cancellation:
|
| 405 |
+
break
|
| 406 |
+
if cancellation < extraprec or ctx._fixed_precision:
|
| 407 |
+
break
|
| 408 |
+
extraprec += min(ctx.prec, cancellation)
|
| 409 |
+
return s
|
| 410 |
+
finally:
|
| 411 |
+
ctx.prec = prec
|
| 412 |
+
|
| 413 |
+
def power(ctx, x, y):
|
| 414 |
+
r"""Converts `x` and `y` to mpmath numbers and evaluates
|
| 415 |
+
`x^y = \exp(y \log(x))`::
|
| 416 |
+
|
| 417 |
+
>>> from mpmath import *
|
| 418 |
+
>>> mp.dps = 30; mp.pretty = True
|
| 419 |
+
>>> power(2, 0.5)
|
| 420 |
+
1.41421356237309504880168872421
|
| 421 |
+
|
| 422 |
+
This shows the leading few digits of a large Mersenne prime
|
| 423 |
+
(performing the exact calculation ``2**43112609-1`` and
|
| 424 |
+
displaying the result in Python would be very slow)::
|
| 425 |
+
|
| 426 |
+
>>> power(2, 43112609)-1
|
| 427 |
+
3.16470269330255923143453723949e+12978188
|
| 428 |
+
"""
|
| 429 |
+
return ctx.convert(x) ** ctx.convert(y)
|
| 430 |
+
|
| 431 |
+
def _zeta_int(ctx, n):
|
| 432 |
+
return ctx.zeta(n)
|
| 433 |
+
|
| 434 |
+
def maxcalls(ctx, f, N):
|
| 435 |
+
"""
|
| 436 |
+
Return a wrapped copy of *f* that raises ``NoConvergence`` when *f*
|
| 437 |
+
has been called more than *N* times::
|
| 438 |
+
|
| 439 |
+
>>> from mpmath import *
|
| 440 |
+
>>> mp.dps = 15
|
| 441 |
+
>>> f = maxcalls(sin, 10)
|
| 442 |
+
>>> print(sum(f(n) for n in range(10)))
|
| 443 |
+
1.95520948210738
|
| 444 |
+
>>> f(10) # doctest: +IGNORE_EXCEPTION_DETAIL
|
| 445 |
+
Traceback (most recent call last):
|
| 446 |
+
...
|
| 447 |
+
NoConvergence: maxcalls: function evaluated 10 times
|
| 448 |
+
|
| 449 |
+
"""
|
| 450 |
+
counter = [0]
|
| 451 |
+
def f_maxcalls_wrapped(*args, **kwargs):
|
| 452 |
+
counter[0] += 1
|
| 453 |
+
if counter[0] > N:
|
| 454 |
+
raise ctx.NoConvergence("maxcalls: function evaluated %i times" % N)
|
| 455 |
+
return f(*args, **kwargs)
|
| 456 |
+
return f_maxcalls_wrapped
|
| 457 |
+
|
| 458 |
+
def memoize(ctx, f):
|
| 459 |
+
"""
|
| 460 |
+
Return a wrapped copy of *f* that caches computed values, i.e.
|
| 461 |
+
a memoized copy of *f*. Values are only reused if the cached precision
|
| 462 |
+
is equal to or higher than the working precision::
|
| 463 |
+
|
| 464 |
+
>>> from mpmath import *
|
| 465 |
+
>>> mp.dps = 15; mp.pretty = True
|
| 466 |
+
>>> f = memoize(maxcalls(sin, 1))
|
| 467 |
+
>>> f(2)
|
| 468 |
+
0.909297426825682
|
| 469 |
+
>>> f(2)
|
| 470 |
+
0.909297426825682
|
| 471 |
+
>>> mp.dps = 25
|
| 472 |
+
>>> f(2) # doctest: +IGNORE_EXCEPTION_DETAIL
|
| 473 |
+
Traceback (most recent call last):
|
| 474 |
+
...
|
| 475 |
+
NoConvergence: maxcalls: function evaluated 1 times
|
| 476 |
+
|
| 477 |
+
"""
|
| 478 |
+
f_cache = {}
|
| 479 |
+
def f_cached(*args, **kwargs):
|
| 480 |
+
if kwargs:
|
| 481 |
+
key = args, tuple(kwargs.items())
|
| 482 |
+
else:
|
| 483 |
+
key = args
|
| 484 |
+
prec = ctx.prec
|
| 485 |
+
if key in f_cache:
|
| 486 |
+
cprec, cvalue = f_cache[key]
|
| 487 |
+
if cprec >= prec:
|
| 488 |
+
return +cvalue
|
| 489 |
+
value = f(*args, **kwargs)
|
| 490 |
+
f_cache[key] = (prec, value)
|
| 491 |
+
return value
|
| 492 |
+
f_cached.__name__ = f.__name__
|
| 493 |
+
f_cached.__doc__ = f.__doc__
|
| 494 |
+
return f_cached
|
vllm/lib/python3.10/site-packages/mpmath/ctx_fp.py
ADDED
|
@@ -0,0 +1,253 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .ctx_base import StandardBaseContext
|
| 2 |
+
|
| 3 |
+
import math
|
| 4 |
+
import cmath
|
| 5 |
+
from . import math2
|
| 6 |
+
|
| 7 |
+
from . import function_docs
|
| 8 |
+
|
| 9 |
+
from .libmp import mpf_bernoulli, to_float, int_types
|
| 10 |
+
from . import libmp
|
| 11 |
+
|
| 12 |
+
class FPContext(StandardBaseContext):
|
| 13 |
+
"""
|
| 14 |
+
Context for fast low-precision arithmetic (53-bit precision, giving at most
|
| 15 |
+
about 15-digit accuracy), using Python's builtin float and complex.
|
| 16 |
+
"""
|
| 17 |
+
|
| 18 |
+
def __init__(ctx):
|
| 19 |
+
StandardBaseContext.__init__(ctx)
|
| 20 |
+
|
| 21 |
+
# Override SpecialFunctions implementation
|
| 22 |
+
ctx.loggamma = math2.loggamma
|
| 23 |
+
ctx._bernoulli_cache = {}
|
| 24 |
+
ctx.pretty = False
|
| 25 |
+
|
| 26 |
+
ctx._init_aliases()
|
| 27 |
+
|
| 28 |
+
_mpq = lambda cls, x: float(x[0])/x[1]
|
| 29 |
+
|
| 30 |
+
NoConvergence = libmp.NoConvergence
|
| 31 |
+
|
| 32 |
+
def _get_prec(ctx): return 53
|
| 33 |
+
def _set_prec(ctx, p): return
|
| 34 |
+
def _get_dps(ctx): return 15
|
| 35 |
+
def _set_dps(ctx, p): return
|
| 36 |
+
|
| 37 |
+
_fixed_precision = True
|
| 38 |
+
|
| 39 |
+
prec = property(_get_prec, _set_prec)
|
| 40 |
+
dps = property(_get_dps, _set_dps)
|
| 41 |
+
|
| 42 |
+
zero = 0.0
|
| 43 |
+
one = 1.0
|
| 44 |
+
eps = math2.EPS
|
| 45 |
+
inf = math2.INF
|
| 46 |
+
ninf = math2.NINF
|
| 47 |
+
nan = math2.NAN
|
| 48 |
+
j = 1j
|
| 49 |
+
|
| 50 |
+
# Called by SpecialFunctions.__init__()
|
| 51 |
+
@classmethod
|
| 52 |
+
def _wrap_specfun(cls, name, f, wrap):
|
| 53 |
+
if wrap:
|
| 54 |
+
def f_wrapped(ctx, *args, **kwargs):
|
| 55 |
+
convert = ctx.convert
|
| 56 |
+
args = [convert(a) for a in args]
|
| 57 |
+
return f(ctx, *args, **kwargs)
|
| 58 |
+
else:
|
| 59 |
+
f_wrapped = f
|
| 60 |
+
f_wrapped.__doc__ = function_docs.__dict__.get(name, f.__doc__)
|
| 61 |
+
setattr(cls, name, f_wrapped)
|
| 62 |
+
|
| 63 |
+
def bernoulli(ctx, n):
|
| 64 |
+
cache = ctx._bernoulli_cache
|
| 65 |
+
if n in cache:
|
| 66 |
+
return cache[n]
|
| 67 |
+
cache[n] = to_float(mpf_bernoulli(n, 53, 'n'), strict=True)
|
| 68 |
+
return cache[n]
|
| 69 |
+
|
| 70 |
+
pi = math2.pi
|
| 71 |
+
e = math2.e
|
| 72 |
+
euler = math2.euler
|
| 73 |
+
sqrt2 = 1.4142135623730950488
|
| 74 |
+
sqrt5 = 2.2360679774997896964
|
| 75 |
+
phi = 1.6180339887498948482
|
| 76 |
+
ln2 = 0.69314718055994530942
|
| 77 |
+
ln10 = 2.302585092994045684
|
| 78 |
+
euler = 0.57721566490153286061
|
| 79 |
+
catalan = 0.91596559417721901505
|
| 80 |
+
khinchin = 2.6854520010653064453
|
| 81 |
+
apery = 1.2020569031595942854
|
| 82 |
+
glaisher = 1.2824271291006226369
|
| 83 |
+
|
| 84 |
+
absmin = absmax = abs
|
| 85 |
+
|
| 86 |
+
def is_special(ctx, x):
|
| 87 |
+
return x - x != 0.0
|
| 88 |
+
|
| 89 |
+
def isnan(ctx, x):
|
| 90 |
+
return x != x
|
| 91 |
+
|
| 92 |
+
def isinf(ctx, x):
|
| 93 |
+
return abs(x) == math2.INF
|
| 94 |
+
|
| 95 |
+
def isnormal(ctx, x):
|
| 96 |
+
if x:
|
| 97 |
+
return x - x == 0.0
|
| 98 |
+
return False
|
| 99 |
+
|
| 100 |
+
def isnpint(ctx, x):
|
| 101 |
+
if type(x) is complex:
|
| 102 |
+
if x.imag:
|
| 103 |
+
return False
|
| 104 |
+
x = x.real
|
| 105 |
+
return x <= 0.0 and round(x) == x
|
| 106 |
+
|
| 107 |
+
mpf = float
|
| 108 |
+
mpc = complex
|
| 109 |
+
|
| 110 |
+
def convert(ctx, x):
|
| 111 |
+
try:
|
| 112 |
+
return float(x)
|
| 113 |
+
except:
|
| 114 |
+
return complex(x)
|
| 115 |
+
|
| 116 |
+
power = staticmethod(math2.pow)
|
| 117 |
+
sqrt = staticmethod(math2.sqrt)
|
| 118 |
+
exp = staticmethod(math2.exp)
|
| 119 |
+
ln = log = staticmethod(math2.log)
|
| 120 |
+
cos = staticmethod(math2.cos)
|
| 121 |
+
sin = staticmethod(math2.sin)
|
| 122 |
+
tan = staticmethod(math2.tan)
|
| 123 |
+
cos_sin = staticmethod(math2.cos_sin)
|
| 124 |
+
acos = staticmethod(math2.acos)
|
| 125 |
+
asin = staticmethod(math2.asin)
|
| 126 |
+
atan = staticmethod(math2.atan)
|
| 127 |
+
cosh = staticmethod(math2.cosh)
|
| 128 |
+
sinh = staticmethod(math2.sinh)
|
| 129 |
+
tanh = staticmethod(math2.tanh)
|
| 130 |
+
gamma = staticmethod(math2.gamma)
|
| 131 |
+
rgamma = staticmethod(math2.rgamma)
|
| 132 |
+
fac = factorial = staticmethod(math2.factorial)
|
| 133 |
+
floor = staticmethod(math2.floor)
|
| 134 |
+
ceil = staticmethod(math2.ceil)
|
| 135 |
+
cospi = staticmethod(math2.cospi)
|
| 136 |
+
sinpi = staticmethod(math2.sinpi)
|
| 137 |
+
cbrt = staticmethod(math2.cbrt)
|
| 138 |
+
_nthroot = staticmethod(math2.nthroot)
|
| 139 |
+
_ei = staticmethod(math2.ei)
|
| 140 |
+
_e1 = staticmethod(math2.e1)
|
| 141 |
+
_zeta = _zeta_int = staticmethod(math2.zeta)
|
| 142 |
+
|
| 143 |
+
# XXX: math2
|
| 144 |
+
def arg(ctx, z):
|
| 145 |
+
z = complex(z)
|
| 146 |
+
return math.atan2(z.imag, z.real)
|
| 147 |
+
|
| 148 |
+
def expj(ctx, x):
|
| 149 |
+
return ctx.exp(ctx.j*x)
|
| 150 |
+
|
| 151 |
+
def expjpi(ctx, x):
|
| 152 |
+
return ctx.exp(ctx.j*ctx.pi*x)
|
| 153 |
+
|
| 154 |
+
ldexp = math.ldexp
|
| 155 |
+
frexp = math.frexp
|
| 156 |
+
|
| 157 |
+
def mag(ctx, z):
|
| 158 |
+
if z:
|
| 159 |
+
return ctx.frexp(abs(z))[1]
|
| 160 |
+
return ctx.ninf
|
| 161 |
+
|
| 162 |
+
def isint(ctx, z):
|
| 163 |
+
if hasattr(z, "imag"): # float/int don't have .real/.imag in py2.5
|
| 164 |
+
if z.imag:
|
| 165 |
+
return False
|
| 166 |
+
z = z.real
|
| 167 |
+
try:
|
| 168 |
+
return z == int(z)
|
| 169 |
+
except:
|
| 170 |
+
return False
|
| 171 |
+
|
| 172 |
+
def nint_distance(ctx, z):
|
| 173 |
+
if hasattr(z, "imag"): # float/int don't have .real/.imag in py2.5
|
| 174 |
+
n = round(z.real)
|
| 175 |
+
else:
|
| 176 |
+
n = round(z)
|
| 177 |
+
if n == z:
|
| 178 |
+
return n, ctx.ninf
|
| 179 |
+
return n, ctx.mag(abs(z-n))
|
| 180 |
+
|
| 181 |
+
def _convert_param(ctx, z):
|
| 182 |
+
if type(z) is tuple:
|
| 183 |
+
p, q = z
|
| 184 |
+
return ctx.mpf(p) / q, 'R'
|
| 185 |
+
if hasattr(z, "imag"): # float/int don't have .real/.imag in py2.5
|
| 186 |
+
intz = int(z.real)
|
| 187 |
+
else:
|
| 188 |
+
intz = int(z)
|
| 189 |
+
if z == intz:
|
| 190 |
+
return intz, 'Z'
|
| 191 |
+
return z, 'R'
|
| 192 |
+
|
| 193 |
+
def _is_real_type(ctx, z):
|
| 194 |
+
return isinstance(z, float) or isinstance(z, int_types)
|
| 195 |
+
|
| 196 |
+
def _is_complex_type(ctx, z):
|
| 197 |
+
return isinstance(z, complex)
|
| 198 |
+
|
| 199 |
+
def hypsum(ctx, p, q, types, coeffs, z, maxterms=6000, **kwargs):
|
| 200 |
+
coeffs = list(coeffs)
|
| 201 |
+
num = range(p)
|
| 202 |
+
den = range(p,p+q)
|
| 203 |
+
tol = ctx.eps
|
| 204 |
+
s = t = 1.0
|
| 205 |
+
k = 0
|
| 206 |
+
while 1:
|
| 207 |
+
for i in num: t *= (coeffs[i]+k)
|
| 208 |
+
for i in den: t /= (coeffs[i]+k)
|
| 209 |
+
k += 1; t /= k; t *= z; s += t
|
| 210 |
+
if abs(t) < tol:
|
| 211 |
+
return s
|
| 212 |
+
if k > maxterms:
|
| 213 |
+
raise ctx.NoConvergence
|
| 214 |
+
|
| 215 |
+
def atan2(ctx, x, y):
|
| 216 |
+
return math.atan2(x, y)
|
| 217 |
+
|
| 218 |
+
def psi(ctx, m, z):
|
| 219 |
+
m = int(m)
|
| 220 |
+
if m == 0:
|
| 221 |
+
return ctx.digamma(z)
|
| 222 |
+
return (-1)**(m+1) * ctx.fac(m) * ctx.zeta(m+1, z)
|
| 223 |
+
|
| 224 |
+
digamma = staticmethod(math2.digamma)
|
| 225 |
+
|
| 226 |
+
def harmonic(ctx, x):
|
| 227 |
+
x = ctx.convert(x)
|
| 228 |
+
if x == 0 or x == 1:
|
| 229 |
+
return x
|
| 230 |
+
return ctx.digamma(x+1) + ctx.euler
|
| 231 |
+
|
| 232 |
+
nstr = str
|
| 233 |
+
|
| 234 |
+
def to_fixed(ctx, x, prec):
|
| 235 |
+
return int(math.ldexp(x, prec))
|
| 236 |
+
|
| 237 |
+
def rand(ctx):
|
| 238 |
+
import random
|
| 239 |
+
return random.random()
|
| 240 |
+
|
| 241 |
+
_erf = staticmethod(math2.erf)
|
| 242 |
+
_erfc = staticmethod(math2.erfc)
|
| 243 |
+
|
| 244 |
+
def sum_accurately(ctx, terms, check_step=1):
|
| 245 |
+
s = ctx.zero
|
| 246 |
+
k = 0
|
| 247 |
+
for term in terms():
|
| 248 |
+
s += term
|
| 249 |
+
if (not k % check_step) and term:
|
| 250 |
+
if abs(term) <= 1e-18*abs(s):
|
| 251 |
+
break
|
| 252 |
+
k += 1
|
| 253 |
+
return s
|
vllm/lib/python3.10/site-packages/mpmath/ctx_mp.py
ADDED
|
@@ -0,0 +1,1339 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
This module defines the mpf, mpc classes, and standard functions for
|
| 3 |
+
operating with them.
|
| 4 |
+
"""
|
| 5 |
+
__docformat__ = 'plaintext'
|
| 6 |
+
|
| 7 |
+
import functools
|
| 8 |
+
|
| 9 |
+
import re
|
| 10 |
+
|
| 11 |
+
from .ctx_base import StandardBaseContext
|
| 12 |
+
|
| 13 |
+
from .libmp.backend import basestring, BACKEND
|
| 14 |
+
|
| 15 |
+
from . import libmp
|
| 16 |
+
|
| 17 |
+
from .libmp import (MPZ, MPZ_ZERO, MPZ_ONE, int_types, repr_dps,
|
| 18 |
+
round_floor, round_ceiling, dps_to_prec, round_nearest, prec_to_dps,
|
| 19 |
+
ComplexResult, to_pickable, from_pickable, normalize,
|
| 20 |
+
from_int, from_float, from_str, to_int, to_float, to_str,
|
| 21 |
+
from_rational, from_man_exp,
|
| 22 |
+
fone, fzero, finf, fninf, fnan,
|
| 23 |
+
mpf_abs, mpf_pos, mpf_neg, mpf_add, mpf_sub, mpf_mul, mpf_mul_int,
|
| 24 |
+
mpf_div, mpf_rdiv_int, mpf_pow_int, mpf_mod,
|
| 25 |
+
mpf_eq, mpf_cmp, mpf_lt, mpf_gt, mpf_le, mpf_ge,
|
| 26 |
+
mpf_hash, mpf_rand,
|
| 27 |
+
mpf_sum,
|
| 28 |
+
bitcount, to_fixed,
|
| 29 |
+
mpc_to_str,
|
| 30 |
+
mpc_to_complex, mpc_hash, mpc_pos, mpc_is_nonzero, mpc_neg, mpc_conjugate,
|
| 31 |
+
mpc_abs, mpc_add, mpc_add_mpf, mpc_sub, mpc_sub_mpf, mpc_mul, mpc_mul_mpf,
|
| 32 |
+
mpc_mul_int, mpc_div, mpc_div_mpf, mpc_pow, mpc_pow_mpf, mpc_pow_int,
|
| 33 |
+
mpc_mpf_div,
|
| 34 |
+
mpf_pow,
|
| 35 |
+
mpf_pi, mpf_degree, mpf_e, mpf_phi, mpf_ln2, mpf_ln10,
|
| 36 |
+
mpf_euler, mpf_catalan, mpf_apery, mpf_khinchin,
|
| 37 |
+
mpf_glaisher, mpf_twinprime, mpf_mertens,
|
| 38 |
+
int_types)
|
| 39 |
+
|
| 40 |
+
from . import function_docs
|
| 41 |
+
from . import rational
|
| 42 |
+
|
| 43 |
+
new = object.__new__
|
| 44 |
+
|
| 45 |
+
get_complex = re.compile(r'^\(?(?P<re>[\+\-]?\d*(\.\d*)?(e[\+\-]?\d+)?)??'
|
| 46 |
+
r'(?P<im>[\+\-]?\d*(\.\d*)?(e[\+\-]?\d+)?j)?\)?$')
|
| 47 |
+
|
| 48 |
+
if BACKEND == 'sage':
|
| 49 |
+
from sage.libs.mpmath.ext_main import Context as BaseMPContext
|
| 50 |
+
# pickle hack
|
| 51 |
+
import sage.libs.mpmath.ext_main as _mpf_module
|
| 52 |
+
else:
|
| 53 |
+
from .ctx_mp_python import PythonMPContext as BaseMPContext
|
| 54 |
+
from . import ctx_mp_python as _mpf_module
|
| 55 |
+
|
| 56 |
+
from .ctx_mp_python import _mpf, _mpc, mpnumeric
|
| 57 |
+
|
| 58 |
+
class MPContext(BaseMPContext, StandardBaseContext):
|
| 59 |
+
"""
|
| 60 |
+
Context for multiprecision arithmetic with a global precision.
|
| 61 |
+
"""
|
| 62 |
+
|
| 63 |
+
def __init__(ctx):
|
| 64 |
+
BaseMPContext.__init__(ctx)
|
| 65 |
+
ctx.trap_complex = False
|
| 66 |
+
ctx.pretty = False
|
| 67 |
+
ctx.types = [ctx.mpf, ctx.mpc, ctx.constant]
|
| 68 |
+
ctx._mpq = rational.mpq
|
| 69 |
+
ctx.default()
|
| 70 |
+
StandardBaseContext.__init__(ctx)
|
| 71 |
+
|
| 72 |
+
ctx.mpq = rational.mpq
|
| 73 |
+
ctx.init_builtins()
|
| 74 |
+
|
| 75 |
+
ctx.hyp_summators = {}
|
| 76 |
+
|
| 77 |
+
ctx._init_aliases()
|
| 78 |
+
|
| 79 |
+
# XXX: automate
|
| 80 |
+
try:
|
| 81 |
+
ctx.bernoulli.im_func.func_doc = function_docs.bernoulli
|
| 82 |
+
ctx.primepi.im_func.func_doc = function_docs.primepi
|
| 83 |
+
ctx.psi.im_func.func_doc = function_docs.psi
|
| 84 |
+
ctx.atan2.im_func.func_doc = function_docs.atan2
|
| 85 |
+
except AttributeError:
|
| 86 |
+
# python 3
|
| 87 |
+
ctx.bernoulli.__func__.func_doc = function_docs.bernoulli
|
| 88 |
+
ctx.primepi.__func__.func_doc = function_docs.primepi
|
| 89 |
+
ctx.psi.__func__.func_doc = function_docs.psi
|
| 90 |
+
ctx.atan2.__func__.func_doc = function_docs.atan2
|
| 91 |
+
|
| 92 |
+
ctx.digamma.func_doc = function_docs.digamma
|
| 93 |
+
ctx.cospi.func_doc = function_docs.cospi
|
| 94 |
+
ctx.sinpi.func_doc = function_docs.sinpi
|
| 95 |
+
|
| 96 |
+
def init_builtins(ctx):
|
| 97 |
+
|
| 98 |
+
mpf = ctx.mpf
|
| 99 |
+
mpc = ctx.mpc
|
| 100 |
+
|
| 101 |
+
# Exact constants
|
| 102 |
+
ctx.one = ctx.make_mpf(fone)
|
| 103 |
+
ctx.zero = ctx.make_mpf(fzero)
|
| 104 |
+
ctx.j = ctx.make_mpc((fzero,fone))
|
| 105 |
+
ctx.inf = ctx.make_mpf(finf)
|
| 106 |
+
ctx.ninf = ctx.make_mpf(fninf)
|
| 107 |
+
ctx.nan = ctx.make_mpf(fnan)
|
| 108 |
+
|
| 109 |
+
eps = ctx.constant(lambda prec, rnd: (0, MPZ_ONE, 1-prec, 1),
|
| 110 |
+
"epsilon of working precision", "eps")
|
| 111 |
+
ctx.eps = eps
|
| 112 |
+
|
| 113 |
+
# Approximate constants
|
| 114 |
+
ctx.pi = ctx.constant(mpf_pi, "pi", "pi")
|
| 115 |
+
ctx.ln2 = ctx.constant(mpf_ln2, "ln(2)", "ln2")
|
| 116 |
+
ctx.ln10 = ctx.constant(mpf_ln10, "ln(10)", "ln10")
|
| 117 |
+
ctx.phi = ctx.constant(mpf_phi, "Golden ratio phi", "phi")
|
| 118 |
+
ctx.e = ctx.constant(mpf_e, "e = exp(1)", "e")
|
| 119 |
+
ctx.euler = ctx.constant(mpf_euler, "Euler's constant", "euler")
|
| 120 |
+
ctx.catalan = ctx.constant(mpf_catalan, "Catalan's constant", "catalan")
|
| 121 |
+
ctx.khinchin = ctx.constant(mpf_khinchin, "Khinchin's constant", "khinchin")
|
| 122 |
+
ctx.glaisher = ctx.constant(mpf_glaisher, "Glaisher's constant", "glaisher")
|
| 123 |
+
ctx.apery = ctx.constant(mpf_apery, "Apery's constant", "apery")
|
| 124 |
+
ctx.degree = ctx.constant(mpf_degree, "1 deg = pi / 180", "degree")
|
| 125 |
+
ctx.twinprime = ctx.constant(mpf_twinprime, "Twin prime constant", "twinprime")
|
| 126 |
+
ctx.mertens = ctx.constant(mpf_mertens, "Mertens' constant", "mertens")
|
| 127 |
+
|
| 128 |
+
# Standard functions
|
| 129 |
+
ctx.sqrt = ctx._wrap_libmp_function(libmp.mpf_sqrt, libmp.mpc_sqrt)
|
| 130 |
+
ctx.cbrt = ctx._wrap_libmp_function(libmp.mpf_cbrt, libmp.mpc_cbrt)
|
| 131 |
+
ctx.ln = ctx._wrap_libmp_function(libmp.mpf_log, libmp.mpc_log)
|
| 132 |
+
ctx.atan = ctx._wrap_libmp_function(libmp.mpf_atan, libmp.mpc_atan)
|
| 133 |
+
ctx.exp = ctx._wrap_libmp_function(libmp.mpf_exp, libmp.mpc_exp)
|
| 134 |
+
ctx.expj = ctx._wrap_libmp_function(libmp.mpf_expj, libmp.mpc_expj)
|
| 135 |
+
ctx.expjpi = ctx._wrap_libmp_function(libmp.mpf_expjpi, libmp.mpc_expjpi)
|
| 136 |
+
ctx.sin = ctx._wrap_libmp_function(libmp.mpf_sin, libmp.mpc_sin)
|
| 137 |
+
ctx.cos = ctx._wrap_libmp_function(libmp.mpf_cos, libmp.mpc_cos)
|
| 138 |
+
ctx.tan = ctx._wrap_libmp_function(libmp.mpf_tan, libmp.mpc_tan)
|
| 139 |
+
ctx.sinh = ctx._wrap_libmp_function(libmp.mpf_sinh, libmp.mpc_sinh)
|
| 140 |
+
ctx.cosh = ctx._wrap_libmp_function(libmp.mpf_cosh, libmp.mpc_cosh)
|
| 141 |
+
ctx.tanh = ctx._wrap_libmp_function(libmp.mpf_tanh, libmp.mpc_tanh)
|
| 142 |
+
ctx.asin = ctx._wrap_libmp_function(libmp.mpf_asin, libmp.mpc_asin)
|
| 143 |
+
ctx.acos = ctx._wrap_libmp_function(libmp.mpf_acos, libmp.mpc_acos)
|
| 144 |
+
ctx.atan = ctx._wrap_libmp_function(libmp.mpf_atan, libmp.mpc_atan)
|
| 145 |
+
ctx.asinh = ctx._wrap_libmp_function(libmp.mpf_asinh, libmp.mpc_asinh)
|
| 146 |
+
ctx.acosh = ctx._wrap_libmp_function(libmp.mpf_acosh, libmp.mpc_acosh)
|
| 147 |
+
ctx.atanh = ctx._wrap_libmp_function(libmp.mpf_atanh, libmp.mpc_atanh)
|
| 148 |
+
ctx.sinpi = ctx._wrap_libmp_function(libmp.mpf_sin_pi, libmp.mpc_sin_pi)
|
| 149 |
+
ctx.cospi = ctx._wrap_libmp_function(libmp.mpf_cos_pi, libmp.mpc_cos_pi)
|
| 150 |
+
ctx.floor = ctx._wrap_libmp_function(libmp.mpf_floor, libmp.mpc_floor)
|
| 151 |
+
ctx.ceil = ctx._wrap_libmp_function(libmp.mpf_ceil, libmp.mpc_ceil)
|
| 152 |
+
ctx.nint = ctx._wrap_libmp_function(libmp.mpf_nint, libmp.mpc_nint)
|
| 153 |
+
ctx.frac = ctx._wrap_libmp_function(libmp.mpf_frac, libmp.mpc_frac)
|
| 154 |
+
ctx.fib = ctx.fibonacci = ctx._wrap_libmp_function(libmp.mpf_fibonacci, libmp.mpc_fibonacci)
|
| 155 |
+
|
| 156 |
+
ctx.gamma = ctx._wrap_libmp_function(libmp.mpf_gamma, libmp.mpc_gamma)
|
| 157 |
+
ctx.rgamma = ctx._wrap_libmp_function(libmp.mpf_rgamma, libmp.mpc_rgamma)
|
| 158 |
+
ctx.loggamma = ctx._wrap_libmp_function(libmp.mpf_loggamma, libmp.mpc_loggamma)
|
| 159 |
+
ctx.fac = ctx.factorial = ctx._wrap_libmp_function(libmp.mpf_factorial, libmp.mpc_factorial)
|
| 160 |
+
|
| 161 |
+
ctx.digamma = ctx._wrap_libmp_function(libmp.mpf_psi0, libmp.mpc_psi0)
|
| 162 |
+
ctx.harmonic = ctx._wrap_libmp_function(libmp.mpf_harmonic, libmp.mpc_harmonic)
|
| 163 |
+
ctx.ei = ctx._wrap_libmp_function(libmp.mpf_ei, libmp.mpc_ei)
|
| 164 |
+
ctx.e1 = ctx._wrap_libmp_function(libmp.mpf_e1, libmp.mpc_e1)
|
| 165 |
+
ctx._ci = ctx._wrap_libmp_function(libmp.mpf_ci, libmp.mpc_ci)
|
| 166 |
+
ctx._si = ctx._wrap_libmp_function(libmp.mpf_si, libmp.mpc_si)
|
| 167 |
+
ctx.ellipk = ctx._wrap_libmp_function(libmp.mpf_ellipk, libmp.mpc_ellipk)
|
| 168 |
+
ctx._ellipe = ctx._wrap_libmp_function(libmp.mpf_ellipe, libmp.mpc_ellipe)
|
| 169 |
+
ctx.agm1 = ctx._wrap_libmp_function(libmp.mpf_agm1, libmp.mpc_agm1)
|
| 170 |
+
ctx._erf = ctx._wrap_libmp_function(libmp.mpf_erf, None)
|
| 171 |
+
ctx._erfc = ctx._wrap_libmp_function(libmp.mpf_erfc, None)
|
| 172 |
+
ctx._zeta = ctx._wrap_libmp_function(libmp.mpf_zeta, libmp.mpc_zeta)
|
| 173 |
+
ctx._altzeta = ctx._wrap_libmp_function(libmp.mpf_altzeta, libmp.mpc_altzeta)
|
| 174 |
+
|
| 175 |
+
# Faster versions
|
| 176 |
+
ctx.sqrt = getattr(ctx, "_sage_sqrt", ctx.sqrt)
|
| 177 |
+
ctx.exp = getattr(ctx, "_sage_exp", ctx.exp)
|
| 178 |
+
ctx.ln = getattr(ctx, "_sage_ln", ctx.ln)
|
| 179 |
+
ctx.cos = getattr(ctx, "_sage_cos", ctx.cos)
|
| 180 |
+
ctx.sin = getattr(ctx, "_sage_sin", ctx.sin)
|
| 181 |
+
|
| 182 |
+
    def to_fixed(ctx, x, prec):
        """Convert *x* to a fixed-point integer with *prec* fractional bits."""
        return x.to_fixed(prec)
|
| 184 |
+
|
| 185 |
+
def hypot(ctx, x, y):
|
| 186 |
+
r"""
|
| 187 |
+
Computes the Euclidean norm of the vector `(x, y)`, equal
|
| 188 |
+
to `\sqrt{x^2 + y^2}`. Both `x` and `y` must be real."""
|
| 189 |
+
x = ctx.convert(x)
|
| 190 |
+
y = ctx.convert(y)
|
| 191 |
+
return ctx.make_mpf(libmp.mpf_hypot(x._mpf_, y._mpf_, *ctx._prec_rounding))
|
| 192 |
+
|
| 193 |
+
def _gamma_upper_int(ctx, n, z):
|
| 194 |
+
n = int(ctx._re(n))
|
| 195 |
+
if n == 0:
|
| 196 |
+
return ctx.e1(z)
|
| 197 |
+
if not hasattr(z, '_mpf_'):
|
| 198 |
+
raise NotImplementedError
|
| 199 |
+
prec, rounding = ctx._prec_rounding
|
| 200 |
+
real, imag = libmp.mpf_expint(n, z._mpf_, prec, rounding, gamma=True)
|
| 201 |
+
if imag is None:
|
| 202 |
+
return ctx.make_mpf(real)
|
| 203 |
+
else:
|
| 204 |
+
return ctx.make_mpc((real, imag))
|
| 205 |
+
|
| 206 |
+
def _expint_int(ctx, n, z):
|
| 207 |
+
n = int(n)
|
| 208 |
+
if n == 1:
|
| 209 |
+
return ctx.e1(z)
|
| 210 |
+
if not hasattr(z, '_mpf_'):
|
| 211 |
+
raise NotImplementedError
|
| 212 |
+
prec, rounding = ctx._prec_rounding
|
| 213 |
+
real, imag = libmp.mpf_expint(n, z._mpf_, prec, rounding)
|
| 214 |
+
if imag is None:
|
| 215 |
+
return ctx.make_mpf(real)
|
| 216 |
+
else:
|
| 217 |
+
return ctx.make_mpc((real, imag))
|
| 218 |
+
|
| 219 |
+
def _nthroot(ctx, x, n):
|
| 220 |
+
if hasattr(x, '_mpf_'):
|
| 221 |
+
try:
|
| 222 |
+
return ctx.make_mpf(libmp.mpf_nthroot(x._mpf_, n, *ctx._prec_rounding))
|
| 223 |
+
except ComplexResult:
|
| 224 |
+
if ctx.trap_complex:
|
| 225 |
+
raise
|
| 226 |
+
x = (x._mpf_, libmp.fzero)
|
| 227 |
+
else:
|
| 228 |
+
x = x._mpc_
|
| 229 |
+
return ctx.make_mpc(libmp.mpc_nthroot(x, n, *ctx._prec_rounding))
|
| 230 |
+
|
| 231 |
+
def _besselj(ctx, n, z):
|
| 232 |
+
prec, rounding = ctx._prec_rounding
|
| 233 |
+
if hasattr(z, '_mpf_'):
|
| 234 |
+
return ctx.make_mpf(libmp.mpf_besseljn(n, z._mpf_, prec, rounding))
|
| 235 |
+
elif hasattr(z, '_mpc_'):
|
| 236 |
+
return ctx.make_mpc(libmp.mpc_besseljn(n, z._mpc_, prec, rounding))
|
| 237 |
+
|
| 238 |
+
    def _agm(ctx, a, b=1):
        """
        Arithmetic-geometric mean of *a* and *b*. Tries the fast real
        path first; a ComplexResult (e.g. negative arguments) falls
        through to the complex implementation below.
        """
        prec, rounding = ctx._prec_rounding
        if hasattr(a, '_mpf_') and hasattr(b, '_mpf_'):
            try:
                v = libmp.mpf_agm(a._mpf_, b._mpf_, prec, rounding)
                return ctx.make_mpf(v)
            except ComplexResult:
                pass
        # Promote both operands to (real, imag) pairs for mpc_agm.
        if hasattr(a, '_mpf_'): a = (a._mpf_, libmp.fzero)
        else: a = a._mpc_
        if hasattr(b, '_mpf_'): b = (b._mpf_, libmp.fzero)
        else: b = b._mpc_
        return ctx.make_mpc(libmp.mpc_agm(a, b, prec, rounding))
|
| 251 |
+
|
| 252 |
+
    def bernoulli(ctx, n):
        """Compute the Bernoulli number B_n as an mpf."""
        return ctx.make_mpf(libmp.mpf_bernoulli(int(n), *ctx._prec_rounding))
|
| 254 |
+
|
| 255 |
+
    def _zeta_int(ctx, n):
        """Riemann zeta function at an integer argument (fast path)."""
        return ctx.make_mpf(libmp.mpf_zeta_int(int(n), *ctx._prec_rounding))
|
| 257 |
+
|
| 258 |
+
def atan2(ctx, y, x):
|
| 259 |
+
x = ctx.convert(x)
|
| 260 |
+
y = ctx.convert(y)
|
| 261 |
+
return ctx.make_mpf(libmp.mpf_atan2(y._mpf_, x._mpf_, *ctx._prec_rounding))
|
| 262 |
+
|
| 263 |
+
def psi(ctx, m, z):
|
| 264 |
+
z = ctx.convert(z)
|
| 265 |
+
m = int(m)
|
| 266 |
+
if ctx._is_real_type(z):
|
| 267 |
+
return ctx.make_mpf(libmp.mpf_psi(m, z._mpf_, *ctx._prec_rounding))
|
| 268 |
+
else:
|
| 269 |
+
return ctx.make_mpc(libmp.mpc_psi(m, z._mpc_, *ctx._prec_rounding))
|
| 270 |
+
|
| 271 |
+
def cos_sin(ctx, x, **kwargs):
|
| 272 |
+
if type(x) not in ctx.types:
|
| 273 |
+
x = ctx.convert(x)
|
| 274 |
+
prec, rounding = ctx._parse_prec(kwargs)
|
| 275 |
+
if hasattr(x, '_mpf_'):
|
| 276 |
+
c, s = libmp.mpf_cos_sin(x._mpf_, prec, rounding)
|
| 277 |
+
return ctx.make_mpf(c), ctx.make_mpf(s)
|
| 278 |
+
elif hasattr(x, '_mpc_'):
|
| 279 |
+
c, s = libmp.mpc_cos_sin(x._mpc_, prec, rounding)
|
| 280 |
+
return ctx.make_mpc(c), ctx.make_mpc(s)
|
| 281 |
+
else:
|
| 282 |
+
return ctx.cos(x, **kwargs), ctx.sin(x, **kwargs)
|
| 283 |
+
|
| 284 |
+
def cospi_sinpi(ctx, x, **kwargs):
|
| 285 |
+
if type(x) not in ctx.types:
|
| 286 |
+
x = ctx.convert(x)
|
| 287 |
+
prec, rounding = ctx._parse_prec(kwargs)
|
| 288 |
+
if hasattr(x, '_mpf_'):
|
| 289 |
+
c, s = libmp.mpf_cos_sin_pi(x._mpf_, prec, rounding)
|
| 290 |
+
return ctx.make_mpf(c), ctx.make_mpf(s)
|
| 291 |
+
elif hasattr(x, '_mpc_'):
|
| 292 |
+
c, s = libmp.mpc_cos_sin_pi(x._mpc_, prec, rounding)
|
| 293 |
+
return ctx.make_mpc(c), ctx.make_mpc(s)
|
| 294 |
+
else:
|
| 295 |
+
return ctx.cos(x, **kwargs), ctx.sin(x, **kwargs)
|
| 296 |
+
|
| 297 |
+
def clone(ctx):
|
| 298 |
+
"""
|
| 299 |
+
Create a copy of the context, with the same working precision.
|
| 300 |
+
"""
|
| 301 |
+
a = ctx.__class__()
|
| 302 |
+
a.prec = ctx.prec
|
| 303 |
+
return a
|
| 304 |
+
|
| 305 |
+
# Several helper methods
|
| 306 |
+
# TODO: add more of these, make consistent, write docstrings, ...
|
| 307 |
+
|
| 308 |
+
def _is_real_type(ctx, x):
|
| 309 |
+
if hasattr(x, '_mpc_') or type(x) is complex:
|
| 310 |
+
return False
|
| 311 |
+
return True
|
| 312 |
+
|
| 313 |
+
def _is_complex_type(ctx, x):
|
| 314 |
+
if hasattr(x, '_mpc_') or type(x) is complex:
|
| 315 |
+
return True
|
| 316 |
+
return False
|
| 317 |
+
|
| 318 |
+
    def isnan(ctx, x):
        """
        Return *True* if *x* is a NaN (not-a-number), or for a complex
        number, whether either the real or complex part is NaN;
        otherwise return *False*::

            >>> from mpmath import *
            >>> isnan(3.14)
            False
            >>> isnan(nan)
            True
            >>> isnan(mpc(3.14,2.72))
            False
            >>> isnan(mpc(3.14,nan))
            True

        """
        if hasattr(x, "_mpf_"):
            return x._mpf_ == fnan
        if hasattr(x, "_mpc_"):
            # True if either component is NaN
            return fnan in x._mpc_
        if isinstance(x, int_types) or isinstance(x, rational.mpq):
            # Integers and rationals can never be NaN
            return False
        # Convert unknown types once and retry
        x = ctx.convert(x)
        if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'):
            return ctx.isnan(x)
        raise TypeError("isnan() needs a number as input")
|
| 345 |
+
|
| 346 |
+
def isfinite(ctx, x):
|
| 347 |
+
"""
|
| 348 |
+
Return *True* if *x* is a finite number, i.e. neither
|
| 349 |
+
an infinity or a NaN.
|
| 350 |
+
|
| 351 |
+
>>> from mpmath import *
|
| 352 |
+
>>> isfinite(inf)
|
| 353 |
+
False
|
| 354 |
+
>>> isfinite(-inf)
|
| 355 |
+
False
|
| 356 |
+
>>> isfinite(3)
|
| 357 |
+
True
|
| 358 |
+
>>> isfinite(nan)
|
| 359 |
+
False
|
| 360 |
+
>>> isfinite(3+4j)
|
| 361 |
+
True
|
| 362 |
+
>>> isfinite(mpc(3,inf))
|
| 363 |
+
False
|
| 364 |
+
>>> isfinite(mpc(nan,3))
|
| 365 |
+
False
|
| 366 |
+
|
| 367 |
+
"""
|
| 368 |
+
if ctx.isinf(x) or ctx.isnan(x):
|
| 369 |
+
return False
|
| 370 |
+
return True
|
| 371 |
+
|
| 372 |
+
    def isnpint(ctx, x):
        """
        Determine if *x* is a nonpositive integer.
        """
        # Zero (any falsy number) counts as a nonpositive integer.
        if not x:
            return True
        if hasattr(x, '_mpf_'):
            # Negative sign with a nonnegative exponent means the
            # mantissa carries no fractional bits, i.e. an integer.
            sign, man, exp, bc = x._mpf_
            return sign and exp >= 0
        if hasattr(x, '_mpc_'):
            # A complex value qualifies only when purely real.
            return not x.imag and ctx.isnpint(x.real)
        if type(x) in int_types:
            return x <= 0
        if isinstance(x, ctx.mpq):
            p, q = x._mpq_
            if not p:
                return True
            return q == 1 and p <= 0
        # Fall back to converting and re-checking.
        return ctx.isnpint(ctx.convert(x))
|
| 391 |
+
|
| 392 |
+
def __str__(ctx):
|
| 393 |
+
lines = ["Mpmath settings:",
|
| 394 |
+
(" mp.prec = %s" % ctx.prec).ljust(30) + "[default: 53]",
|
| 395 |
+
(" mp.dps = %s" % ctx.dps).ljust(30) + "[default: 15]",
|
| 396 |
+
(" mp.trap_complex = %s" % ctx.trap_complex).ljust(30) + "[default: False]",
|
| 397 |
+
]
|
| 398 |
+
return "\n".join(lines)
|
| 399 |
+
|
| 400 |
+
    @property
    def _repr_digits(ctx):
        # Decimal digits used by repr() at the current binary precision
        # (includes guard digits so repr output round-trips).
        return repr_dps(ctx._prec)
|
| 403 |
+
|
| 404 |
+
    @property
    def _str_digits(ctx):
        # Decimal digits used by str() at the current precision.
        return ctx._dps
|
| 407 |
+
|
| 408 |
+
    def extraprec(ctx, n, normalize_output=False):
        """
        The block

            with extraprec(n):
                <code>

        increases the precision n bits, executes <code>, and then
        restores the precision.

        extraprec(n)(f) returns a decorated version of the function f
        that increases the working precision by n bits before execution,
        and restores the parent precision afterwards. With
        normalize_output=True, it rounds the return value to the parent
        precision.
        """
        return PrecisionManager(ctx, lambda p: p + n, None, normalize_output)
|
| 425 |
+
|
| 426 |
+
    def extradps(ctx, n, normalize_output=False):
        """
        This function is analogous to extraprec (see documentation)
        but changes the decimal precision instead of the number of bits.
        """
        return PrecisionManager(ctx, None, lambda d: d + n, normalize_output)
|
| 432 |
+
|
| 433 |
+
    def workprec(ctx, n, normalize_output=False):
        """
        The block

            with workprec(n):
                <code>

        sets the precision to n bits, executes <code>, and then restores
        the precision.

        workprec(n)(f) returns a decorated version of the function f
        that sets the precision to n bits before execution,
        and restores the precision afterwards. With normalize_output=True,
        it rounds the return value to the parent precision.
        """
        return PrecisionManager(ctx, lambda p: n, None, normalize_output)
|
| 449 |
+
|
| 450 |
+
    def workdps(ctx, n, normalize_output=False):
        """
        This function is analogous to workprec (see documentation)
        but changes the decimal precision instead of the number of bits.
        """
        return PrecisionManager(ctx, None, lambda d: n, normalize_output)
|
| 456 |
+
|
| 457 |
+
    def autoprec(ctx, f, maxprec=None, catch=(), verbose=False):
        r"""
        Return a wrapped copy of *f* that repeatedly evaluates *f*
        with increasing precision until the result converges to the
        full precision used at the point of the call.

        This heuristically protects against rounding errors, at the cost of
        roughly a 2x slowdown compared to manually setting the optimal
        precision. This method can, however, easily be fooled if the results
        from *f* depend "discontinuously" on the precision, for instance
        if catastrophic cancellation can occur. Therefore, :func:`~mpmath.autoprec`
        should be used judiciously.

        **Examples**

        Many functions are sensitive to perturbations of the input arguments.
        If the arguments are decimal numbers, they may have to be converted
        to binary at a much higher precision. If the amount of required
        extra precision is unknown, :func:`~mpmath.autoprec` is convenient::

            >>> from mpmath import *
            >>> mp.dps = 15
            >>> mp.pretty = True
            >>> besselj(5, 125 * 10**28)    # Exact input
            -8.03284785591801e-17
            >>> besselj(5, '1.25e30')   # Bad
            7.12954868316652e-16
            >>> autoprec(besselj)(5, '1.25e30')   # Good
            -8.03284785591801e-17

        The following fails to converge because `\sin(\pi) = 0` whereas all
        finite-precision approximations of `\pi` give nonzero values::

            >>> autoprec(sin)(pi) # doctest: +IGNORE_EXCEPTION_DETAIL
            Traceback (most recent call last):
              ...
            NoConvergence: autoprec: prec increased to 2910 without convergence

        As the following example shows, :func:`~mpmath.autoprec` can protect against
        cancellation, but is fooled by too severe cancellation::

            >>> x = 1e-10
            >>> exp(x)-1; expm1(x); autoprec(lambda t: exp(t)-1)(x)
            1.00000008274037e-10
            1.00000000005e-10
            1.00000000005e-10
            >>> x = 1e-50
            >>> exp(x)-1; expm1(x); autoprec(lambda t: exp(t)-1)(x)
            0.0
            1.0e-50
            0.0

        With *catch*, an exception or list of exceptions to intercept
        may be specified. The raised exception is interpreted
        as signaling insufficient precision. This permits, for example,
        evaluating a function where a too low precision results in a
        division by zero::

            >>> f = lambda x: 1/(exp(x)-1)
            >>> f(1e-30)
            Traceback (most recent call last):
              ...
            ZeroDivisionError
            >>> autoprec(f, catch=ZeroDivisionError)(1e-30)
            1.0e+30


        """
        def f_autoprec_wrapped(*args, **kwargs):
            prec = ctx.prec
            if maxprec is None:
                maxprec2 = ctx._default_hyper_maxprec(prec)
            else:
                maxprec2 = maxprec
            try:
                # First estimate at slightly elevated precision
                ctx.prec = prec + 10
                try:
                    v1 = f(*args, **kwargs)
                except catch:
                    v1 = ctx.nan
                prec2 = prec + 20
                while 1:
                    ctx.prec = prec2
                    try:
                        v2 = f(*args, **kwargs)
                    except catch:
                        v2 = ctx.nan
                    if v1 == v2:
                        break
                    # Relative error between successive estimates
                    err = ctx.mag(v2-v1) - ctx.mag(v2)
                    if err < (-prec):
                        break
                    if verbose:
                        print("autoprec: target=%s, prec=%s, accuracy=%s" \
                            % (prec, prec2, -err))
                    v1 = v2
                    if prec2 >= maxprec2:
                        raise ctx.NoConvergence(\
                            "autoprec: prec increased to %i without convergence"\
                            % prec2)
                    # Escalate precision geometrically, capped at maxprec2
                    prec2 += int(prec2*2)
                    prec2 = min(prec2, maxprec2)
            finally:
                # Always restore the caller's precision
                ctx.prec = prec
            # Unary plus rounds the result to the restored precision
            return +v2
        return f_autoprec_wrapped
|
| 563 |
+
|
| 564 |
+
    def nstr(ctx, x, n=6, **kwargs):
        """
        Convert an ``mpf`` or ``mpc`` to a decimal string literal with *n*
        significant digits. The small default value for *n* is chosen to
        make this function useful for printing collections of numbers
        (lists, matrices, etc).

        If *x* is a list or tuple, :func:`~mpmath.nstr` is applied recursively
        to each element. For unrecognized classes, :func:`~mpmath.nstr`
        simply returns ``str(x)``.

        The companion function :func:`~mpmath.nprint` prints the result
        instead of returning it.

        The keyword arguments *strip_zeros*, *min_fixed*, *max_fixed*
        and *show_zero_exponent* are forwarded to :func:`~mpmath.libmp.to_str`.

        The number will be printed in fixed-point format if the position
        of the leading digit is strictly between min_fixed
        (default = min(-dps/3,-5)) and max_fixed (default = dps).

        To force fixed-point format always, set min_fixed = -inf,
        max_fixed = +inf. To force floating-point format, set
        min_fixed >= max_fixed.

        >>> from mpmath import *
        >>> nstr([+pi, ldexp(1,-500)])
        '[3.14159, 3.05494e-151]'
        >>> nprint([+pi, ldexp(1,-500)])
        [3.14159, 3.05494e-151]
        >>> nstr(mpf("5e-10"), 5)
        '5.0e-10'
        >>> nstr(mpf("5e-10"), 5, strip_zeros=False)
        '5.0000e-10'
        >>> nstr(mpf("5e-10"), 5, strip_zeros=False, min_fixed=-11)
        '0.00000000050000'
        >>> nstr(mpf(0), 5, show_zero_exponent=True)
        '0.0e+0'

        """
        # Containers: recurse element-wise
        if isinstance(x, list):
            return "[%s]" % (", ".join(ctx.nstr(c, n, **kwargs) for c in x))
        if isinstance(x, tuple):
            return "(%s)" % (", ".join(ctx.nstr(c, n, **kwargs) for c in x))
        if hasattr(x, '_mpf_'):
            return to_str(x._mpf_, n, **kwargs)
        if hasattr(x, '_mpc_'):
            return "(" + mpc_to_str(x._mpc_, n, **kwargs) + ")"
        if isinstance(x, basestring):
            return repr(x)
        if isinstance(x, ctx.matrix):
            return x.__nstr__(n, **kwargs)
        # Unrecognized type: fall back to plain str()
        return str(x)
|
| 617 |
+
|
| 618 |
+
    def _convert_fallback(ctx, x, strings):
        """
        Last-resort conversion for values ctx.convert() did not
        recognize directly: complex-number strings (when *strings* is
        true) and zero-width interval objects.
        """
        if strings and isinstance(x, basestring):
            if 'j' in x.lower():
                # Normalize and parse "a+bj" via the module-level regex
                x = x.lower().replace(' ', '')
                # NOTE(review): assumes the regex matches; a malformed
                # complex literal would leave match as None and raise
                # AttributeError below -- confirm this is intended.
                match = get_complex.match(x)
                re = match.group('re')
                if not re:
                    re = 0
                im = match.group('im').rstrip('j')
                return ctx.mpc(ctx.convert(re), ctx.convert(im))
        if hasattr(x, "_mpi_"):
            a, b = x._mpi_
            if a == b:
                return ctx.make_mpf(a)
            else:
                raise ValueError("can only create mpf from zero-width interval")
        raise TypeError("cannot create mpf from " + repr(x))
|
| 635 |
+
|
| 636 |
+
    def mpmathify(ctx, *args, **kwargs):
        """Convert the argument to an mpmath number; alias for ctx.convert()."""
        return ctx.convert(*args, **kwargs)
|
| 638 |
+
|
| 639 |
+
    def _parse_prec(ctx, kwargs):
        """
        Resolve (prec, rounding) from keyword arguments, falling back
        to the context's current settings. (0, 'f') means "exact".
        """
        if kwargs:
            if kwargs.get('exact'):
                return 0, 'f'
            prec, rounding = ctx._prec_rounding
            if 'rounding' in kwargs:
                rounding = kwargs['rounding']
            # 'prec' takes priority over 'dps' when both are given
            if 'prec' in kwargs:
                prec = kwargs['prec']
                if prec == ctx.inf:
                    return 0, 'f'
                else:
                    prec = int(prec)
            elif 'dps' in kwargs:
                dps = kwargs['dps']
                if dps == ctx.inf:
                    return 0, 'f'
                prec = dps_to_prec(dps)
            return prec, rounding
        return ctx._prec_rounding
|
| 659 |
+
|
| 660 |
+
    # Message used when an exact (infinite-precision) operation would
    # produce an unrepresentably large result.
    _exact_overflow_msg = "the exact result does not fit in memory"

    # Template used by hypsum() on convergence failure; filled with
    # (target accuracy in bits, working precision in bits).
    _hypsum_msg = """hypsum() failed to converge to the requested %i bits of accuracy
using a working precision of %i bits. Try with a higher maxprec,
maxterms, or set zeroprec."""
|
| 665 |
+
|
| 666 |
+
    def hypsum(ctx, p, q, flags, coeffs, z, accurate_small=True, **kwargs):
        """
        Evaluate the hypergeometric series pFq with the given parameter
        coefficients at *z*, retrying at increasing working precision
        until the requested accuracy is reached.
        """
        # Select (and cache) a compiled summation routine keyed on the
        # parameter signature and whether z is real or complex.
        if hasattr(z, "_mpf_"):
            key = p, q, flags, 'R'
            v = z._mpf_
        elif hasattr(z, "_mpc_"):
            key = p, q, flags, 'C'
            v = z._mpc_
        if key not in ctx.hyp_summators:
            ctx.hyp_summators[key] = libmp.make_hyp_summator(key)[1]
        summator = ctx.hyp_summators[key]
        prec = ctx.prec
        maxprec = kwargs.get('maxprec', ctx._default_hyper_maxprec(prec))
        extraprec = 50
        epsshift = 25
        # Jumps in magnitude occur when parameters are close to negative
        # integers. We must ensure that these terms are included in
        # the sum and added accurately
        magnitude_check = {}
        max_total_jump = 0
        for i, c in enumerate(coeffs):
            if flags[i] == 'Z':
                if i >= p and c <= 0:
                    # A nonpositive integer lower parameter is a pole,
                    # unless it is cancelled by an upper parameter.
                    ok = False
                    for ii, cc in enumerate(coeffs[:p]):
                        # Note: c <= cc or c < cc, depending on convention
                        if flags[ii] == 'Z' and cc <= 0 and c <= cc:
                            ok = True
                    if not ok:
                        raise ZeroDivisionError("pole in hypergeometric series")
                continue
            n, d = ctx.nint_distance(c)
            n = -int(n)
            d = -d
            if i >= p and n >= 0 and d > 4:
                if n in magnitude_check:
                    magnitude_check[n] += d
                else:
                    magnitude_check[n] = d
                extraprec = max(extraprec, d - prec + 60)
            max_total_jump += abs(d)
        while 1:
            if extraprec > maxprec:
                raise ValueError(ctx._hypsum_msg % (prec, prec+extraprec))
            wp = prec + extraprec
            if magnitude_check:
                mag_dict = dict((n,None) for n in magnitude_check)
            else:
                mag_dict = {}
            zv, have_complex, magnitude = summator(coeffs, v, prec, wp, \
                epsshift, mag_dict, **kwargs)
            cancel = -magnitude
            jumps_resolved = True
            if extraprec < max_total_jump:
                for n in mag_dict.values():
                    if (n is None) or (n < prec):
                        jumps_resolved = False
                        break
            accurate = (cancel < extraprec-25-5 or not accurate_small)
            if jumps_resolved:
                if accurate:
                    break
                # zero?
                zeroprec = kwargs.get('zeroprec')
                if zeroprec is not None:
                    if cancel > zeroprec:
                        if have_complex:
                            return ctx.mpc(0)
                        else:
                            return ctx.zero

            # Some near-singularities were not included, so increase
            # precision and repeat until they are
            extraprec *= 2
            # Possible workaround for bad roundoff in fixed-point arithmetic
            epsshift += 5
            extraprec += 5

        if type(zv) is tuple:
            if have_complex:
                return ctx.make_mpc(zv)
            else:
                return ctx.make_mpf(zv)
        else:
            return zv
|
| 750 |
+
|
| 751 |
+
    def ldexp(ctx, x, n):
        r"""
        Computes `x 2^n` efficiently. No rounding is performed.
        The argument `x` must be a real floating-point number (or
        possible to convert into one) and `n` must be a Python ``int``.

            >>> from mpmath import *
            >>> mp.dps = 15; mp.pretty = False
            >>> ldexp(1, 10)
            mpf('1024.0')
            >>> ldexp(1, -3)
            mpf('0.125')

        """
        x = ctx.convert(x)
        # mpf_shift adjusts the exponent directly, so the result is exact.
        return ctx.make_mpf(libmp.mpf_shift(x._mpf_, n))
|
| 767 |
+
|
| 768 |
+
    def frexp(ctx, x):
        r"""
        Given a real number `x`, returns `(y, n)` with `y \in [0.5, 1)`,
        `n` a Python integer, and such that `x = y 2^n`. No rounding is
        performed.

            >>> from mpmath import *
            >>> mp.dps = 15; mp.pretty = False
            >>> frexp(7.5)
            (mpf('0.9375'), 3)

        """
        x = ctx.convert(x)
        y, n = libmp.mpf_frexp(x._mpf_)
        return ctx.make_mpf(y), n
|
| 783 |
+
|
| 784 |
+
    def fneg(ctx, x, **kwargs):
        """
        Negates the number *x*, giving a floating-point result, optionally
        using a custom precision and rounding mode.

        See the documentation of :func:`~mpmath.fadd` for a detailed description
        of how to specify precision and rounding.

        **Examples**

        An mpmath number is returned::

            >>> from mpmath import *
            >>> mp.dps = 15; mp.pretty = False
            >>> fneg(2.5)
            mpf('-2.5')
            >>> fneg(-5+2j)
            mpc(real='5.0', imag='-2.0')

        Precise control over rounding is possible::

            >>> x = fadd(2, 1e-100, exact=True)
            >>> fneg(x)
            mpf('-2.0')
            >>> fneg(x, rounding='f')
            mpf('-2.0000000000000004')

        Negating with and without roundoff::

            >>> n = 200000000000000000000001
            >>> print(int(-mpf(n)))
            -200000000000000016777216
            >>> print(int(fneg(n)))
            -200000000000000016777216
            >>> print(int(fneg(n, prec=log(n,2)+1)))
            -200000000000000000000001
            >>> print(int(fneg(n, dps=log(n,10)+1)))
            -200000000000000000000001
            >>> print(int(fneg(n, prec=inf)))
            -200000000000000000000001
            >>> print(int(fneg(n, dps=inf)))
            -200000000000000000000001
            >>> print(int(fneg(n, exact=True)))
            -200000000000000000000001

        """
        # Resolve precision/rounding from keywords (0, 'f' means exact)
        prec, rounding = ctx._parse_prec(kwargs)
        x = ctx.convert(x)
        if hasattr(x, '_mpf_'):
            return ctx.make_mpf(mpf_neg(x._mpf_, prec, rounding))
        if hasattr(x, '_mpc_'):
            return ctx.make_mpc(mpc_neg(x._mpc_, prec, rounding))
        raise ValueError("Arguments need to be mpf or mpc compatible numbers")
|
| 837 |
+
|
| 838 |
+
def fadd(ctx, x, y, **kwargs):
    """
    Adds the numbers *x* and *y*, giving a floating-point result,
    optionally using a custom precision and rounding mode.

    The default precision is the working precision of the context.
    You can specify a custom precision in bits by passing the *prec* keyword
    argument, or by providing an equivalent decimal precision with the *dps*
    keyword argument. If the precision is set to ``+inf``, or if the flag
    *exact=True* is passed, an exact addition with no rounding is performed.

    When the precision is finite, the optional *rounding* keyword argument
    specifies the direction of rounding. Valid options are ``'n'`` for
    nearest (default), ``'f'`` for floor, ``'c'`` for ceiling, ``'d'``
    for down, ``'u'`` for up.

    **Examples**

    Using :func:`~mpmath.fadd` with precision and rounding control::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = False
        >>> fadd(2, 1e-20)
        mpf('2.0')
        >>> fadd(2, 1e-20, rounding='u')
        mpf('2.0000000000000004')
        >>> nprint(fadd(2, 1e-20, prec=100), 25)
        2.00000000000000000001
        >>> nprint(fadd(2, 1e-20, dps=15), 25)
        2.0
        >>> nprint(fadd(2, 1e-20, dps=25), 25)
        2.00000000000000000001
        >>> nprint(fadd(2, 1e-20, exact=True), 25)
        2.00000000000000000001

    Exact addition avoids cancellation errors, enforcing familiar laws
    of numbers such as `x+y-x = y`, which don't hold in floating-point
    arithmetic with finite precision::

        >>> x, y = mpf(2), mpf('1e-1000')
        >>> print(x + y - x)
        0.0
        >>> print(fadd(x, y, prec=inf) - x)
        1.0e-1000
        >>> print(fadd(x, y, exact=True) - x)
        1.0e-1000

    Exact addition can be inefficient and may be impossible to perform
    with large magnitude differences::

        >>> fadd(1, '1e-100000000000000000000', prec=inf)
        Traceback (most recent call last):
          ...
        OverflowError: the exact result does not fit in memory

    """
    prec, rounding = ctx._parse_prec(kwargs)
    x = ctx.convert(x)
    y = ctx.convert(y)
    try:
        # Dispatch on the (real, complex) type combination of the operands.
        # Addition is commutative, so mpf+mpc reuses mpc_add_mpf with the
        # arguments swapped.
        if hasattr(x, '_mpf_'):
            if hasattr(y, '_mpf_'):
                return ctx.make_mpf(mpf_add(x._mpf_, y._mpf_, prec, rounding))
            if hasattr(y, '_mpc_'):
                return ctx.make_mpc(mpc_add_mpf(y._mpc_, x._mpf_, prec, rounding))
        if hasattr(x, '_mpc_'):
            if hasattr(y, '_mpf_'):
                return ctx.make_mpc(mpc_add_mpf(x._mpc_, y._mpf_, prec, rounding))
            if hasattr(y, '_mpc_'):
                return ctx.make_mpc(mpc_add(x._mpc_, y._mpc_, prec, rounding))
    except (ValueError, OverflowError):
        # An exact (infinite-precision) addition whose result mantissa
        # would be too large surfaces here as a backend error.
        raise OverflowError(ctx._exact_overflow_msg)
    raise ValueError("Arguments need to be mpf or mpc compatible numbers")
|
| 911 |
+
|
| 912 |
+
def fsub(ctx, x, y, **kwargs):
    """
    Subtracts the numbers *x* and *y*, giving a floating-point result,
    optionally using a custom precision and rounding mode.

    See the documentation of :func:`~mpmath.fadd` for a detailed description
    of how to specify precision and rounding.

    **Examples**

    Using :func:`~mpmath.fsub` with precision and rounding control::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = False
        >>> fsub(2, 1e-20)
        mpf('2.0')
        >>> fsub(2, 1e-20, rounding='d')
        mpf('1.9999999999999998')
        >>> nprint(fsub(2, 1e-20, prec=100), 25)
        1.99999999999999999999
        >>> nprint(fsub(2, 1e-20, dps=15), 25)
        2.0
        >>> nprint(fsub(2, 1e-20, dps=25), 25)
        1.99999999999999999999
        >>> nprint(fsub(2, 1e-20, exact=True), 25)
        1.99999999999999999999

    Exact subtraction avoids cancellation errors, enforcing familiar laws
    of numbers such as `x-y+y = x`, which don't hold in floating-point
    arithmetic with finite precision::

        >>> x, y = mpf(2), mpf('1e1000')
        >>> print(x - y + y)
        0.0
        >>> print(fsub(x, y, prec=inf) + y)
        2.0
        >>> print(fsub(x, y, exact=True) + y)
        2.0

    Exact addition can be inefficient and may be impossible to perform
    with large magnitude differences::

        >>> fsub(1, '1e-100000000000000000000', prec=inf)
        Traceback (most recent call last):
          ...
        OverflowError: the exact result does not fit in memory

    """
    prec, rounding = ctx._parse_prec(kwargs)
    x = ctx.convert(x)
    y = ctx.convert(y)
    try:
        # Subtraction is not commutative, so the mpf-minus-mpc case promotes
        # x to a complex value (imaginary part fzero) instead of swapping.
        if hasattr(x, '_mpf_'):
            if hasattr(y, '_mpf_'):
                return ctx.make_mpf(mpf_sub(x._mpf_, y._mpf_, prec, rounding))
            if hasattr(y, '_mpc_'):
                return ctx.make_mpc(mpc_sub((x._mpf_, fzero), y._mpc_, prec, rounding))
        if hasattr(x, '_mpc_'):
            if hasattr(y, '_mpf_'):
                return ctx.make_mpc(mpc_sub_mpf(x._mpc_, y._mpf_, prec, rounding))
            if hasattr(y, '_mpc_'):
                return ctx.make_mpc(mpc_sub(x._mpc_, y._mpc_, prec, rounding))
    except (ValueError, OverflowError):
        # Exact subtraction with a huge magnitude difference cannot be
        # represented; report it uniformly as an OverflowError.
        raise OverflowError(ctx._exact_overflow_msg)
    raise ValueError("Arguments need to be mpf or mpc compatible numbers")
|
| 977 |
+
|
| 978 |
+
def fmul(ctx, x, y, **kwargs):
    """
    Multiplies the numbers *x* and *y*, giving a floating-point result,
    optionally using a custom precision and rounding mode.

    See the documentation of :func:`~mpmath.fadd` for a detailed description
    of how to specify precision and rounding.

    **Examples**

    The result is an mpmath number::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = False
        >>> fmul(2, 5.0)
        mpf('10.0')
        >>> fmul(0.5j, 0.5)
        mpc(real='0.0', imag='0.25')

    Avoiding roundoff::

        >>> x, y = 10**10+1, 10**15+1
        >>> print(x*y)
        10000000001000010000000001
        >>> print(mpf(x) * mpf(y))
        1.0000000001e+25
        >>> print(int(mpf(x) * mpf(y)))
        10000000001000011026399232
        >>> print(int(fmul(x, y)))
        10000000001000011026399232
        >>> print(int(fmul(x, y, dps=25)))
        10000000001000010000000001
        >>> print(int(fmul(x, y, exact=True)))
        10000000001000010000000001

    Exact multiplication with complex numbers can be inefficient and may
    be impossible to perform with large magnitude differences between
    real and imaginary parts::

        >>> x = 1+2j
        >>> y = mpc(2, '1e-100000000000000000000')
        >>> fmul(x, y)
        mpc(real='2.0', imag='4.0')
        >>> fmul(x, y, rounding='u')
        mpc(real='2.0', imag='4.0000000000000009')
        >>> fmul(x, y, exact=True)
        Traceback (most recent call last):
          ...
        OverflowError: the exact result does not fit in memory

    """
    prec, rounding = ctx._parse_prec(kwargs)
    x = ctx.convert(x)
    y = ctx.convert(y)
    try:
        # Multiplication is commutative, so the mixed mpf/mpc case reuses
        # mpc_mul_mpf with swapped arguments.
        if hasattr(x, '_mpf_'):
            if hasattr(y, '_mpf_'):
                return ctx.make_mpf(mpf_mul(x._mpf_, y._mpf_, prec, rounding))
            if hasattr(y, '_mpc_'):
                return ctx.make_mpc(mpc_mul_mpf(y._mpc_, x._mpf_, prec, rounding))
        if hasattr(x, '_mpc_'):
            if hasattr(y, '_mpf_'):
                return ctx.make_mpc(mpc_mul_mpf(x._mpc_, y._mpf_, prec, rounding))
            if hasattr(y, '_mpc_'):
                return ctx.make_mpc(mpc_mul(x._mpc_, y._mpc_, prec, rounding))
    except (ValueError, OverflowError):
        # Exact products that do not fit in memory are reported uniformly.
        raise OverflowError(ctx._exact_overflow_msg)
    raise ValueError("Arguments need to be mpf or mpc compatible numbers")
|
| 1046 |
+
|
| 1047 |
+
def fdiv(ctx, x, y, **kwargs):
    """
    Divides the numbers *x* and *y*, giving a floating-point result,
    optionally using a custom precision and rounding mode.

    See the documentation of :func:`~mpmath.fadd` for a detailed description
    of how to specify precision and rounding.

    **Examples**

    The result is an mpmath number::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = False
        >>> fdiv(3, 2)
        mpf('1.5')
        >>> fdiv(2, 3)
        mpf('0.66666666666666663')
        >>> fdiv(2+4j, 0.5)
        mpc(real='4.0', imag='8.0')

    The rounding direction and precision can be controlled::

        >>> fdiv(2, 3, dps=3)    # Should be accurate to at least 3 digits
        mpf('0.6666259765625')
        >>> fdiv(2, 3, rounding='d')
        mpf('0.66666666666666663')
        >>> fdiv(2, 3, prec=60)
        mpf('0.66666666666666667')
        >>> fdiv(2, 3, rounding='u')
        mpf('0.66666666666666674')

    Checking the error of a division by performing it at higher precision::

        >>> fdiv(2, 3) - fdiv(2, 3, prec=100)
        mpf('-3.7007434154172148e-17')

    Unlike :func:`~mpmath.fadd`, :func:`~mpmath.fmul`, etc., exact division is not
    allowed since the quotient of two floating-point numbers generally
    does not have an exact floating-point representation. (In the
    future this might be changed to allow the case where the division
    is actually exact.)

        >>> fdiv(2, 3, exact=True)
        Traceback (most recent call last):
          ...
        ValueError: division is not an exact operation

    """
    prec, rounding = ctx._parse_prec(kwargs)
    # _parse_prec signals "exact" (infinite precision) by returning a
    # falsy prec; division cannot be performed exactly in general.
    if not prec:
        raise ValueError("division is not an exact operation")
    x = ctx.convert(x)
    y = ctx.convert(y)
    if hasattr(x, '_mpf_'):
        if hasattr(y, '_mpf_'):
            return ctx.make_mpf(mpf_div(x._mpf_, y._mpf_, prec, rounding))
        if hasattr(y, '_mpc_'):
            # Promote the real numerator to a complex value before dividing.
            return ctx.make_mpc(mpc_div((x._mpf_, fzero), y._mpc_, prec, rounding))
    if hasattr(x, '_mpc_'):
        if hasattr(y, '_mpf_'):
            return ctx.make_mpc(mpc_div_mpf(x._mpc_, y._mpf_, prec, rounding))
        if hasattr(y, '_mpc_'):
            return ctx.make_mpc(mpc_div(x._mpc_, y._mpc_, prec, rounding))
    raise ValueError("Arguments need to be mpf or mpc compatible numbers")
|
| 1112 |
+
|
| 1113 |
+
def nint_distance(ctx, x):
    r"""
    Return `(n,d)` where `n` is the nearest integer to `x` and `d` is
    an estimate of `\log_2(|x-n|)`. If `d < 0`, `-d` gives the precision
    (measured in bits) lost to cancellation when computing `x-n`.

        >>> from mpmath import *
        >>> n, d = nint_distance(5)
        >>> print(n); print(d)
        5
        -inf
        >>> n, d = nint_distance(mpf(5))
        >>> print(n); print(d)
        5
        -inf
        >>> n, d = nint_distance(mpf(5.00000001))
        >>> print(n); print(d)
        5
        -26
        >>> n, d = nint_distance(mpf(4.99999999))
        >>> print(n); print(d)
        5
        -26
        >>> n, d = nint_distance(mpc(5,10))
        >>> print(n); print(d)
        5
        4
        >>> n, d = nint_distance(mpc(5,0.000001))
        >>> print(n); print(d)
        5
        -19

    """
    typx = type(x)
    # Python ints are exactly integers: zero distance (log2 = -inf).
    if typx in int_types:
        return int(x), ctx.ninf
    elif typx is rational.mpq:
        p, q = x._mpq_
        n, r = divmod(p, q)
        # Round the quotient to the nearest integer.
        if 2*r >= q:
            n += 1
        elif not r:
            return n, ctx.ninf
        # log(p/q-n) = log((p-nq)/q) = log(p-nq) - log(q)
        d = bitcount(abs(p-n*q)) - bitcount(q)
        return n, d
    if hasattr(x, "_mpf_"):
        re = x._mpf_
        im_dist = ctx.ninf
    elif hasattr(x, "_mpc_"):
        re, im = x._mpc_
        # Raw mpf tuple: (sign, mantissa, exponent, bitcount).
        isign, iman, iexp, ibc = im
        if iman:
            # Magnitude (log2) of the imaginary part.
            im_dist = iexp + ibc
        elif im == fzero:
            im_dist = ctx.ninf
        else:
            # Zero mantissa but not fzero: a special value (inf/nan).
            raise ValueError("requires a finite number")
    else:
        # Convert other types and retry once via the context method.
        x = ctx.convert(x)
        if hasattr(x, "_mpf_") or hasattr(x, "_mpc_"):
            return ctx.nint_distance(x)
        else:
            raise TypeError("requires an mpf/mpc")
    sign, man, exp, bc = re
    mag = exp+bc
    # |x| < 0.5
    if mag < 0:
        n = 0
        re_dist = mag
    elif man:
        # exact integer
        if exp >= 0:
            n = man << exp
            re_dist = ctx.ninf
        # exact half-integer
        elif exp == -1:
            n = (man>>1)+1
            re_dist = 0
        else:
            # Split the mantissa at the binary point (d fractional bits
            # below the half-bit) and round to the nearest integer;
            # the leftover bits give the distance.
            d = (-exp-1)
            t = man >> d
            if t & 1:
                t += 1
                man = (t<<d) - man
            else:
                man -= (t<<d)
            n = t>>1   # int(t)>>1
            re_dist = exp+bitcount(man)
        if sign:
            n = -n
    elif re == fzero:
        re_dist = ctx.ninf
        n = 0
    else:
        raise ValueError("requires a finite number")
    # The overall distance is dominated by the larger of the real and
    # imaginary contributions.
    return n, max(re_dist, im_dist)
|
| 1210 |
+
|
| 1211 |
+
def fprod(ctx, factors):
    r"""
    Calculates a product containing a finite number of factors (for
    infinite products, see :func:`~mpmath.nprod`). The factors will be
    converted to mpmath numbers.

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = False
        >>> fprod([1, 2, 0.5, 7])
        mpf('7.0')

    """
    saved_prec = ctx.prec
    product = ctx.one
    try:
        for factor in factors:
            product = product * factor
    finally:
        # Restore the caller's working precision even if a factor
        # conversion fails.
        ctx.prec = saved_prec
    return +product
|
| 1231 |
+
|
| 1232 |
+
def rand(ctx):
    """
    Returns an ``mpf`` with value chosen randomly from `[0, 1)`.
    The number of randomly generated bits in the mantissa is equal
    to the working precision.
    """
    bits = ctx._prec
    raw = mpf_rand(bits)
    return ctx.make_mpf(raw)
|
| 1239 |
+
|
| 1240 |
+
def fraction(ctx, p, q):
    """
    Given Python integers `(p, q)`, returns a lazy ``mpf`` representing
    the fraction `p/q`. The value is updated with the precision.

        >>> from mpmath import *
        >>> mp.dps = 15
        >>> a = fraction(1,100)
        >>> b = mpf(1)/100
        >>> print(a); print(b)
        0.01
        0.01
        >>> mp.dps = 30
        >>> print(a); print(b)      # a will be accurate
        0.01
        0.0100000000000000002081668171172
        >>> mp.dps = 15
    """
    # The closure re-evaluates p/q at whatever precision is requested
    # each time the constant is used; the string is its display name.
    return ctx.constant(lambda prec, rnd: from_rational(p, q, prec, rnd),
        '%s/%s' % (p, q))
|
| 1260 |
+
|
| 1261 |
+
def absmin(ctx, x):
    """Return ``abs(x)`` after converting *x* to an mpmath number."""
    value = ctx.convert(x)
    return abs(value)
|
| 1263 |
+
|
| 1264 |
+
def absmax(ctx, x):
    """Return ``abs(x)`` after converting *x* to an mpmath number."""
    value = ctx.convert(x)
    return abs(value)
|
| 1266 |
+
|
| 1267 |
+
def _as_points(ctx, x):
|
| 1268 |
+
# XXX: remove this?
|
| 1269 |
+
if hasattr(x, '_mpi_'):
|
| 1270 |
+
a, b = x._mpi_
|
| 1271 |
+
return [ctx.make_mpf(a), ctx.make_mpf(b)]
|
| 1272 |
+
return x
|
| 1273 |
+
|
| 1274 |
+
'''
|
| 1275 |
+
def _zetasum(ctx, s, a, b):
|
| 1276 |
+
"""
|
| 1277 |
+
Computes sum of k^(-s) for k = a, a+1, ..., b with a, b both small
|
| 1278 |
+
integers.
|
| 1279 |
+
"""
|
| 1280 |
+
a = int(a)
|
| 1281 |
+
b = int(b)
|
| 1282 |
+
s = ctx.convert(s)
|
| 1283 |
+
prec, rounding = ctx._prec_rounding
|
| 1284 |
+
if hasattr(s, '_mpf_'):
|
| 1285 |
+
v = ctx.make_mpf(libmp.mpf_zetasum(s._mpf_, a, b, prec))
|
| 1286 |
+
elif hasattr(s, '_mpc_'):
|
| 1287 |
+
v = ctx.make_mpc(libmp.mpc_zetasum(s._mpc_, a, b, prec))
|
| 1288 |
+
return v
|
| 1289 |
+
'''
|
| 1290 |
+
|
| 1291 |
+
def _zetasum_fast(ctx, s, a, n, derivatives=[0], reflect=False):
    # Fast zeta sum computed by the libmp backend; only supported for
    # integer *a* and complex *s*.
    # NOTE(review): ``derivatives=[0]`` is a mutable default argument; it
    # is only passed through to libmp here and never mutated, but confirm
    # libmp does not mutate it either.
    if not (ctx.isint(a) and hasattr(s, "_mpc_")):
        raise NotImplementedError
    a = int(a)
    prec = ctx._prec
    xs, ys = libmp.mpc_zetasum(s._mpc_, a, n, derivatives, reflect, prec)
    # Wrap the raw mpc tuples returned by libmp into context numbers.
    xs = [ctx.make_mpc(x) for x in xs]
    ys = [ctx.make_mpc(y) for y in ys]
    return xs, ys
|
| 1300 |
+
|
| 1301 |
+
class PrecisionManager:
    """Temporarily adjust the working precision of a context.

    Usable either as a decorator factory (via ``__call__``) or as a
    context manager. Exactly one of *precfun* (a function mapping the
    current binary precision to a new one) or *dpsfun* (the same for
    decimal precision) is applied; *precfun* takes priority when both
    are given. The original precision is always restored afterwards.
    """
    def __init__(self, ctx, precfun, dpsfun, normalize_output=False):
        self.ctx = ctx
        self.precfun = precfun
        self.dpsfun = dpsfun
        # If true, decorated-function results are re-created with unary
        # plus before being returned (tuples element-wise).
        self.normalize_output = normalize_output
    def __call__(self, f):
        @functools.wraps(f)
        def g(*args, **kwargs):
            orig = self.ctx.prec
            try:
                if self.precfun:
                    self.ctx.prec = self.precfun(self.ctx.prec)
                else:
                    self.ctx.dps = self.dpsfun(self.ctx.dps)
                if self.normalize_output:
                    v = f(*args, **kwargs)
                    if type(v) is tuple:
                        return tuple([+a for a in v])
                    return +v
                else:
                    return f(*args, **kwargs)
            finally:
                # Restore the caller's precision even on exceptions.
                self.ctx.prec = orig
        return g
    def __enter__(self):
        self.origp = self.ctx.prec
        if self.precfun:
            self.ctx.prec = self.precfun(self.ctx.prec)
        else:
            self.ctx.dps = self.dpsfun(self.ctx.dps)
    def __exit__(self, exc_type, exc_val, exc_tb):
        self.ctx.prec = self.origp
        # Returning False lets any exception propagate.
        return False
|
| 1335 |
+
|
| 1336 |
+
|
| 1337 |
+
if __name__ == '__main__':
    # Run the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
|
vllm/lib/python3.10/site-packages/mpmath/functions/__init__.py
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from . import functions
|
| 2 |
+
# Hack to update methods
|
| 3 |
+
from . import factorials
|
| 4 |
+
from . import hypergeometric
|
| 5 |
+
from . import expintegrals
|
| 6 |
+
from . import bessel
|
| 7 |
+
from . import orthogonal
|
| 8 |
+
from . import theta
|
| 9 |
+
from . import elliptic
|
| 10 |
+
from . import signals
|
| 11 |
+
from . import zeta
|
| 12 |
+
from . import rszeta
|
| 13 |
+
from . import zetazeros
|
| 14 |
+
from . import qfunctions
|
vllm/lib/python3.10/site-packages/mpmath/functions/__pycache__/bessel.cpython-310.pyc
ADDED
|
Binary file (34.1 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/mpmath/functions/__pycache__/expintegrals.cpython-310.pyc
ADDED
|
Binary file (10.2 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/mpmath/functions/__pycache__/hypergeometric.cpython-310.pyc
ADDED
|
Binary file (39.7 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/mpmath/functions/__pycache__/signals.cpython-310.pyc
ADDED
|
Binary file (1.13 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/mpmath/functions/bessel.py
ADDED
|
@@ -0,0 +1,1108 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .functions import defun, defun_wrapped
|
| 2 |
+
|
| 3 |
+
@defun
def j0(ctx, x):
    """Computes the Bessel function `J_0(x)`. See :func:`~mpmath.besselj`."""
    order = 0
    return ctx.besselj(order, x)
|
| 7 |
+
|
| 8 |
+
@defun
def j1(ctx, x):
    """Computes the Bessel function `J_1(x)`. See :func:`~mpmath.besselj`."""
    order = 1
    return ctx.besselj(order, x)
|
| 12 |
+
|
| 13 |
+
@defun
def besselj(ctx, n, z, derivative=0, **kwargs):
    """Compute the Bessel function `J_n(z)` (or its *derivative*-th
    derivative with respect to *z*) via hypergeometric series."""
    # Detect whether the order is an integer; an exact int avoids a
    # conversion round-trip.
    if type(n) is int:
        n_isint = True
    else:
        n = ctx.convert(n)
        n_isint = ctx.isint(n)
        if n_isint:
            n = int(ctx._re(n))
    # Reflection formula for negative integer order: J_{-n} = (-1)^n J_n.
    if n_isint and n < 0:
        return (-1)**n * ctx.besselj(-n, z, derivative, **kwargs)
    z = ctx.convert(z)
    M = ctx.mag(z)
    if derivative:
        d = ctx.convert(derivative)
        # TODO: the integer special-casing shouldn't be necessary.
        # However, the hypergeometric series gets inaccurate for large d
        # because of inaccurate pole cancellation at a pole far from
        # zero (needs to be fixed in hypercomb or hypsum)
        if ctx.isint(d) and d >= 0:
            d = int(d)
            orig = ctx.prec
            try:
                # Guard digits for the alternating finite sum below.
                ctx.prec += 15
                v = ctx.fsum((-1)**k * ctx.binomial(d,k) * ctx.besselj(2*k+n-d,z)
                    for k in range(d+1))
            finally:
                ctx.prec = orig
            v *= ctx.mpf(2)**(-d)
        else:
            # Non-integer derivative order: hypercomb term table.
            def h(n,d):
                r = ctx.fmul(ctx.fmul(z, z, prec=ctx.prec+M), -0.25, exact=True)
                B = [0.5*(n-d+1), 0.5*(n-d+2)]
                T = [([2,ctx.pi,z],[d-2*n,0.5,n-d],[],B,[(n+1)*0.5,(n+2)*0.5],B+[n+1],r)]
                return T
            v = ctx.hypercomb(h, [n,d], **kwargs)
    else:
        # Fast case: J_n(x), n int, appropriate magnitude for fixed-point calculation
        if (not derivative) and n_isint and abs(M) < 10 and abs(n) < 20:
            try:
                return ctx._besselj(n, z)
            except NotImplementedError:
                pass
        if not z:
            # Limits at z = 0; the n+z / n*z additions propagate the
            # result type (real/complex) of the inputs.
            if not n:
                v = ctx.one + n+z
            elif ctx.re(n) > 0:
                v = n*z
            else:
                v = ctx.inf + z + n
        else:
            #v = 0
            orig = ctx.prec
            try:
                # XXX: workaround for accuracy in low level hypergeometric series
                # when alternating, large arguments
                ctx.prec += min(3*abs(M), ctx.prec)
                w = ctx.fmul(z, 0.5, exact=True)
                def h(n):
                    r = ctx.fneg(ctx.fmul(w, w, prec=max(0,ctx.prec+M)), exact=True)
                    return [([w], [n], [], [n+1], [], [n+1], r)]
                v = ctx.hypercomb(h, [n], **kwargs)
            finally:
                ctx.prec = orig
            # Round the result back to the caller's precision.
            v = +v
    return v
|
| 79 |
+
|
| 80 |
+
@defun
def besseli(ctx, n, z, derivative=0, **kwargs):
    """Compute the modified Bessel function `I_n(z)` (or its
    *derivative*-th derivative with respect to *z*)."""
    n = ctx.convert(n)
    z = ctx.convert(z)
    if not z:
        if derivative:
            raise ValueError
        if not n:
            # I(0,0) = 1
            return 1+n+z
        if ctx.isint(n):
            return 0*(n+z)
        r = ctx.re(n)
        # Limits at z = 0 for non-integer order depend on sign(Re(n)).
        if r == 0:
            return ctx.nan*(n+z)
        elif r > 0:
            return 0*(n+z)
        else:
            return ctx.inf+(n+z)
    M = ctx.mag(z)
    if derivative:
        d = ctx.convert(derivative)
        def h(n,d):
            # Same term table as besselj's derivative case, but with
            # +z^2/4 (no alternating sign) for the modified function.
            r = ctx.fmul(ctx.fmul(z, z, prec=ctx.prec+M), 0.25, exact=True)
            B = [0.5*(n-d+1), 0.5*(n-d+2), n+1]
            T = [([2,ctx.pi,z],[d-2*n,0.5,n-d],[n+1],B,[(n+1)*0.5,(n+2)*0.5],B,r)]
            return T
        v = ctx.hypercomb(h, [n,d], **kwargs)
    else:
        def h(n):
            w = ctx.fmul(z, 0.5, exact=True)
            r = ctx.fmul(w, w, prec=max(0,ctx.prec+M))
            return [([w], [n], [], [n+1], [], [n+1], r)]
        v = ctx.hypercomb(h, [n], **kwargs)
    return v
|
| 115 |
+
|
| 116 |
+
@defun_wrapped
def bessely(ctx, n, z, derivative=0, **kwargs):
    """Compute the Bessel function of the second kind `Y_n(z)` via
    the connection formula with `J_{\\pm n}(z)`."""
    if not z:
        if derivative:
            # Not implemented
            raise ValueError
        if not n:
            # ~ log(z/2)
            return -ctx.inf + (n+z)
        if ctx.im(n):
            return ctx.nan * (n+z)
        r = ctx.re(n)
        q = n+0.5
        # Half-integer orders at z = 0.
        if ctx.isint(q):
            if n > 0:
                return -ctx.inf + (n+z)
            else:
                return 0 * (n+z)
        if r < 0 and int(ctx.floor(q)) % 2:
            return ctx.inf + (n+z)
        else:
            return ctx.ninf + (n+z)
    # XXX: use hypercomb
    ctx.prec += 10
    # Near integer order sin(pi*n) vanishes; perturb n and/or raise the
    # precision to compensate for the cancellation.
    m, d = ctx.nint_distance(n)
    if d < -ctx.prec:
        h = +ctx.eps
        ctx.prec *= 2
        n += h
    elif d < 0:
        ctx.prec -= d
    # TODO: avoid cancellation for imaginary arguments
    cos, sin = ctx.cospi_sinpi(n)
    return (ctx.besselj(n,z,derivative,**kwargs)*cos - \
        ctx.besselj(-n,z,derivative,**kwargs))/sin
|
| 151 |
+
|
| 152 |
+
@defun_wrapped
def besselk(ctx, n, z, **kwargs):
    """
    Modified Bessel function of the second kind K_n(z), evaluated with
    ctx.hypercomb using either a limit representation (small |z|) or a
    2F0-type asymptotic representation (large |z|).
    """
    if not z:
        return ctx.inf
    M = ctx.mag(z)
    if M < 1:
        # Represent as limit definition
        def h(n):
            r = (z/2)**2
            T1 = [z, 2], [-n, n-1], [n], [], [], [1-n], r
            T2 = [z, 2], [n, -n-1], [-n], [], [], [1+n], r
            return T1, T2
    # We could use the limit definition always, but it leads
    # to very bad cancellation (of exponentially large terms)
    # for large real z
    # Instead represent in terms of 2F0
    else:
        ctx.prec += M
        def h(n):
            return [([ctx.pi/2, z, ctx.exp(-z)], [0.5,-0.5,1], [], [], \
                [n+0.5, 0.5-n], [], -1/(2*z))]
    return ctx.hypercomb(h, [n], **kwargs)
|
| 174 |
+
|
| 175 |
+
@defun_wrapped
|
| 176 |
+
def hankel1(ctx,n,x,**kwargs):
    """Hankel function of the first kind: H1_n(x) = J_n(x) + j*Y_n(x)."""
    j_part = ctx.besselj(n, x, **kwargs)
    y_part = ctx.bessely(n, x, **kwargs)
    return j_part + ctx.j*y_part
|
| 178 |
+
|
| 179 |
+
@defun_wrapped
|
| 180 |
+
def hankel2(ctx,n,x,**kwargs):
    """Hankel function of the second kind: H2_n(x) = J_n(x) - j*Y_n(x)."""
    j_part = ctx.besselj(n, x, **kwargs)
    y_part = ctx.bessely(n, x, **kwargs)
    return j_part - ctx.j*y_part
|
| 182 |
+
|
| 183 |
+
@defun_wrapped
def whitm(ctx,k,m,z,**kwargs):
    """
    Whittaker function M(k,m,z), computed from its 1F1 representation:
    exp(-z/2) * z**(1/2+m) * 1F1(1/2+m-k, 1+2m, z).
    """
    if z == 0:
        # M(k,m,z) = 0^(1/2+m)
        if ctx.re(m) > -0.5:
            return z
        elif ctx.re(m) < -0.5:
            return ctx.inf + z
        else:
            return ctx.nan * z
    x = ctx.fmul(-0.5, z, exact=True)
    y = 0.5+m
    return ctx.exp(x) * z**y * ctx.hyp1f1(y-k, 1+2*m, z, **kwargs)
|
| 196 |
+
|
| 197 |
+
@defun_wrapped
def whitw(ctx,k,m,z,**kwargs):
    """
    Whittaker function W(k,m,z), computed from its representation via
    Tricomi's function: exp(-z/2) * z**(1/2+m) * U(1/2+m-k, 1+2m, z).
    """
    if z == 0:
        g = abs(ctx.re(m))
        # W(k,m,z) ~ 0^(1/2-|Re m|) as z -> 0
        if g < 0.5:
            return z
        elif g > 0.5:
            return ctx.inf + z
        else:
            return ctx.nan * z
    x = ctx.fmul(-0.5, z, exact=True)
    y = 0.5+m
    return ctx.exp(x) * z**y * ctx.hyperu(y-k, 1+2*m, z, **kwargs)
|
| 210 |
+
|
| 211 |
+
@defun
def hyperu(ctx, a, b, z, **kwargs):
    """
    Tricomi's confluent hypergeometric function U(a,b,z).  First tries
    the asymptotic series U(a,b,z) ~ z**(-a) * 2F0(a, 1+a-b; ; -1/z);
    if that fails to converge, falls back to a hypercomb combination of
    1F1 terms.
    """
    a, atype = ctx._convert_param(a)
    b, btype = ctx._convert_param(b)
    z = ctx.convert(z)
    if not z:
        # Limiting value at z = 0
        if ctx.re(b) <= 1:
            return ctx.gammaprod([1-b],[a-b+1])
        else:
            return ctx.inf + z
    bb = 1+a-b
    bb, bbtype = ctx._convert_param(bb)
    try:
        orig = ctx.prec
        try:
            ctx.prec += 10
            # Divergent asymptotic series; maxterms bounds the attempt
            v = ctx.hypsum(2, 0, (atype, bbtype), [a, bb], -1/z, maxterms=ctx.prec)
            return v / z**a
        finally:
            ctx.prec = orig
    except ctx.NoConvergence:
        pass
    def h(a,b):
        w = ctx.sinpi(b)
        T1 = ([ctx.pi,w],[1,-1],[],[a-b+1,b],[a],[b],z)
        T2 = ([-ctx.pi,w,z],[1,-1,1-b],[],[a,2-b],[a-b+1],[2-b],z)
        return T1, T2
    return ctx.hypercomb(h, [a,b], **kwargs)
|
| 239 |
+
|
| 240 |
+
@defun
def struveh(ctx,n,z, **kwargs):
    """Struve function H_n(z), via a single hypercomb term."""
    n = ctx.convert(n)
    z = ctx.convert(z)
    # http://functions.wolfram.com/Bessel-TypeFunctions/StruveH/26/01/02/
    def h(n):
        return [([z/2, 0.5*ctx.sqrt(ctx.pi)], [n+1, -1], [], [n+1.5], [1], [1.5, n+1.5], -(z/2)**2)]
    return ctx.hypercomb(h, [n], **kwargs)
|
| 248 |
+
|
| 249 |
+
@defun
def struvel(ctx,n,z, **kwargs):
    """Modified Struve function L_n(z); differs from struveh only in the
    sign of the hypergeometric argument."""
    n = ctx.convert(n)
    z = ctx.convert(z)
    # http://functions.wolfram.com/Bessel-TypeFunctions/StruveL/26/01/02/
    def h(n):
        return [([z/2, 0.5*ctx.sqrt(ctx.pi)], [n+1, -1], [], [n+1.5], [1], [1.5, n+1.5], (z/2)**2)]
    return ctx.hypercomb(h, [n], **kwargs)
|
| 257 |
+
|
| 258 |
+
def _anger(ctx,which,v,z,**kwargs):
    """
    Common implementation for the Anger function (which=0) and the
    Weber function (which=1), as a two-term hypercomb combination.
    """
    v = ctx._convert_param(v)[0]
    z = ctx.convert(z)
    def h(v):
        b = ctx.mpq_1_2
        u = v*b
        m = b*3
        a1,a2,b1,b2 = m-u, m+u, 1-u, 1+u
        c, s = ctx.cospi_sinpi(u)
        # Only the prefactors differ between the two functions
        if which == 0:
            A, B = [b*z, s], [c]
        if which == 1:
            A, B = [b*z, -c], [s]
        w = ctx.square_exp_arg(z, mult=-0.25)
        T1 = A, [1, 1], [], [a1,a2], [1], [a1,a2], w
        T2 = B, [1], [], [b1,b2], [1], [b1,b2], w
        return T1, T2
    return ctx.hypercomb(h, [v], **kwargs)
|
| 276 |
+
|
| 277 |
+
@defun
def angerj(ctx, v, z, **kwargs):
    """Anger function J_v(z); see _anger for the implementation."""
    return _anger(ctx, 0, v, z, **kwargs)
|
| 280 |
+
|
| 281 |
+
@defun
def webere(ctx, v, z, **kwargs):
    """Weber function E_v(z); see _anger for the implementation."""
    return _anger(ctx, 1, v, z, **kwargs)
|
| 284 |
+
|
| 285 |
+
@defun
def lommels1(ctx, u, v, z, **kwargs):
    """Lommel function s_{u,v}(z) (first kind), via a single hypercomb
    term."""
    u = ctx._convert_param(u)[0]
    v = ctx._convert_param(v)[0]
    z = ctx.convert(z)
    def h(u,v):
        b = ctx.mpq_1_2
        w = ctx.square_exp_arg(z, mult=-0.25)
        return ([u-v+1, u+v+1, z], [-1, -1, u+1], [], [], [1], \
            [b*(u-v+3),b*(u+v+3)], w),
    return ctx.hypercomb(h, [u,v], **kwargs)
|
| 296 |
+
|
| 297 |
+
@defun
def lommels2(ctx, u, v, z, **kwargs):
    """Lommel function S_{u,v}(z) (second kind): the s_{u,v} term plus
    two correction terms, as a three-term hypercomb combination."""
    u = ctx._convert_param(u)[0]
    v = ctx._convert_param(v)[0]
    z = ctx.convert(z)
    # Asymptotic expansion (GR p. 947) -- need to be careful
    # not to use for small arguments
    # def h(u,v):
    #     b = ctx.mpq_1_2
    #     w = -(z/2)**(-2)
    #     return ([z], [u-1], [], [], [b*(1-u+v)], [b*(1-u-v)], w),
    def h(u,v):
        b = ctx.mpq_1_2
        w = ctx.square_exp_arg(z, mult=-0.25)
        # T1 is the same term used by lommels1
        T1 = [u-v+1, u+v+1, z], [-1, -1, u+1], [], [], [1], [b*(u-v+3),b*(u+v+3)], w
        T2 = [2, z], [u+v-1, -v], [v, b*(u+v+1)], [b*(v-u+1)], [], [1-v], w
        T3 = [2, z], [u-v-1, v], [-v, b*(u-v+1)], [b*(1-u-v)], [], [1+v], w
        # Earlier experimental forms of T2/T3, kept for reference:
        #c1 = ctx.cospi((u-v)*b)
        #c2 = ctx.cospi((u+v)*b)
        #s = ctx.sinpi(v)
        #r1 = (u-v+1)*b
        #r2 = (u+v+1)*b
        #T2 = [c1, s, z, 2], [1, -1, -v, v], [], [-v+1], [], [-v+1], w
        #T3 = [-c2, s, z, 2], [1, -1, v, -v], [], [v+1], [], [v+1], w
        #T2 = [c1, s, z, 2], [1, -1, -v, v+u-1], [r1, r2], [-v+1], [], [-v+1], w
        #T3 = [-c2, s, z, 2], [1, -1, v, -v+u-1], [r1, r2], [v+1], [], [v+1], w
        return T1, T2, T3
    return ctx.hypercomb(h, [u,v], **kwargs)
|
| 325 |
+
|
| 326 |
+
@defun
def ber(ctx, n, z, **kwargs):
    """Kelvin function ber_n(z), as a two-term hypercomb combination."""
    n = ctx.convert(n)
    z = ctx.convert(z)
    # http://functions.wolfram.com/Bessel-TypeFunctions/KelvinBer2/26/01/02/0001/
    def h(n):
        r = -(z/4)**4
        cos, sin = ctx.cospi_sinpi(-0.75*n)
        T1 = [cos, z/2], [1, n], [], [n+1], [], [0.5, 0.5*(n+1), 0.5*n+1], r
        T2 = [sin, z/2], [1, n+2], [], [n+2], [], [1.5, 0.5*(n+3), 0.5*n+1], r
        return T1, T2
    return ctx.hypercomb(h, [n], **kwargs)
|
| 338 |
+
|
| 339 |
+
@defun
def bei(ctx, n, z, **kwargs):
    """Kelvin function bei_n(z), as a two-term hypercomb combination."""
    n = ctx.convert(n)
    z = ctx.convert(z)
    # http://functions.wolfram.com/Bessel-TypeFunctions/KelvinBei2/26/01/02/0001/
    def h(n):
        r = -(z/4)**4
        cos, sin = ctx.cospi_sinpi(0.75*n)
        T1 = [cos, z/2], [1, n+2], [], [n+2], [], [1.5, 0.5*(n+3), 0.5*n+1], r
        T2 = [sin, z/2], [1, n], [], [n+1], [], [0.5, 0.5*(n+1), 0.5*n+1], r
        return T1, T2
    return ctx.hypercomb(h, [n], **kwargs)
|
| 351 |
+
|
| 352 |
+
@defun
def ker(ctx, n, z, **kwargs):
    """Kelvin function ker_n(z), as a four-term hypercomb combination."""
    n = ctx.convert(n)
    z = ctx.convert(z)
    # http://functions.wolfram.com/Bessel-TypeFunctions/KelvinKer2/26/01/02/0001/
    def h(n):
        r = -(z/4)**4
        cos1, sin1 = ctx.cospi_sinpi(0.25*n)
        cos2, sin2 = ctx.cospi_sinpi(0.75*n)
        T1 = [2, z, 4*cos1], [-n-3, n, 1], [-n], [], [], [0.5, 0.5*(1+n), 0.5*(n+2)], r
        T2 = [2, z, -sin1], [-n-3, 2+n, 1], [-n-1], [], [], [1.5, 0.5*(3+n), 0.5*(n+2)], r
        T3 = [2, z, 4*cos2], [n-3, -n, 1], [n], [], [], [0.5, 0.5*(1-n), 1-0.5*n], r
        T4 = [2, z, -sin2], [n-3, 2-n, 1], [n-1], [], [], [1.5, 0.5*(3-n), 1-0.5*n], r
        return T1, T2, T3, T4
    return ctx.hypercomb(h, [n], **kwargs)
|
| 367 |
+
|
| 368 |
+
@defun
def kei(ctx, n, z, **kwargs):
    """Kelvin function kei_n(z), as a four-term hypercomb combination."""
    n = ctx.convert(n)
    z = ctx.convert(z)
    # http://functions.wolfram.com/Bessel-TypeFunctions/KelvinKei2/26/01/02/0001/
    def h(n):
        r = -(z/4)**4
        cos1, sin1 = ctx.cospi_sinpi(0.75*n)
        cos2, sin2 = ctx.cospi_sinpi(0.25*n)
        T1 = [-cos1, 2, z], [1, n-3, 2-n], [n-1], [], [], [1.5, 0.5*(3-n), 1-0.5*n], r
        T2 = [-sin1, 2, z], [1, n-1, -n], [n], [], [], [0.5, 0.5*(1-n), 1-0.5*n], r
        T3 = [-sin2, 2, z], [1, -n-1, n], [-n], [], [], [0.5, 0.5*(n+1), 0.5*(n+2)], r
        T4 = [-cos2, 2, z], [1, -n-3, n+2], [-n-1], [], [], [1.5, 0.5*(n+3), 0.5*(n+2)], r
        return T1, T2, T3, T4
    return ctx.hypercomb(h, [n], **kwargs)
|
| 383 |
+
|
| 384 |
+
# TODO: do this more generically?
|
| 385 |
+
def c_memo(f):
    """Decorator memoizing a constant-valued function of ctx.

    The value is stored in ctx._misc_const_cache keyed by the function
    name, together with the precision it was computed at; a cached value
    is reused only when it was computed at at least the current
    precision.
    """
    # TODO: do this more generically?
    name = f.__name__
    def f_wrapped(ctx):
        cached_prec, cached_val = ctx._misc_const_cache.get(name, (-1, 0))
        if cached_prec >= ctx.prec:
            # Cache hit; unary + rounds to the current precision.
            return +cached_val
        entry = (ctx.prec, f(ctx))
        ctx._misc_const_cache[name] = entry
        return entry[1]
    return f_wrapped
|
| 397 |
+
|
| 398 |
+
@c_memo
def _airyai_C1(ctx):
    # Ai(0) = 1/(3^(2/3)*gamma(2/3)); note cbrt(9) = 3^(2/3)
    return 1 / (ctx.cbrt(9) * ctx.gamma(ctx.mpf(2)/3))
|
| 401 |
+
|
| 402 |
+
@c_memo
def _airyai_C2(ctx):
    # Ai'(0) = -1/(3^(1/3)*gamma(1/3))
    return -1 / (ctx.cbrt(3) * ctx.gamma(ctx.mpf(1)/3))
|
| 405 |
+
|
| 406 |
+
@c_memo
def _airybi_C1(ctx):
    # Bi(0) = 1/(3^(1/6)*gamma(2/3))
    return 1 / (ctx.nthroot(3,6) * ctx.gamma(ctx.mpf(2)/3))
|
| 409 |
+
|
| 410 |
+
@c_memo
def _airybi_C2(ctx):
    # Bi'(0) = 3^(1/6)/gamma(1/3)
    return ctx.nthroot(3,6) / ctx.gamma(ctx.mpf(1)/3)
|
| 413 |
+
|
| 414 |
+
def _airybi_n2_inf(ctx):
    """Constant 3^(2/3)*gamma(2/3)/(2*pi), used by airybi as the limit
    for derivative=-2 at z = -inf."""
    prec = ctx.prec
    try:
        v = ctx.power(3,'2/3')*ctx.gamma('2/3')/(2*ctx.pi)
    finally:
        ctx.prec = prec
    return +v
|
| 421 |
+
|
| 422 |
+
# Derivatives at z = 0
|
| 423 |
+
# TODO: could be expressed more elegantly using triple factorials
|
| 424 |
+
def _airyderiv_0(ctx, z, n, ntype, which):
    """
    Value at z = 0 of the n-th derivative of Ai (which=0) or Bi
    (which=1).  Only integer n (ntype == 'Z') is supported; z is 0 at
    the call sites and is added to the result to propagate its type.
    """
    if ntype == 'Z':
        if n < 0:
            # n < 0 (iterated integral): result is z, i.e. 0
            return z
        r = ctx.mpq_1_3
        prec = ctx.prec
        try:
            ctx.prec += 10
            v = ctx.gamma((n+1)*r) * ctx.power(3,n*r) / ctx.pi
            if which == 0:
                v *= ctx.sinpi(2*(n+1)*r)
                v /= ctx.power(3,'2/3')
            else:
                v *= abs(ctx.sinpi(2*(n+1)*r))
                v /= ctx.power(3,'1/6')
        finally:
            ctx.prec = prec
        # unary + rounds to the restored precision
        return +v + z
    else:
        # singular (does the limit exist?)
        raise NotImplementedError
|
| 445 |
+
|
| 446 |
+
@defun
def airyai(ctx, z, derivative=0, **kwargs):
    """
    Airy function Ai(z).  With *derivative* = n, computes the n-th
    derivative (n > 0) or iterated integral (n < 0) instead.  Uses
    hypergeometric series around the origin and a dedicated asymptotic
    expansion for Re(z) > 4 to avoid cancellation.
    """
    z = ctx.convert(z)
    if derivative:
        n, ntype = ctx._convert_param(derivative)
    else:
        n = 0
    # Values at infinities
    if not ctx.isnormal(z) and z:
        if n and ntype == 'Z':
            if n == -1:
                # Antiderivative limits: 1/3 at +inf, -2/3 at -inf
                if z == ctx.inf:
                    return ctx.mpf(1)/3 + 1/z
                if z == ctx.ninf:
                    return ctx.mpf(-2)/3 + 1/z
            if n < -1:
                if z == ctx.inf:
                    return z
                if z == ctx.ninf:
                    return (-1)**n * (-z)
        # NOTE(review): parses as ((not n) and z == ctx.inf) or (z == ctx.ninf)
        # due to operator precedence -- confirm this matches the intent.
        if (not n) and z == ctx.inf or z == ctx.ninf:
            return 1/z
        # TODO: limits
        raise ValueError("essential singularity of Ai(z)")
    # Account for exponential scaling
    if z:
        extraprec = max(0, int(1.5*ctx.mag(z)))
    else:
        extraprec = 0
    if n:
        if n == 1:
            def h():
                # http://functions.wolfram.com/03.07.06.0005.01
                if ctx._re(z) > 4:
                    ctx.prec += extraprec
                    w = z**1.5; r = -0.75/w; u = -2*w/3
                    ctx.prec -= extraprec
                    C = -ctx.exp(u)/(2*ctx.sqrt(ctx.pi))*ctx.nthroot(z,4)
                    return ([C],[1],[],[],[(-1,6),(7,6)],[],r),
                # http://functions.wolfram.com/03.07.26.0001.01
                else:
                    ctx.prec += extraprec
                    w = z**3 / 9
                    ctx.prec -= extraprec
                    C1 = _airyai_C1(ctx) * 0.5
                    C2 = _airyai_C2(ctx)
                    T1 = [C1,z],[1,2],[],[],[],[ctx.mpq_5_3],w
                    T2 = [C2],[1],[],[],[],[ctx.mpq_1_3],w
                    return T1, T2
            return ctx.hypercomb(h, [], **kwargs)
        else:
            if z == 0:
                return _airyderiv_0(ctx, z, n, ntype, 0)
            # http://functions.wolfram.com/03.05.20.0004.01
            def h(n):
                ctx.prec += extraprec
                w = z**3/9
                ctx.prec -= extraprec
                q13,q23,q43 = ctx.mpq_1_3, ctx.mpq_2_3, ctx.mpq_4_3
                a1=q13; a2=1; b1=(1-n)*q13; b2=(2-n)*q13; b3=1-n*q13
                T1 = [3, z], [n-q23, -n], [a1], [b1,b2,b3], \
                    [a1,a2], [b1,b2,b3], w
                a1=q23; b1=(2-n)*q13; b2=1-n*q13; b3=(4-n)*q13
                T2 = [3, z, -z], [n-q43, -n, 1], [a1], [b1,b2,b3], \
                    [a1,a2], [b1,b2,b3], w
                return T1, T2
            v = ctx.hypercomb(h, [n], **kwargs)
            if ctx._is_real_type(z) and ctx.isint(n):
                v = ctx._re(v)
            return v
    else:
        def h():
            if ctx._re(z) > 4:
                # We could use 1F1, but it results in huge cancellation;
                # the following expansion is better.
                # TODO: asymptotic series for derivatives
                ctx.prec += extraprec
                w = z**1.5; r = -0.75/w; u = -2*w/3
                ctx.prec -= extraprec
                C = ctx.exp(u)/(2*ctx.sqrt(ctx.pi)*ctx.nthroot(z,4))
                return ([C],[1],[],[],[(1,6),(5,6)],[],r),
            else:
                ctx.prec += extraprec
                w = z**3 / 9
                ctx.prec -= extraprec
                C1 = _airyai_C1(ctx)
                C2 = _airyai_C2(ctx)
                T1 = [C1],[1],[],[],[],[ctx.mpq_2_3],w
                T2 = [z*C2],[1],[],[],[],[ctx.mpq_4_3],w
                return T1, T2
        return ctx.hypercomb(h, [], **kwargs)
|
| 537 |
+
|
| 538 |
+
@defun
def airybi(ctx, z, derivative=0, **kwargs):
    """
    Airy function Bi(z).  With *derivative* = n, computes the n-th
    derivative (n > 0) or iterated integral (n < 0) instead.  Uses
    hypergeometric series representations via ctx.hypercomb.
    """
    z = ctx.convert(z)
    if derivative:
        n, ntype = ctx._convert_param(derivative)
    else:
        n = 0
    # Values at infinities
    if not ctx.isnormal(z) and z:
        if n and ntype == 'Z':
            if z == ctx.inf:
                return z
            if z == ctx.ninf:
                if n == -1:
                    return 1/z
                if n == -2:
                    return _airybi_n2_inf(ctx)
                if n < -2:
                    return (-1)**n * (-z)
        if not n:
            if z == ctx.inf:
                return z
            if z == ctx.ninf:
                return 1/z
        # TODO: limits
        raise ValueError("essential singularity of Bi(z)")
    # Account for exponential scaling
    if z:
        extraprec = max(0, int(1.5*ctx.mag(z)))
    else:
        extraprec = 0
    if n:
        if n == 1:
            # http://functions.wolfram.com/03.08.26.0001.01
            def h():
                ctx.prec += extraprec
                w = z**3 / 9
                ctx.prec -= extraprec
                C1 = _airybi_C1(ctx)*0.5
                C2 = _airybi_C2(ctx)
                T1 = [C1,z],[1,2],[],[],[],[ctx.mpq_5_3],w
                T2 = [C2],[1],[],[],[],[ctx.mpq_1_3],w
                return T1, T2
            return ctx.hypercomb(h, [], **kwargs)
        else:
            if z == 0:
                return _airyderiv_0(ctx, z, n, ntype, 1)
            def h(n):
                ctx.prec += extraprec
                w = z**3/9
                ctx.prec -= extraprec
                q13,q23,q43 = ctx.mpq_1_3, ctx.mpq_2_3, ctx.mpq_4_3
                q16 = ctx.mpq_1_6
                q56 = ctx.mpq_5_6
                a1=q13; a2=1; b1=(1-n)*q13; b2=(2-n)*q13; b3=1-n*q13
                T1 = [3, z], [n-q16, -n], [a1], [b1,b2,b3], \
                    [a1,a2], [b1,b2,b3], w
                a1=q23; b1=(2-n)*q13; b2=1-n*q13; b3=(4-n)*q13
                T2 = [3, z], [n-q56, 1-n], [a1], [b1,b2,b3], \
                    [a1,a2], [b1,b2,b3], w
                return T1, T2
            v = ctx.hypercomb(h, [n], **kwargs)
            if ctx._is_real_type(z) and ctx.isint(n):
                v = ctx._re(v)
            return v
    else:
        def h():
            ctx.prec += extraprec
            w = z**3 / 9
            ctx.prec -= extraprec
            C1 = _airybi_C1(ctx)
            C2 = _airybi_C2(ctx)
            T1 = [C1],[1],[],[],[],[ctx.mpq_2_3],w
            T2 = [z*C2],[1],[],[],[],[ctx.mpq_4_3],w
            return T1, T2
        return ctx.hypercomb(h, [], **kwargs)
|
| 613 |
+
|
| 614 |
+
def _airy_zero(ctx, which, k, derivative, complex=False):
    """
    k-th zero of Ai (which=0) or Bi (which=1), or of the derivative
    when derivative=1.  For which=1 and complex=True, finds the k-th
    complex zero, started on the exp(i*pi/3) ray.  Initial guesses come
    from the asymptotic formulas; ctx.findroot refines them.
    """
    # Asymptotic formulas are given in DLMF section 9.9
    def U(t): return t**(2/3.)*(1-7/(t**2*48))
    def T(t): return t**(2/3.)*(1+5/(t**2*48))
    k = int(k)
    if k < 1:
        raise ValueError("k cannot be less than 1")
    if not derivative in (0,1):
        raise ValueError("Derivative should lie between 0 and 1")
    if which == 0:
        if derivative:
            return ctx.findroot(lambda z: ctx.airyai(z,1),
                -U(3*ctx.pi*(4*k-3)/8))
        return ctx.findroot(ctx.airyai, -T(3*ctx.pi*(4*k-1)/8))
    if which == 1 and complex == False:
        if derivative:
            return ctx.findroot(lambda z: ctx.airybi(z,1),
                -U(3*ctx.pi*(4*k-1)/8))
        return ctx.findroot(ctx.airybi, -T(3*ctx.pi*(4*k-3)/8))
    if which == 1 and complex == True:
        if derivative:
            t = 3*ctx.pi*(4*k-3)/8 + 0.75j*ctx.ln2
            s = ctx.expjpi(ctx.mpf(1)/3) * T(t)
            return ctx.findroot(lambda z: ctx.airybi(z,1), s)
        t = 3*ctx.pi*(4*k-1)/8 + 0.75j*ctx.ln2
        s = ctx.expjpi(ctx.mpf(1)/3) * U(t)
        return ctx.findroot(ctx.airybi, s)
|
| 641 |
+
|
| 642 |
+
@defun
def airyaizero(ctx, k, derivative=0):
    """k-th real zero of Ai (derivative=0) or Ai' (derivative=1)."""
    return _airy_zero(ctx, 0, k, derivative, False)
|
| 645 |
+
|
| 646 |
+
@defun
def airybizero(ctx, k, derivative=0, complex=False):
    """k-th zero of Bi (derivative=0) or Bi' (derivative=1); with
    complex=True, a complex zero (sought from a starting point on the
    exp(i*pi/3) ray) is returned instead of the real one."""
    return _airy_zero(ctx, 1, k, derivative, complex)
|
| 649 |
+
|
| 650 |
+
def _scorer(ctx, z, which, kwargs):
    """
    Common implementation for the Scorer functions Gi (which=0) and Hi
    (which=1).  Uses direct asymptotic expansions for large |z| in the
    appropriate sectors, otherwise a representation combining airybi
    with a hypergeometric term.
    """
    z = ctx.convert(z)
    if ctx.isinf(z):
        if z == ctx.inf:
            if which == 0: return 1/z
            if which == 1: return z
        if z == ctx.ninf:
            return 1/z
        raise ValueError("essential singularity")
    if z:
        extraprec = max(0, int(1.5*ctx.mag(z)))
    else:
        extraprec = 0
    if kwargs.get('derivative'):
        raise NotImplementedError
    # Direct asymptotic expansions, to avoid
    # exponentially large cancellation
    try:
        if ctx.mag(z) > 3:
            if which == 0 and abs(ctx.arg(z)) < ctx.pi/3 * 0.999:
                def h():
                    return (([ctx.pi,z],[-1,-1],[],[],[(1,3),(2,3),1],[],9/z**3),)
                return ctx.hypercomb(h, [], maxterms=ctx.prec, force_series=True)
            if which == 1 and abs(ctx.arg(-z)) < 2*ctx.pi/3 * 0.999:
                def h():
                    return (([-ctx.pi,z],[-1,-1],[],[],[(1,3),(2,3),1],[],9/z**3),)
                return ctx.hypercomb(h, [], maxterms=ctx.prec, force_series=True)
    except ctx.NoConvergence:
        pass
    def h():
        A = ctx.airybi(z, **kwargs)/3
        B = -2*ctx.pi
        if which == 1:
            # Hi differs from Gi by the sign/weight of both terms
            A *= 2
            B *= -1
        ctx.prec += extraprec
        w = z**3/9
        ctx.prec -= extraprec
        T1 = [A], [1], [], [], [], [], 0
        T2 = [B,z], [-1,2], [], [], [1], [ctx.mpq_4_3,ctx.mpq_5_3], w
        return T1, T2
    return ctx.hypercomb(h, [], **kwargs)
|
| 692 |
+
|
| 693 |
+
@defun
def scorergi(ctx, z, **kwargs):
    """Scorer function Gi(z); see _scorer for the implementation."""
    return _scorer(ctx, z, 0, kwargs)
|
| 696 |
+
|
| 697 |
+
@defun
def scorerhi(ctx, z, **kwargs):
    """Scorer function Hi(z); see _scorer for the implementation."""
    return _scorer(ctx, z, 1, kwargs)
|
| 700 |
+
|
| 701 |
+
@defun_wrapped
def coulombc(ctx, l, eta, _cache={}):
    """
    Normalization constant C_l(eta) for the Coulomb wave functions,
    computed from loggamma values.  The mutable default _cache is an
    intentional cross-call memo, keyed on (l, eta) with the precision
    the value was computed at.
    """
    if (l, eta) in _cache and _cache[l,eta][0] >= ctx.prec:
        return +_cache[l,eta][1]
    G3 = ctx.loggamma(2*l+2)
    G1 = ctx.loggamma(1+l+ctx.j*eta)
    G2 = ctx.loggamma(1+l-ctx.j*eta)
    v = 2**l * ctx.exp((-ctx.pi*eta+G1+G2)/2 - G3)
    if not (ctx.im(l) or ctx.im(eta)):
        # Real parameters give a real constant
        v = ctx.re(v)
    _cache[l,eta] = (ctx.prec, v)
    return v
|
| 713 |
+
|
| 714 |
+
@defun_wrapped
def coulombf(ctx, l, eta, z, w=1, chop=True, **kwargs):
    """
    Regular Coulomb wave function F_l(eta, z), via a single hypercomb
    term C_l(eta) * z**(l+1) * exp(jwz) * 1F1(1+l+jw*eta, 2l+2, -2jwz).
    """
    # Note: w can be either 1 or -1; the other may be better in some cases
    # TODO: check that chop=True chops when and only when it should
    #ctx.prec += 10
    def h(l, eta):
        try:
            jw = ctx.j*w
            jwz = ctx.fmul(jw, z, exact=True)
            jwz2 = ctx.fmul(jwz, -2, exact=True)
            C = ctx.coulombc(l, eta)
            T1 = [C, z, ctx.exp(jwz)], [1, l+1, 1], [], [], [1+l+jw*eta], \
                [2*l+2], jwz2
        except ValueError:
            # coulombc may fail for singular parameters; return a zero term
            T1 = [0], [-1], [], [], [], [], 0
        return (T1,)
    v = ctx.hypercomb(h, [l,eta], **kwargs)
    if chop and (not ctx.im(l)) and (not ctx.im(eta)) and (not ctx.im(z)) and \
        (ctx.re(z) >= 0):
        # Real inputs in the right half-plane give a real result
        v = ctx.re(v)
    return v
|
| 736 |
+
|
| 737 |
+
@defun_wrapped
def _coulomb_chi(ctx, l, eta, _cache={}):
    """
    Phase-like quantity chi(l, eta) used by coulombg, computed as an
    accurately-summed combination of loggamma terms.  The mutable
    default _cache is an intentional cross-call memo keyed on (l, eta).
    """
    if (l, eta) in _cache and _cache[l,eta][0] >= ctx.prec:
        return _cache[l,eta][1]
    def terms():
        l2 = -l-1
        jeta = ctx.j*eta
        return [ctx.loggamma(1+l+jeta) * (-0.5j),
            ctx.loggamma(1+l-jeta) * (0.5j),
            ctx.loggamma(1+l2+jeta) * (0.5j),
            ctx.loggamma(1+l2-jeta) * (-0.5j),
            -(l+0.5)*ctx.pi]
    v = ctx.sum_accurately(terms, 1)
    _cache[l,eta] = (ctx.prec, v)
    return v
|
| 752 |
+
|
| 753 |
+
@defun_wrapped
def coulombg(ctx, l, eta, z, w=1, chop=True, **kwargs):
    """
    Irregular Coulomb wave function G_l(eta, z), as a two-term
    hypercomb combination mixing orders l and -l-1 through the phase
    chi = _coulomb_chi(l, eta).
    """
    # Note: w can be either 1 or -1; the other may be better in some cases
    # TODO: check that chop=True chops when and only when it should
    if not ctx._im(l):
        l = ctx._re(l) # XXX: for isint
    def h(l, eta):
        # Force perturbation for integers and half-integers
        if ctx.isint(l*2):
            T1 = [0], [-1], [], [], [], [], 0
            return (T1,)
        l2 = -l-1
        try:
            chi = ctx._coulomb_chi(l, eta)
            jw = ctx.j*w
            s = ctx.sin(chi); c = ctx.cos(chi)
            C1 = ctx.coulombc(l,eta)
            C2 = ctx.coulombc(l2,eta)
            u = ctx.exp(jw*z)
            x = -2*jw*z
            T1 = [s, C1, z, u, c], [-1, 1, l+1, 1, 1], [], [], \
                [1+l+jw*eta], [2*l+2], x
            T2 = [-s, C2, z, u], [-1, 1, l2+1, 1], [], [], \
                [1+l2+jw*eta], [2*l2+2], x
            return T1, T2
        except ValueError:
            # Singular parameters: return a zero term so hypercomb retries
            T1 = [0], [-1], [], [], [], [], 0
            return (T1,)
    v = ctx.hypercomb(h, [l,eta], **kwargs)
    if chop and (not ctx._im(l)) and (not ctx._im(eta)) and (not ctx._im(z)) and \
        (ctx._re(z) >= 0):
        # Real inputs in the right half-plane give a real result
        v = ctx._re(v)
    return v
|
| 787 |
+
|
| 788 |
+
def mcmahon(ctx,kind,prime,v,m):
|
| 789 |
+
"""
|
| 790 |
+
Computes an estimate for the location of the Bessel function zero
|
| 791 |
+
j_{v,m}, y_{v,m}, j'_{v,m} or y'_{v,m} using McMahon's asymptotic
|
| 792 |
+
expansion (Abramowitz & Stegun 9.5.12-13, DLMF 20.21(vi)).
|
| 793 |
+
|
| 794 |
+
Returns (r,err) where r is the estimated location of the root
|
| 795 |
+
and err is a positive number estimating the error of the
|
| 796 |
+
asymptotic expansion.
|
| 797 |
+
"""
|
| 798 |
+
u = 4*v**2
|
| 799 |
+
if kind == 1 and not prime: b = (4*m+2*v-1)*ctx.pi/4
|
| 800 |
+
if kind == 2 and not prime: b = (4*m+2*v-3)*ctx.pi/4
|
| 801 |
+
if kind == 1 and prime: b = (4*m+2*v-3)*ctx.pi/4
|
| 802 |
+
if kind == 2 and prime: b = (4*m+2*v-1)*ctx.pi/4
|
| 803 |
+
if not prime:
|
| 804 |
+
s1 = b
|
| 805 |
+
s2 = -(u-1)/(8*b)
|
| 806 |
+
s3 = -4*(u-1)*(7*u-31)/(3*(8*b)**3)
|
| 807 |
+
s4 = -32*(u-1)*(83*u**2-982*u+3779)/(15*(8*b)**5)
|
| 808 |
+
s5 = -64*(u-1)*(6949*u**3-153855*u**2+1585743*u-6277237)/(105*(8*b)**7)
|
| 809 |
+
if prime:
|
| 810 |
+
s1 = b
|
| 811 |
+
s2 = -(u+3)/(8*b)
|
| 812 |
+
s3 = -4*(7*u**2+82*u-9)/(3*(8*b)**3)
|
| 813 |
+
s4 = -32*(83*u**3+2075*u**2-3039*u+3537)/(15*(8*b)**5)
|
| 814 |
+
s5 = -64*(6949*u**4+296492*u**3-1248002*u**2+7414380*u-5853627)/(105*(8*b)**7)
|
| 815 |
+
terms = [s1,s2,s3,s4,s5]
|
| 816 |
+
s = s1
|
| 817 |
+
err = 0.0
|
| 818 |
+
for i in range(1,len(terms)):
|
| 819 |
+
if abs(terms[i]) < abs(terms[i-1]):
|
| 820 |
+
s += terms[i]
|
| 821 |
+
else:
|
| 822 |
+
err = abs(terms[i])
|
| 823 |
+
if i == len(terms)-1:
|
| 824 |
+
err = abs(terms[-1])
|
| 825 |
+
return s, err
|
| 826 |
+
|
| 827 |
+
def generalized_bisection(ctx,f,a,b,n):
    """
    Given f known to have exactly n simple roots within [a,b],
    return a list of n intervals isolating the roots
    and having opposite signs at the endpoints.

    TODO: this can be optimized, e.g. by reusing evaluation points.
    """
    if n < 1:
        raise ValueError("n cannot be less than 1")
    num_points = n+1
    while True:
        # Sample the interval and keep each subinterval with a sign change.
        grid = ctx.linspace(a, b, num_points)
        signs = [ctx.sign(f(x)) for x in grid]
        brackets = [(grid[i], grid[i+1]) for i in range(num_points-1)
                    if signs[i]*signs[i+1] == -1]
        if len(brackets) == n:
            return brackets
        # Not every root was isolated -- refine the sampling and retry.
        num_points = num_points*2
|
| 848 |
+
|
| 849 |
+
def find_in_interval(ctx, f, ab):
    # Refine the bracketing interval ab = (a, b) to a root of f with the
    # Illinois solver; verify=False accepts the result without
    # findroot's final verification step.
    return ctx.findroot(f, ab, solver='illinois', verify=False)
|
| 851 |
+
|
| 852 |
+
def bessel_zero(ctx, kind, prime, v, m, isoltol=0.01, _interval_cache={}):
    """
    m-th positive zero of J_v (kind=1) or Y_v (kind=2), or of the
    derivative when prime=1.  Uses McMahon's estimate directly when its
    error bound isolates the root; otherwise isolates the first n roots
    by bisection, caching the isolating intervals.  The mutable default
    _interval_cache is an intentional cross-call memo.
    """
    prec = ctx.prec
    workprec = max(prec, ctx.mag(v), ctx.mag(m))+10
    try:
        ctx.prec = workprec
        v = ctx.mpf(v)
        m = int(m)
        prime = int(prime)
        if v < 0:
            raise ValueError("v cannot be negative")
        if m < 1:
            raise ValueError("m cannot be less than 1")
        if not prime in (0,1):
            raise ValueError("prime should lie between 0 and 1")
        if kind == 1:
            if prime: f = lambda x: ctx.besselj(v,x,derivative=1)
            else: f = lambda x: ctx.besselj(v,x)
        if kind == 2:
            if prime: f = lambda x: ctx.bessely(v,x,derivative=1)
            else: f = lambda x: ctx.bessely(v,x)
        # The first root of J' is very close to 0 for small
        # orders, and this needs to be special-cased
        if kind == 1 and prime and m == 1:
            if v == 0:
                return ctx.zero
            if v <= 1:
                # TODO: use v <= j'_{v,1} < y_{v,1}?
                r = 2*ctx.sqrt(v*(1+v)/(v+2))
                return find_in_interval(ctx, f, (r/10, 2*r))
        if (kind,prime,v,m) in _interval_cache:
            return find_in_interval(ctx, f, _interval_cache[kind,prime,v,m])
        r, err = mcmahon(ctx, kind, prime, v, m)
        if err < isoltol:
            # McMahon's estimate already isolates the root
            return find_in_interval(ctx, f, (r-isoltol, r+isoltol))
        # An x such that 0 < x < r_{v,1}
        if kind == 1 and not prime: low = 2.4
        if kind == 1 and prime: low = 1.8
        if kind == 2 and not prime: low = 0.8
        if kind == 2 and prime: low = 2.0
        # Find an index n >= m+1 where McMahon is reliable, then bisect
        # to isolate all n leading roots and cache the intervals
        n = m+1
        while 1:
            r1, err = mcmahon(ctx, kind, prime, v, n)
            if err < isoltol:
                r2, err2 = mcmahon(ctx, kind, prime, v, n+1)
                intervals = generalized_bisection(ctx, f, low, 0.5*(r1+r2), n)
                for k, ab in enumerate(intervals):
                    _interval_cache[kind,prime,v,k+1] = ab
                return find_in_interval(ctx, f, intervals[m-1])
            else:
                n = n*2
    finally:
        ctx.prec = prec
|
| 904 |
+
|
| 905 |
+
@defun
def besseljzero(ctx, v, m, derivative=0):
    r"""
    For a real order `\nu \ge 0` and a positive integer `m`, returns
    `j_{\nu,m}`, the `m`-th positive zero of the Bessel function of the
    first kind `J_{\nu}(z)` (see :func:`~mpmath.besselj`). Alternatively,
    with *derivative=1*, gives the first nonnegative simple zero
    `j'_{\nu,m}` of `J'_{\nu}(z)`.

    The indexing convention is that used by Abramowitz & Stegun
    and the DLMF. Note the special case `j'_{0,1} = 0`, while all other
    zeros are positive. In effect, only simple zeros are counted
    (all zeros of Bessel functions are simple except possibly `z = 0`)
    and `j_{\nu,m}` becomes a monotonic function of both `\nu`
    and `m`.

    The zeros are interlaced according to the inequalities

    .. math ::

        j'_{\nu,k} < j_{\nu,k} < j'_{\nu,k+1}

        j_{\nu,1} < j_{\nu+1,1} < j_{\nu,2} < j_{\nu+1,2} < j_{\nu,3} < \cdots

    **Examples**

    Initial zeros of the Bessel functions `J_0(z), J_1(z), J_2(z)`::

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> besseljzero(0,1); besseljzero(0,2); besseljzero(0,3)
        2.404825557695772768621632
        5.520078110286310649596604
        8.653727912911012216954199
        >>> besseljzero(1,1); besseljzero(1,2); besseljzero(1,3)
        3.831705970207512315614436
        7.01558666981561875353705
        10.17346813506272207718571
        >>> besseljzero(2,1); besseljzero(2,2); besseljzero(2,3)
        5.135622301840682556301402
        8.417244140399864857783614
        11.61984117214905942709415

    Initial zeros of `J'_0(z), J'_1(z), J'_2(z)`::

        >>> besseljzero(0,1,1); besseljzero(0,2,1); besseljzero(0,3,1)
        0.0
        3.831705970207512315614436
        7.01558666981561875353705
        >>> besseljzero(1,1,1); besseljzero(1,2,1); besseljzero(1,3,1)
        1.84118378134065930264363
        5.331442773525032636884016
        8.536316366346285834358961
        >>> besseljzero(2,1,1); besseljzero(2,2,1); besseljzero(2,3,1)
        3.054236928227140322755932
        6.706133194158459146634394
        9.969467823087595793179143

    Zeros with large index::

        >>> besseljzero(0,100); besseljzero(0,1000); besseljzero(0,10000)
        313.3742660775278447196902
        3140.807295225078628895545
        31415.14114171350798533666
        >>> besseljzero(5,100); besseljzero(5,1000); besseljzero(5,10000)
        321.1893195676003157339222
        3148.657306813047523500494
        31422.9947255486291798943
        >>> besseljzero(0,100,1); besseljzero(0,1000,1); besseljzero(0,10000,1)
        311.8018681873704508125112
        3139.236339643802482833973
        31413.57032947022399485808

    Zeros of functions with large order::

        >>> besseljzero(50,1)
        57.11689916011917411936228
        >>> besseljzero(50,2)
        62.80769876483536093435393
        >>> besseljzero(50,100)
        388.6936600656058834640981
        >>> besseljzero(50,1,1)
        52.99764038731665010944037
        >>> besseljzero(50,2,1)
        60.02631933279942589882363
        >>> besseljzero(50,100,1)
        387.1083151608726181086283

    Zeros of functions with fractional order::

        >>> besseljzero(0.5,1); besseljzero(1.5,1); besseljzero(2.25,4)
        3.141592653589793238462643
        4.493409457909064175307881
        15.15657692957458622921634

    Both `J_{\nu}(z)` and `J'_{\nu}(z)` can be expressed as infinite
    products over their zeros::

        >>> v,z = 2, mpf(1)
        >>> (z/2)**v/gamma(v+1) * \
        ...     nprod(lambda k: 1-(z/besseljzero(v,k))**2, [1,inf])
        ...
        0.1149034849319004804696469
        >>> besselj(v,z)
        0.1149034849319004804696469
        >>> (z/2)**(v-1)/2/gamma(v) * \
        ...     nprod(lambda k: 1-(z/besseljzero(v,k,1))**2, [1,inf])
        ...
        0.2102436158811325550203884
        >>> besselj(v,z,1)
        0.2102436158811325550203884

    """
    # unary + rounds the result down to the caller's precision
    return +bessel_zero(ctx, 1, derivative, v, m)
|
| 1018 |
+
|
| 1019 |
+
@defun
|
| 1020 |
+
def besselyzero(ctx, v, m, derivative=0):
|
| 1021 |
+
r"""
|
| 1022 |
+
For a real order `\nu \ge 0` and a positive integer `m`, returns
|
| 1023 |
+
`y_{\nu,m}`, the `m`-th positive zero of the Bessel function of the
|
| 1024 |
+
second kind `Y_{\nu}(z)` (see :func:`~mpmath.bessely`). Alternatively,
|
| 1025 |
+
with *derivative=1*, gives the first positive zero `y'_{\nu,m}` of
|
| 1026 |
+
`Y'_{\nu}(z)`.
|
| 1027 |
+
|
| 1028 |
+
The zeros are interlaced according to the inequalities
|
| 1029 |
+
|
| 1030 |
+
.. math ::
|
| 1031 |
+
|
| 1032 |
+
y_{\nu,k} < y'_{\nu,k} < y_{\nu,k+1}
|
| 1033 |
+
|
| 1034 |
+
y_{\nu,1} < y_{\nu+1,2} < y_{\nu,2} < y_{\nu+1,2} < y_{\nu,3} < \cdots
|
| 1035 |
+
|
| 1036 |
+
**Examples**
|
| 1037 |
+
|
| 1038 |
+
Initial zeros of the Bessel functions `Y_0(z), Y_1(z), Y_2(z)`::
|
| 1039 |
+
|
| 1040 |
+
>>> from mpmath import *
|
| 1041 |
+
>>> mp.dps = 25; mp.pretty = True
|
| 1042 |
+
>>> besselyzero(0,1); besselyzero(0,2); besselyzero(0,3)
|
| 1043 |
+
0.8935769662791675215848871
|
| 1044 |
+
3.957678419314857868375677
|
| 1045 |
+
7.086051060301772697623625
|
| 1046 |
+
>>> besselyzero(1,1); besselyzero(1,2); besselyzero(1,3)
|
| 1047 |
+
2.197141326031017035149034
|
| 1048 |
+
5.429681040794135132772005
|
| 1049 |
+
8.596005868331168926429606
|
| 1050 |
+
>>> besselyzero(2,1); besselyzero(2,2); besselyzero(2,3)
|
| 1051 |
+
3.384241767149593472701426
|
| 1052 |
+
6.793807513268267538291167
|
| 1053 |
+
10.02347797936003797850539
|
| 1054 |
+
|
| 1055 |
+
Initial zeros of `Y'_0(z), Y'_1(z), Y'_2(z)`::
|
| 1056 |
+
|
| 1057 |
+
>>> besselyzero(0,1,1); besselyzero(0,2,1); besselyzero(0,3,1)
|
| 1058 |
+
2.197141326031017035149034
|
| 1059 |
+
5.429681040794135132772005
|
| 1060 |
+
8.596005868331168926429606
|
| 1061 |
+
>>> besselyzero(1,1,1); besselyzero(1,2,1); besselyzero(1,3,1)
|
| 1062 |
+
3.683022856585177699898967
|
| 1063 |
+
6.941499953654175655751944
|
| 1064 |
+
10.12340465543661307978775
|
| 1065 |
+
>>> besselyzero(2,1,1); besselyzero(2,2,1); besselyzero(2,3,1)
|
| 1066 |
+
5.002582931446063945200176
|
| 1067 |
+
8.350724701413079526349714
|
| 1068 |
+
11.57419546521764654624265
|
| 1069 |
+
|
| 1070 |
+
Zeros with large index::
|
| 1071 |
+
|
| 1072 |
+
>>> besselyzero(0,100); besselyzero(0,1000); besselyzero(0,10000)
|
| 1073 |
+
311.8034717601871549333419
|
| 1074 |
+
3139.236498918198006794026
|
| 1075 |
+
31413.57034538691205229188
|
| 1076 |
+
>>> besselyzero(5,100); besselyzero(5,1000); besselyzero(5,10000)
|
| 1077 |
+
319.6183338562782156235062
|
| 1078 |
+
3147.086508524556404473186
|
| 1079 |
+
31421.42392920214673402828
|
| 1080 |
+
>>> besselyzero(0,100,1); besselyzero(0,1000,1); besselyzero(0,10000,1)
|
| 1081 |
+
313.3726705426359345050449
|
| 1082 |
+
3140.807136030340213610065
|
| 1083 |
+
31415.14112579761578220175
|
| 1084 |
+
|
| 1085 |
+
Zeros of functions with large order::
|
| 1086 |
+
|
| 1087 |
+
>>> besselyzero(50,1)
|
| 1088 |
+
53.50285882040036394680237
|
| 1089 |
+
>>> besselyzero(50,2)
|
| 1090 |
+
60.11244442774058114686022
|
| 1091 |
+
>>> besselyzero(50,100)
|
| 1092 |
+
387.1096509824943957706835
|
| 1093 |
+
>>> besselyzero(50,1,1)
|
| 1094 |
+
56.96290427516751320063605
|
| 1095 |
+
>>> besselyzero(50,2,1)
|
| 1096 |
+
62.74888166945933944036623
|
| 1097 |
+
>>> besselyzero(50,100,1)
|
| 1098 |
+
388.6923300548309258355475
|
| 1099 |
+
|
| 1100 |
+
Zeros of functions with fractional order::
|
| 1101 |
+
|
| 1102 |
+
>>> besselyzero(0.5,1); besselyzero(1.5,1); besselyzero(2.25,4)
|
| 1103 |
+
1.570796326794896619231322
|
| 1104 |
+
2.798386045783887136720249
|
| 1105 |
+
13.56721208770735123376018
|
| 1106 |
+
|
| 1107 |
+
"""
|
| 1108 |
+
return +bessel_zero(ctx, 2, derivative, v, m)
|
vllm/lib/python3.10/site-packages/mpmath/functions/elliptic.py
ADDED
|
@@ -0,0 +1,1431 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
r"""
|
| 2 |
+
Elliptic functions historically comprise the elliptic integrals
|
| 3 |
+
and their inverses, and originate from the problem of computing the
|
| 4 |
+
arc length of an ellipse. From a more modern point of view,
|
| 5 |
+
an elliptic function is defined as a doubly periodic function, i.e.
|
| 6 |
+
a function which satisfies
|
| 7 |
+
|
| 8 |
+
.. math ::
|
| 9 |
+
|
| 10 |
+
f(z + 2 \omega_1) = f(z + 2 \omega_2) = f(z)
|
| 11 |
+
|
| 12 |
+
for some half-periods `\omega_1, \omega_2` with
|
| 13 |
+
`\mathrm{Im}[\omega_1 / \omega_2] > 0`. The canonical elliptic
|
| 14 |
+
functions are the Jacobi elliptic functions. More broadly, this section
|
| 15 |
+
includes quasi-doubly periodic functions (such as the Jacobi theta
|
| 16 |
+
functions) and other functions useful in the study of elliptic functions.
|
| 17 |
+
|
| 18 |
+
Many different conventions for the arguments of
|
| 19 |
+
elliptic functions are in use. It is even standard to use
|
| 20 |
+
different parameterizations for different functions in the same
|
| 21 |
+
text or software (and mpmath is no exception).
|
| 22 |
+
The usual parameters are the elliptic nome `q`, which usually
|
| 23 |
+
must satisfy `|q| < 1`; the elliptic parameter `m` (an arbitrary
|
| 24 |
+
complex number); the elliptic modulus `k` (an arbitrary complex
|
| 25 |
+
number); and the half-period ratio `\tau`, which usually must
|
| 26 |
+
satisfy `\mathrm{Im}[\tau] > 0`.
|
| 27 |
+
These quantities can be expressed in terms of each other
|
| 28 |
+
using the following relations:
|
| 29 |
+
|
| 30 |
+
.. math ::
|
| 31 |
+
|
| 32 |
+
m = k^2
|
| 33 |
+
|
| 34 |
+
.. math ::
|
| 35 |
+
|
| 36 |
+
\tau = i \frac{K(1-m)}{K(m)}
|
| 37 |
+
|
| 38 |
+
.. math ::
|
| 39 |
+
|
| 40 |
+
q = e^{i \pi \tau}
|
| 41 |
+
|
| 42 |
+
.. math ::
|
| 43 |
+
|
| 44 |
+
k = \frac{\vartheta_2^2(q)}{\vartheta_3^2(q)}
|
| 45 |
+
|
| 46 |
+
In addition, an alternative definition is used for the nome in
|
| 47 |
+
number theory, which we here denote by q-bar:
|
| 48 |
+
|
| 49 |
+
.. math ::
|
| 50 |
+
|
| 51 |
+
\bar{q} = q^2 = e^{2 i \pi \tau}
|
| 52 |
+
|
| 53 |
+
For convenience, mpmath provides functions to convert
|
| 54 |
+
between the various parameters (:func:`~mpmath.qfrom`, :func:`~mpmath.mfrom`,
|
| 55 |
+
:func:`~mpmath.kfrom`, :func:`~mpmath.taufrom`, :func:`~mpmath.qbarfrom`).
|
| 56 |
+
|
| 57 |
+
**References**
|
| 58 |
+
|
| 59 |
+
1. [AbramowitzStegun]_
|
| 60 |
+
|
| 61 |
+
2. [WhittakerWatson]_
|
| 62 |
+
|
| 63 |
+
"""
|
| 64 |
+
|
| 65 |
+
from .functions import defun, defun_wrapped
|
| 66 |
+
|
| 67 |
+
@defun_wrapped
|
| 68 |
+
def eta(ctx, tau):
|
| 69 |
+
r"""
|
| 70 |
+
Returns the Dedekind eta function of tau in the upper half-plane.
|
| 71 |
+
|
| 72 |
+
>>> from mpmath import *
|
| 73 |
+
>>> mp.dps = 25; mp.pretty = True
|
| 74 |
+
>>> eta(1j); gamma(0.25) / (2*pi**0.75)
|
| 75 |
+
(0.7682254223260566590025942 + 0.0j)
|
| 76 |
+
0.7682254223260566590025942
|
| 77 |
+
>>> tau = sqrt(2) + sqrt(5)*1j
|
| 78 |
+
>>> eta(-1/tau); sqrt(-1j*tau) * eta(tau)
|
| 79 |
+
(0.9022859908439376463573294 + 0.07985093673948098408048575j)
|
| 80 |
+
(0.9022859908439376463573295 + 0.07985093673948098408048575j)
|
| 81 |
+
>>> eta(tau+1); exp(pi*1j/12) * eta(tau)
|
| 82 |
+
(0.4493066139717553786223114 + 0.3290014793877986663915939j)
|
| 83 |
+
(0.4493066139717553786223114 + 0.3290014793877986663915939j)
|
| 84 |
+
>>> f = lambda z: diff(eta, z) / eta(z)
|
| 85 |
+
>>> chop(36*diff(f,tau)**2 - 24*diff(f,tau,2)*f(tau) + diff(f,tau,3))
|
| 86 |
+
0.0
|
| 87 |
+
|
| 88 |
+
"""
|
| 89 |
+
if ctx.im(tau) <= 0.0:
|
| 90 |
+
raise ValueError("eta is only defined in the upper half-plane")
|
| 91 |
+
q = ctx.expjpi(tau/12)
|
| 92 |
+
return q * ctx.qp(q**24)
|
| 93 |
+
|
| 94 |
+
def nome(ctx, m):
|
| 95 |
+
m = ctx.convert(m)
|
| 96 |
+
if not m:
|
| 97 |
+
return m
|
| 98 |
+
if m == ctx.one:
|
| 99 |
+
return m
|
| 100 |
+
if ctx.isnan(m):
|
| 101 |
+
return m
|
| 102 |
+
if ctx.isinf(m):
|
| 103 |
+
if m == ctx.ninf:
|
| 104 |
+
return type(m)(-1)
|
| 105 |
+
else:
|
| 106 |
+
return ctx.mpc(-1)
|
| 107 |
+
a = ctx.ellipk(ctx.one-m)
|
| 108 |
+
b = ctx.ellipk(m)
|
| 109 |
+
v = ctx.exp(-ctx.pi*a/b)
|
| 110 |
+
if not ctx._im(m) and ctx._re(m) < 1:
|
| 111 |
+
if ctx._is_real_type(m):
|
| 112 |
+
return v.real
|
| 113 |
+
else:
|
| 114 |
+
return v.real + 0j
|
| 115 |
+
elif m == 2:
|
| 116 |
+
v = ctx.mpc(0, v.imag)
|
| 117 |
+
return v
|
| 118 |
+
|
| 119 |
+
@defun_wrapped
|
| 120 |
+
def qfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
|
| 121 |
+
r"""
|
| 122 |
+
Returns the elliptic nome `q`, given any of `q, m, k, \tau, \bar{q}`::
|
| 123 |
+
|
| 124 |
+
>>> from mpmath import *
|
| 125 |
+
>>> mp.dps = 25; mp.pretty = True
|
| 126 |
+
>>> qfrom(q=0.25)
|
| 127 |
+
0.25
|
| 128 |
+
>>> qfrom(m=mfrom(q=0.25))
|
| 129 |
+
0.25
|
| 130 |
+
>>> qfrom(k=kfrom(q=0.25))
|
| 131 |
+
0.25
|
| 132 |
+
>>> qfrom(tau=taufrom(q=0.25))
|
| 133 |
+
(0.25 + 0.0j)
|
| 134 |
+
>>> qfrom(qbar=qbarfrom(q=0.25))
|
| 135 |
+
0.25
|
| 136 |
+
|
| 137 |
+
"""
|
| 138 |
+
if q is not None:
|
| 139 |
+
return ctx.convert(q)
|
| 140 |
+
if m is not None:
|
| 141 |
+
return nome(ctx, m)
|
| 142 |
+
if k is not None:
|
| 143 |
+
return nome(ctx, ctx.convert(k)**2)
|
| 144 |
+
if tau is not None:
|
| 145 |
+
return ctx.expjpi(tau)
|
| 146 |
+
if qbar is not None:
|
| 147 |
+
return ctx.sqrt(qbar)
|
| 148 |
+
|
| 149 |
+
@defun_wrapped
|
| 150 |
+
def qbarfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
|
| 151 |
+
r"""
|
| 152 |
+
Returns the number-theoretic nome `\bar q`, given any of
|
| 153 |
+
`q, m, k, \tau, \bar{q}`::
|
| 154 |
+
|
| 155 |
+
>>> from mpmath import *
|
| 156 |
+
>>> mp.dps = 25; mp.pretty = True
|
| 157 |
+
>>> qbarfrom(qbar=0.25)
|
| 158 |
+
0.25
|
| 159 |
+
>>> qbarfrom(q=qfrom(qbar=0.25))
|
| 160 |
+
0.25
|
| 161 |
+
>>> qbarfrom(m=extraprec(20)(mfrom)(qbar=0.25)) # ill-conditioned
|
| 162 |
+
0.25
|
| 163 |
+
>>> qbarfrom(k=extraprec(20)(kfrom)(qbar=0.25)) # ill-conditioned
|
| 164 |
+
0.25
|
| 165 |
+
>>> qbarfrom(tau=taufrom(qbar=0.25))
|
| 166 |
+
(0.25 + 0.0j)
|
| 167 |
+
|
| 168 |
+
"""
|
| 169 |
+
if qbar is not None:
|
| 170 |
+
return ctx.convert(qbar)
|
| 171 |
+
if q is not None:
|
| 172 |
+
return ctx.convert(q) ** 2
|
| 173 |
+
if m is not None:
|
| 174 |
+
return nome(ctx, m) ** 2
|
| 175 |
+
if k is not None:
|
| 176 |
+
return nome(ctx, ctx.convert(k)**2) ** 2
|
| 177 |
+
if tau is not None:
|
| 178 |
+
return ctx.expjpi(2*tau)
|
| 179 |
+
|
| 180 |
+
@defun_wrapped
|
| 181 |
+
def taufrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
|
| 182 |
+
r"""
|
| 183 |
+
Returns the elliptic half-period ratio `\tau`, given any of
|
| 184 |
+
`q, m, k, \tau, \bar{q}`::
|
| 185 |
+
|
| 186 |
+
>>> from mpmath import *
|
| 187 |
+
>>> mp.dps = 25; mp.pretty = True
|
| 188 |
+
>>> taufrom(tau=0.5j)
|
| 189 |
+
(0.0 + 0.5j)
|
| 190 |
+
>>> taufrom(q=qfrom(tau=0.5j))
|
| 191 |
+
(0.0 + 0.5j)
|
| 192 |
+
>>> taufrom(m=mfrom(tau=0.5j))
|
| 193 |
+
(0.0 + 0.5j)
|
| 194 |
+
>>> taufrom(k=kfrom(tau=0.5j))
|
| 195 |
+
(0.0 + 0.5j)
|
| 196 |
+
>>> taufrom(qbar=qbarfrom(tau=0.5j))
|
| 197 |
+
(0.0 + 0.5j)
|
| 198 |
+
|
| 199 |
+
"""
|
| 200 |
+
if tau is not None:
|
| 201 |
+
return ctx.convert(tau)
|
| 202 |
+
if m is not None:
|
| 203 |
+
m = ctx.convert(m)
|
| 204 |
+
return ctx.j*ctx.ellipk(1-m)/ctx.ellipk(m)
|
| 205 |
+
if k is not None:
|
| 206 |
+
k = ctx.convert(k)
|
| 207 |
+
return ctx.j*ctx.ellipk(1-k**2)/ctx.ellipk(k**2)
|
| 208 |
+
if q is not None:
|
| 209 |
+
return ctx.log(q) / (ctx.pi*ctx.j)
|
| 210 |
+
if qbar is not None:
|
| 211 |
+
qbar = ctx.convert(qbar)
|
| 212 |
+
return ctx.log(qbar) / (2*ctx.pi*ctx.j)
|
| 213 |
+
|
| 214 |
+
@defun_wrapped
|
| 215 |
+
def kfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
|
| 216 |
+
r"""
|
| 217 |
+
Returns the elliptic modulus `k`, given any of
|
| 218 |
+
`q, m, k, \tau, \bar{q}`::
|
| 219 |
+
|
| 220 |
+
>>> from mpmath import *
|
| 221 |
+
>>> mp.dps = 25; mp.pretty = True
|
| 222 |
+
>>> kfrom(k=0.25)
|
| 223 |
+
0.25
|
| 224 |
+
>>> kfrom(m=mfrom(k=0.25))
|
| 225 |
+
0.25
|
| 226 |
+
>>> kfrom(q=qfrom(k=0.25))
|
| 227 |
+
0.25
|
| 228 |
+
>>> kfrom(tau=taufrom(k=0.25))
|
| 229 |
+
(0.25 + 0.0j)
|
| 230 |
+
>>> kfrom(qbar=qbarfrom(k=0.25))
|
| 231 |
+
0.25
|
| 232 |
+
|
| 233 |
+
As `q \to 1` and `q \to -1`, `k` rapidly approaches
|
| 234 |
+
`1` and `i \infty` respectively::
|
| 235 |
+
|
| 236 |
+
>>> kfrom(q=0.75)
|
| 237 |
+
0.9999999999999899166471767
|
| 238 |
+
>>> kfrom(q=-0.75)
|
| 239 |
+
(0.0 + 7041781.096692038332790615j)
|
| 240 |
+
>>> kfrom(q=1)
|
| 241 |
+
1
|
| 242 |
+
>>> kfrom(q=-1)
|
| 243 |
+
(0.0 + +infj)
|
| 244 |
+
"""
|
| 245 |
+
if k is not None:
|
| 246 |
+
return ctx.convert(k)
|
| 247 |
+
if m is not None:
|
| 248 |
+
return ctx.sqrt(m)
|
| 249 |
+
if tau is not None:
|
| 250 |
+
q = ctx.expjpi(tau)
|
| 251 |
+
if qbar is not None:
|
| 252 |
+
q = ctx.sqrt(qbar)
|
| 253 |
+
if q == 1:
|
| 254 |
+
return q
|
| 255 |
+
if q == -1:
|
| 256 |
+
return ctx.mpc(0,'inf')
|
| 257 |
+
return (ctx.jtheta(2,0,q)/ctx.jtheta(3,0,q))**2
|
| 258 |
+
|
| 259 |
+
@defun_wrapped
|
| 260 |
+
def mfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
|
| 261 |
+
r"""
|
| 262 |
+
Returns the elliptic parameter `m`, given any of
|
| 263 |
+
`q, m, k, \tau, \bar{q}`::
|
| 264 |
+
|
| 265 |
+
>>> from mpmath import *
|
| 266 |
+
>>> mp.dps = 25; mp.pretty = True
|
| 267 |
+
>>> mfrom(m=0.25)
|
| 268 |
+
0.25
|
| 269 |
+
>>> mfrom(q=qfrom(m=0.25))
|
| 270 |
+
0.25
|
| 271 |
+
>>> mfrom(k=kfrom(m=0.25))
|
| 272 |
+
0.25
|
| 273 |
+
>>> mfrom(tau=taufrom(m=0.25))
|
| 274 |
+
(0.25 + 0.0j)
|
| 275 |
+
>>> mfrom(qbar=qbarfrom(m=0.25))
|
| 276 |
+
0.25
|
| 277 |
+
|
| 278 |
+
As `q \to 1` and `q \to -1`, `m` rapidly approaches
|
| 279 |
+
`1` and `-\infty` respectively::
|
| 280 |
+
|
| 281 |
+
>>> mfrom(q=0.75)
|
| 282 |
+
0.9999999999999798332943533
|
| 283 |
+
>>> mfrom(q=-0.75)
|
| 284 |
+
-49586681013729.32611558353
|
| 285 |
+
>>> mfrom(q=1)
|
| 286 |
+
1.0
|
| 287 |
+
>>> mfrom(q=-1)
|
| 288 |
+
-inf
|
| 289 |
+
|
| 290 |
+
The inverse nome as a function of `q` has an integer
|
| 291 |
+
Taylor series expansion::
|
| 292 |
+
|
| 293 |
+
>>> taylor(lambda q: mfrom(q), 0, 7)
|
| 294 |
+
[0.0, 16.0, -128.0, 704.0, -3072.0, 11488.0, -38400.0, 117632.0]
|
| 295 |
+
|
| 296 |
+
"""
|
| 297 |
+
if m is not None:
|
| 298 |
+
return m
|
| 299 |
+
if k is not None:
|
| 300 |
+
return k**2
|
| 301 |
+
if tau is not None:
|
| 302 |
+
q = ctx.expjpi(tau)
|
| 303 |
+
if qbar is not None:
|
| 304 |
+
q = ctx.sqrt(qbar)
|
| 305 |
+
if q == 1:
|
| 306 |
+
return ctx.convert(q)
|
| 307 |
+
if q == -1:
|
| 308 |
+
return q*ctx.inf
|
| 309 |
+
v = (ctx.jtheta(2,0,q)/ctx.jtheta(3,0,q))**4
|
| 310 |
+
if ctx._is_real_type(q) and q < 0:
|
| 311 |
+
v = v.real
|
| 312 |
+
return v
|
| 313 |
+
|
| 314 |
+
jacobi_spec = {
|
| 315 |
+
'sn' : ([3],[2],[1],[4], 'sin', 'tanh'),
|
| 316 |
+
'cn' : ([4],[2],[2],[4], 'cos', 'sech'),
|
| 317 |
+
'dn' : ([4],[3],[3],[4], '1', 'sech'),
|
| 318 |
+
'ns' : ([2],[3],[4],[1], 'csc', 'coth'),
|
| 319 |
+
'nc' : ([2],[4],[4],[2], 'sec', 'cosh'),
|
| 320 |
+
'nd' : ([3],[4],[4],[3], '1', 'cosh'),
|
| 321 |
+
'sc' : ([3],[4],[1],[2], 'tan', 'sinh'),
|
| 322 |
+
'sd' : ([3,3],[2,4],[1],[3], 'sin', 'sinh'),
|
| 323 |
+
'cd' : ([3],[2],[2],[3], 'cos', '1'),
|
| 324 |
+
'cs' : ([4],[3],[2],[1], 'cot', 'csch'),
|
| 325 |
+
'dc' : ([2],[3],[3],[2], 'sec', '1'),
|
| 326 |
+
'ds' : ([2,4],[3,3],[3],[1], 'csc', 'csch'),
|
| 327 |
+
'cc' : None,
|
| 328 |
+
'ss' : None,
|
| 329 |
+
'nn' : None,
|
| 330 |
+
'dd' : None
|
| 331 |
+
}
|
| 332 |
+
|
| 333 |
+
@defun
|
| 334 |
+
def ellipfun(ctx, kind, u=None, m=None, q=None, k=None, tau=None):
|
| 335 |
+
try:
|
| 336 |
+
S = jacobi_spec[kind]
|
| 337 |
+
except KeyError:
|
| 338 |
+
raise ValueError("First argument must be a two-character string "
|
| 339 |
+
"containing 's', 'c', 'd' or 'n', e.g.: 'sn'")
|
| 340 |
+
if u is None:
|
| 341 |
+
def f(*args, **kwargs):
|
| 342 |
+
return ctx.ellipfun(kind, *args, **kwargs)
|
| 343 |
+
f.__name__ = kind
|
| 344 |
+
return f
|
| 345 |
+
prec = ctx.prec
|
| 346 |
+
try:
|
| 347 |
+
ctx.prec += 10
|
| 348 |
+
u = ctx.convert(u)
|
| 349 |
+
q = ctx.qfrom(m=m, q=q, k=k, tau=tau)
|
| 350 |
+
if S is None:
|
| 351 |
+
v = ctx.one + 0*q*u
|
| 352 |
+
elif q == ctx.zero:
|
| 353 |
+
if S[4] == '1': v = ctx.one
|
| 354 |
+
else: v = getattr(ctx, S[4])(u)
|
| 355 |
+
v += 0*q*u
|
| 356 |
+
elif q == ctx.one:
|
| 357 |
+
if S[5] == '1': v = ctx.one
|
| 358 |
+
else: v = getattr(ctx, S[5])(u)
|
| 359 |
+
v += 0*q*u
|
| 360 |
+
else:
|
| 361 |
+
t = u / ctx.jtheta(3, 0, q)**2
|
| 362 |
+
v = ctx.one
|
| 363 |
+
for a in S[0]: v *= ctx.jtheta(a, 0, q)
|
| 364 |
+
for b in S[1]: v /= ctx.jtheta(b, 0, q)
|
| 365 |
+
for c in S[2]: v *= ctx.jtheta(c, t, q)
|
| 366 |
+
for d in S[3]: v /= ctx.jtheta(d, t, q)
|
| 367 |
+
finally:
|
| 368 |
+
ctx.prec = prec
|
| 369 |
+
return +v
|
| 370 |
+
|
| 371 |
+
@defun_wrapped
|
| 372 |
+
def kleinj(ctx, tau=None, **kwargs):
|
| 373 |
+
r"""
|
| 374 |
+
Evaluates the Klein j-invariant, which is a modular function defined for
|
| 375 |
+
`\tau` in the upper half-plane as
|
| 376 |
+
|
| 377 |
+
.. math ::
|
| 378 |
+
|
| 379 |
+
J(\tau) = \frac{g_2^3(\tau)}{g_2^3(\tau) - 27 g_3^2(\tau)}
|
| 380 |
+
|
| 381 |
+
where `g_2` and `g_3` are the modular invariants of the Weierstrass
|
| 382 |
+
elliptic function,
|
| 383 |
+
|
| 384 |
+
.. math ::
|
| 385 |
+
|
| 386 |
+
g_2(\tau) = 60 \sum_{(m,n) \in \mathbb{Z}^2 \setminus (0,0)} (m \tau+n)^{-4}
|
| 387 |
+
|
| 388 |
+
g_3(\tau) = 140 \sum_{(m,n) \in \mathbb{Z}^2 \setminus (0,0)} (m \tau+n)^{-6}.
|
| 389 |
+
|
| 390 |
+
An alternative, common notation is that of the j-function
|
| 391 |
+
`j(\tau) = 1728 J(\tau)`.
|
| 392 |
+
|
| 393 |
+
**Plots**
|
| 394 |
+
|
| 395 |
+
.. literalinclude :: /plots/kleinj.py
|
| 396 |
+
.. image :: /plots/kleinj.png
|
| 397 |
+
.. literalinclude :: /plots/kleinj2.py
|
| 398 |
+
.. image :: /plots/kleinj2.png
|
| 399 |
+
|
| 400 |
+
**Examples**
|
| 401 |
+
|
| 402 |
+
Verifying the functional equation `J(\tau) = J(\tau+1) = J(-\tau^{-1})`::
|
| 403 |
+
|
| 404 |
+
>>> from mpmath import *
|
| 405 |
+
>>> mp.dps = 25; mp.pretty = True
|
| 406 |
+
>>> tau = 0.625+0.75*j
|
| 407 |
+
>>> tau = 0.625+0.75*j
|
| 408 |
+
>>> kleinj(tau)
|
| 409 |
+
(-0.1507492166511182267125242 + 0.07595948379084571927228948j)
|
| 410 |
+
>>> kleinj(tau+1)
|
| 411 |
+
(-0.1507492166511182267125242 + 0.07595948379084571927228948j)
|
| 412 |
+
>>> kleinj(-1/tau)
|
| 413 |
+
(-0.1507492166511182267125242 + 0.07595948379084571927228946j)
|
| 414 |
+
|
| 415 |
+
The j-function has a famous Laurent series expansion in terms of the nome
|
| 416 |
+
`\bar{q}`, `j(\tau) = \bar{q}^{-1} + 744 + 196884\bar{q} + \ldots`::
|
| 417 |
+
|
| 418 |
+
>>> mp.dps = 15
|
| 419 |
+
>>> taylor(lambda q: 1728*q*kleinj(qbar=q), 0, 5, singular=True)
|
| 420 |
+
[1.0, 744.0, 196884.0, 21493760.0, 864299970.0, 20245856256.0]
|
| 421 |
+
|
| 422 |
+
The j-function admits exact evaluation at special algebraic points
|
| 423 |
+
related to the Heegner numbers 1, 2, 3, 7, 11, 19, 43, 67, 163::
|
| 424 |
+
|
| 425 |
+
>>> @extraprec(10)
|
| 426 |
+
... def h(n):
|
| 427 |
+
... v = (1+sqrt(n)*j)
|
| 428 |
+
... if n > 2:
|
| 429 |
+
... v *= 0.5
|
| 430 |
+
... return v
|
| 431 |
+
...
|
| 432 |
+
>>> mp.dps = 25
|
| 433 |
+
>>> for n in [1,2,3,7,11,19,43,67,163]:
|
| 434 |
+
... n, chop(1728*kleinj(h(n)))
|
| 435 |
+
...
|
| 436 |
+
(1, 1728.0)
|
| 437 |
+
(2, 8000.0)
|
| 438 |
+
(3, 0.0)
|
| 439 |
+
(7, -3375.0)
|
| 440 |
+
(11, -32768.0)
|
| 441 |
+
(19, -884736.0)
|
| 442 |
+
(43, -884736000.0)
|
| 443 |
+
(67, -147197952000.0)
|
| 444 |
+
(163, -262537412640768000.0)
|
| 445 |
+
|
| 446 |
+
Also at other special points, the j-function assumes explicit
|
| 447 |
+
algebraic values, e.g.::
|
| 448 |
+
|
| 449 |
+
>>> chop(1728*kleinj(j*sqrt(5)))
|
| 450 |
+
1264538.909475140509320227
|
| 451 |
+
>>> identify(cbrt(_)) # note: not simplified
|
| 452 |
+
'((100+sqrt(13520))/2)'
|
| 453 |
+
>>> (50+26*sqrt(5))**3
|
| 454 |
+
1264538.909475140509320227
|
| 455 |
+
|
| 456 |
+
"""
|
| 457 |
+
q = ctx.qfrom(tau=tau, **kwargs)
|
| 458 |
+
t2 = ctx.jtheta(2,0,q)
|
| 459 |
+
t3 = ctx.jtheta(3,0,q)
|
| 460 |
+
t4 = ctx.jtheta(4,0,q)
|
| 461 |
+
P = (t2**8 + t3**8 + t4**8)**3
|
| 462 |
+
Q = 54*(t2*t3*t4)**8
|
| 463 |
+
return P/Q
|
| 464 |
+
|
| 465 |
+
|
| 466 |
+
def RF_calc(ctx, x, y, z, r):
|
| 467 |
+
if y == z: return RC_calc(ctx, x, y, r)
|
| 468 |
+
if x == z: return RC_calc(ctx, y, x, r)
|
| 469 |
+
if x == y: return RC_calc(ctx, z, x, r)
|
| 470 |
+
if not (ctx.isnormal(x) and ctx.isnormal(y) and ctx.isnormal(z)):
|
| 471 |
+
if ctx.isnan(x) or ctx.isnan(y) or ctx.isnan(z):
|
| 472 |
+
return x*y*z
|
| 473 |
+
if ctx.isinf(x) or ctx.isinf(y) or ctx.isinf(z):
|
| 474 |
+
return ctx.zero
|
| 475 |
+
xm,ym,zm = x,y,z
|
| 476 |
+
A0 = Am = (x+y+z)/3
|
| 477 |
+
Q = ctx.root(3*r, -6) * max(abs(A0-x),abs(A0-y),abs(A0-z))
|
| 478 |
+
g = ctx.mpf(0.25)
|
| 479 |
+
pow4 = ctx.one
|
| 480 |
+
while 1:
|
| 481 |
+
xs = ctx.sqrt(xm)
|
| 482 |
+
ys = ctx.sqrt(ym)
|
| 483 |
+
zs = ctx.sqrt(zm)
|
| 484 |
+
lm = xs*ys + xs*zs + ys*zs
|
| 485 |
+
Am1 = (Am+lm)*g
|
| 486 |
+
xm, ym, zm = (xm+lm)*g, (ym+lm)*g, (zm+lm)*g
|
| 487 |
+
if pow4 * Q < abs(Am):
|
| 488 |
+
break
|
| 489 |
+
Am = Am1
|
| 490 |
+
pow4 *= g
|
| 491 |
+
t = pow4/Am
|
| 492 |
+
X = (A0-x)*t
|
| 493 |
+
Y = (A0-y)*t
|
| 494 |
+
Z = -X-Y
|
| 495 |
+
E2 = X*Y-Z**2
|
| 496 |
+
E3 = X*Y*Z
|
| 497 |
+
return ctx.power(Am,-0.5) * (9240-924*E2+385*E2**2+660*E3-630*E2*E3)/9240
|
| 498 |
+
|
| 499 |
+
def RC_calc(ctx, x, y, r, pv=True):
|
| 500 |
+
if not (ctx.isnormal(x) and ctx.isnormal(y)):
|
| 501 |
+
if ctx.isinf(x) or ctx.isinf(y):
|
| 502 |
+
return 1/(x*y)
|
| 503 |
+
if y == 0:
|
| 504 |
+
return ctx.inf
|
| 505 |
+
if x == 0:
|
| 506 |
+
return ctx.pi / ctx.sqrt(y) / 2
|
| 507 |
+
raise ValueError
|
| 508 |
+
# Cauchy principal value
|
| 509 |
+
if pv and ctx._im(y) == 0 and ctx._re(y) < 0:
|
| 510 |
+
return ctx.sqrt(x/(x-y)) * RC_calc(ctx, x-y, -y, r)
|
| 511 |
+
if x == y:
|
| 512 |
+
return 1/ctx.sqrt(x)
|
| 513 |
+
extraprec = 2*max(0,-ctx.mag(x-y)+ctx.mag(x))
|
| 514 |
+
ctx.prec += extraprec
|
| 515 |
+
if ctx._is_real_type(x) and ctx._is_real_type(y):
|
| 516 |
+
x = ctx._re(x)
|
| 517 |
+
y = ctx._re(y)
|
| 518 |
+
a = ctx.sqrt(x/y)
|
| 519 |
+
if x < y:
|
| 520 |
+
b = ctx.sqrt(y-x)
|
| 521 |
+
v = ctx.acos(a)/b
|
| 522 |
+
else:
|
| 523 |
+
b = ctx.sqrt(x-y)
|
| 524 |
+
v = ctx.acosh(a)/b
|
| 525 |
+
else:
|
| 526 |
+
sx = ctx.sqrt(x)
|
| 527 |
+
sy = ctx.sqrt(y)
|
| 528 |
+
v = ctx.acos(sx/sy)/(ctx.sqrt((1-x/y))*sy)
|
| 529 |
+
ctx.prec -= extraprec
|
| 530 |
+
return v
|
| 531 |
+
|
| 532 |
+
def RJ_calc(ctx, x, y, z, p, r, integration):
    """
    Compute Carlson's symmetric integral R_J(x,y,z,p) to tolerance r.

    With integration == 0, computes RJ only using Carlson's algorithm
    (may be wrong for some values).
    With integration == 1, uses an initial integration to make sure
    Carlson's algorithm is correct.
    With integration == 2, uses only integration.
    """
    if not (ctx.isnormal(x) and ctx.isnormal(y) and \
        ctx.isnormal(z) and ctx.isnormal(p)):
        if ctx.isnan(x) or ctx.isnan(y) or ctx.isnan(z) or ctx.isnan(p):
            return x*y*z
        if ctx.isinf(x) or ctx.isinf(y) or ctx.isinf(z) or ctx.isinf(p):
            return ctx.zero
        if not p:
            return ctx.inf
        # More than one of x, y, z equal to zero: the integral diverges
        if (not x) + (not y) + (not z) > 1:
            return ctx.inf
    # Check conditions and fall back on integration for argument
    # reduction if needed. The following conditions might be needlessly
    # restrictive.
    initial_integral = ctx.zero
    if integration >= 1:
        ok = (x.real >= 0 and y.real >= 0 and z.real >= 0 and p.real > 0)
        if not ok:
            if x == p or y == p or z == p:
                ok = True
        if not ok:
            # Carlson's algorithm also valid when one argument is real
            # nonnegative and the other two are complex conjugates
            if p.imag != 0 or p.real >= 0:
                if (x.imag == 0 and x.real >= 0 and ctx.conj(y) == z):
                    ok = True
                if (y.imag == 0 and y.real >= 0 and ctx.conj(x) == z):
                    ok = True
                if (z.imag == 0 and z.real >= 0 and ctx.conj(x) == y):
                    ok = True
        if not ok or (integration == 2):
            # Shift all arguments by N so they land in the valid region,
            # accounting for the shifted piece by numerical integration
            N = ctx.ceil(-min(x.real, y.real, z.real, p.real)) + 1
            # Integrate around any singularities
            if all((t.imag >= 0 or t.real > 0) for t in [x, y, z, p]):
                margin = ctx.j
            elif all((t.imag < 0 or t.real > 0) for t in [x, y, z, p]):
                margin = -ctx.j
            else:
                margin = 1
                # Go through the upper half-plane, but low enough that any
                # parameter starting in the lower plane doesn't cross the
                # branch cut
                for t in [x, y, z, p]:
                    if t.imag >= 0 or t.real > 0:
                        continue
                    margin = min(margin, abs(t.imag) * 0.5)
                margin *= ctx.j
            N += margin
            F = lambda t: 1/(ctx.sqrt(t+x)*ctx.sqrt(t+y)*ctx.sqrt(t+z)*(t+p))
            if integration == 2:
                return 1.5 * ctx.quadsubdiv(F, [0, N, ctx.inf])
            initial_integral = 1.5 * ctx.quadsubdiv(F, [0, N])
            x += N; y += N; z += N; p += N
    # Carlson's duplication iteration (DLMF 19.36)
    xm,ym,zm,pm = x,y,z,p
    A0 = Am = (x + y + z + 2*p)/5
    delta = (p-x)*(p-y)*(p-z)
    Q = ctx.root(0.25*r, -6) * max(abs(A0-x),abs(A0-y),abs(A0-z),abs(A0-p))
    g = ctx.mpf(0.25)
    pow4 = ctx.one
    S = 0
    while 1:
        sx = ctx.sqrt(xm)
        sy = ctx.sqrt(ym)
        sz = ctx.sqrt(zm)
        sp = ctx.sqrt(pm)
        lm = sx*sy + sx*sz + sy*sz
        Am1 = (Am+lm)*g
        xm = (xm+lm)*g; ym = (ym+lm)*g; zm = (zm+lm)*g; pm = (pm+lm)*g
        dm = (sp+sx) * (sp+sy) * (sp+sz)
        em = delta * pow4**3 / dm**2
        # Stop once the scaled error bound falls below |Am|
        if pow4 * Q < abs(Am):
            break
        T = RC_calc(ctx, ctx.one, ctx.one+em, r) * pow4 / dm
        S += T
        pow4 *= g
        Am = Am1
    # Final Taylor-series correction in the elementary symmetric
    # functions E2..E5 of the reduced arguments
    t = pow4 / Am
    X = (A0-x)*t
    Y = (A0-y)*t
    Z = (A0-z)*t
    P = (-X-Y-Z)/2
    E2 = X*Y + X*Z + Y*Z - 3*P**2
    E3 = X*Y*Z + 2*E2*P + 4*P**3
    E4 = (2*X*Y*Z + E2*P + 3*P**3)*P
    E5 = X*Y*Z*P**2
    P = 24024 - 5148*E2 + 2457*E2**2 + 4004*E3 - 4158*E2*E3 - 3276*E4 + 2772*E5
    Q = 24024
    v1 = pow4 * ctx.power(Am, -1.5) * P/Q
    v2 = 6*S
    return initial_integral + v1 + v2
|
| 627 |
+
|
| 628 |
+
@defun
def elliprf(ctx, x, y, z):
    r"""
    Evaluates the Carlson symmetric elliptic integral of the first kind

    .. math ::

        R_F(x,y,z) = \frac{1}{2}
            \int_0^{\infty} \frac{dt}{\sqrt{(t+x)(t+y)(t+z)}}

    which is defined for `x,y,z \notin (-\infty,0)`, and with
    at most one of `x,y,z` being zero.

    For real `x,y,z \ge 0`, the principal square root is taken in the integrand.
    For complex `x,y,z`, the principal square root is taken as `t \to \infty`
    and as `t \to 0` non-principal branches are chosen as necessary so as to
    make the integrand continuous.

    **Examples**

    Some basic values and limits::

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> elliprf(0,1,1); pi/2
        1.570796326794896619231322
        1.570796326794896619231322
        >>> elliprf(0,1,inf)
        0.0
        >>> elliprf(1,1,1)
        1.0
        >>> elliprf(2,2,2)**2
        0.5
        >>> elliprf(1,0,0); elliprf(0,0,1); elliprf(0,1,0); elliprf(0,0,0)
        +inf
        +inf
        +inf
        +inf

    Representing complete elliptic integrals in terms of `R_F`::

        >>> m = mpf(0.75)
        >>> ellipk(m); elliprf(0,1-m,1)
        2.156515647499643235438675
        2.156515647499643235438675
        >>> ellipe(m); elliprf(0,1-m,1)-m*elliprd(0,1-m,1)/3
        1.211056027568459524803563
        1.211056027568459524803563

    Some symmetries and argument transformations::

        >>> x,y,z = 2,3,4
        >>> elliprf(x,y,z); elliprf(y,x,z); elliprf(z,y,x)
        0.5840828416771517066928492
        0.5840828416771517066928492
        0.5840828416771517066928492
        >>> k = mpf(100000)
        >>> elliprf(k*x,k*y,k*z); k**(-0.5) * elliprf(x,y,z)
        0.001847032121923321253219284
        0.001847032121923321253219284
        >>> l = sqrt(x*y) + sqrt(y*z) + sqrt(z*x)
        >>> elliprf(x,y,z); 2*elliprf(x+l,y+l,z+l)
        0.5840828416771517066928492
        0.5840828416771517066928492
        >>> elliprf((x+l)/4,(y+l)/4,(z+l)/4)
        0.5840828416771517066928492

    Comparing with numerical integration::

        >>> x,y,z = 2,3,4
        >>> elliprf(x,y,z)
        0.5840828416771517066928492
        >>> f = lambda t: 0.5*((t+x)*(t+y)*(t+z))**(-0.5)
        >>> q = extradps(25)(quad)
        >>> q(f, [0,inf])
        0.5840828416771517066928492

    With the following arguments, the square root in the integrand becomes
    discontinuous at `t = 1/2` if the principal branch is used. To obtain
    the right value, `-\sqrt{r}` must be taken instead of `\sqrt{r}`
    on `t \in (0, 1/2)`::

        >>> x,y,z = j-1,j,0
        >>> elliprf(x,y,z)
        (0.7961258658423391329305694 - 1.213856669836495986430094j)
        >>> -q(f, [0,0.5]) + q(f, [0.5,inf])
        (0.7961258658423391329305694 - 1.213856669836495986430094j)

    The so-called *first lemniscate constant*, a transcendental number::

        >>> elliprf(0,1,2)
        1.31102877714605990523242
        >>> extradps(25)(quad)(lambda t: 1/sqrt(1-t**4), [0,1])
        1.31102877714605990523242
        >>> gamma('1/4')**2/(4*sqrt(2*pi))
        1.31102877714605990523242

    **References**

    1. [Carlson]_
    2. [DLMF]_ Chapter 19. Elliptic Integrals

    """
    x = ctx.convert(x)
    y = ctx.convert(y)
    z = ctx.convert(z)
    # Compute with guard bits, restoring precision even on error
    prec = ctx.prec
    try:
        ctx.prec += 20
        tol = ctx.eps * 2**10
        v = RF_calc(ctx, x, y, z, tol)
    finally:
        ctx.prec = prec
    # Unary + rounds the result to the restored working precision
    return +v
|
| 742 |
+
|
| 743 |
+
@defun
def elliprc(ctx, x, y, pv=True):
    r"""
    Evaluates the degenerate Carlson symmetric elliptic integral
    of the first kind

    .. math ::

        R_C(x,y) = R_F(x,y,y) =
            \frac{1}{2} \int_0^{\infty} \frac{dt}{(t+y) \sqrt{(t+x)}}.

    If `y \in (-\infty,0)`, either a value defined by continuity,
    or with *pv=True* the Cauchy principal value, can be computed.

    If `x \ge 0, y > 0`, the value can be expressed in terms of
    elementary functions as

    .. math ::

        R_C(x,y) =
        \begin{cases}
          \dfrac{1}{\sqrt{y-x}}
            \cos^{-1}\left(\sqrt{\dfrac{x}{y}}\right),   & x < y \\
          \dfrac{1}{\sqrt{y}},                          & x = y \\
          \dfrac{1}{\sqrt{x-y}}
            \cosh^{-1}\left(\sqrt{\dfrac{x}{y}}\right),  & x > y \\
        \end{cases}.

    **Examples**

    Some special values and limits::

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> elliprc(1,2)*4; elliprc(0,1)*2; +pi
        3.141592653589793238462643
        3.141592653589793238462643
        3.141592653589793238462643
        >>> elliprc(1,0)
        +inf
        >>> elliprc(5,5)**2
        0.2
        >>> elliprc(1,inf); elliprc(inf,1); elliprc(inf,inf)
        0.0
        0.0
        0.0

    Comparing with the elementary closed-form solution::

        >>> elliprc('1/3', '1/5'); sqrt(7.5)*acosh(sqrt('5/3'))
        2.041630778983498390751238
        2.041630778983498390751238
        >>> elliprc('1/5', '1/3'); sqrt(7.5)*acos(sqrt('3/5'))
        1.875180765206547065111085
        1.875180765206547065111085

    Comparing with numerical integration::

        >>> q = extradps(25)(quad)
        >>> elliprc(2, -3, pv=True)
        0.3333969101113672670749334
        >>> elliprc(2, -3, pv=False)
        (0.3333969101113672670749334 + 0.7024814731040726393156375j)
        >>> 0.5*q(lambda t: 1/(sqrt(t+2)*(t-3)), [0,3-j,6,inf])
        (0.3333969101113672670749334 + 0.7024814731040726393156375j)

    """
    x = ctx.convert(x)
    y = ctx.convert(y)
    # Compute with guard bits, restoring precision even on error
    prec = ctx.prec
    try:
        ctx.prec += 20
        tol = ctx.eps * 2**10
        v = RC_calc(ctx, x, y, tol, pv)
    finally:
        ctx.prec = prec
    # Unary + rounds the result to the restored working precision
    return +v
|
| 820 |
+
|
| 821 |
+
@defun
def elliprj(ctx, x, y, z, p, integration=1):
    r"""
    Evaluates the Carlson symmetric elliptic integral of the third kind

    .. math ::

        R_J(x,y,z,p) = \frac{3}{2}
            \int_0^{\infty} \frac{dt}{(t+p)\sqrt{(t+x)(t+y)(t+z)}}.

    Like :func:`~mpmath.elliprf`, the branch of the square root in the integrand
    is defined so as to be continuous along the path of integration for
    complex values of the arguments.

    **Examples**

    Some values and limits::

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> elliprj(1,1,1,1)
        1.0
        >>> elliprj(2,2,2,2); 1/(2*sqrt(2))
        0.3535533905932737622004222
        0.3535533905932737622004222
        >>> elliprj(0,1,2,2)
        1.067937989667395702268688
        >>> 3*(2*gamma('5/4')**2-pi**2/gamma('1/4')**2)/(sqrt(2*pi))
        1.067937989667395702268688
        >>> elliprj(0,1,1,2); 3*pi*(2-sqrt(2))/4
        1.380226776765915172432054
        1.380226776765915172432054
        >>> elliprj(1,3,2,0); elliprj(0,1,1,0); elliprj(0,0,0,0)
        +inf
        +inf
        +inf
        >>> elliprj(1,inf,1,0); elliprj(1,1,1,inf)
        0.0
        0.0
        >>> chop(elliprj(1+j, 1-j, 1, 1))
        0.8505007163686739432927844

    Scale transformation::

        >>> x,y,z,p = 2,3,4,5
        >>> k = mpf(100000)
        >>> elliprj(k*x,k*y,k*z,k*p); k**(-1.5)*elliprj(x,y,z,p)
        4.521291677592745527851168e-9
        4.521291677592745527851168e-9

    Comparing with numerical integration::

        >>> elliprj(1,2,3,4)
        0.2398480997495677621758617
        >>> f = lambda t: 1/((t+4)*sqrt((t+1)*(t+2)*(t+3)))
        >>> 1.5*quad(f, [0,inf])
        0.2398480997495677621758617
        >>> elliprj(1,2+1j,3,4-2j)
        (0.216888906014633498739952 + 0.04081912627366673332369512j)
        >>> f = lambda t: 1/((t+4-2j)*sqrt((t+1)*(t+2+1j)*(t+3)))
        >>> 1.5*quad(f, [0,inf])
        (0.216888906014633498739952 + 0.04081912627366673332369511j)

    """
    x = ctx.convert(x)
    y = ctx.convert(y)
    z = ctx.convert(z)
    p = ctx.convert(p)
    # Compute with guard bits, restoring precision even on error
    prec = ctx.prec
    try:
        ctx.prec += 20
        tol = ctx.eps * 2**10
        v = RJ_calc(ctx, x, y, z, p, tol, integration)
    finally:
        ctx.prec = prec
    # Unary + rounds the result to the restored working precision
    return +v
|
| 897 |
+
|
| 898 |
+
@defun
def elliprd(ctx, x, y, z):
    r"""
    Evaluates the degenerate Carlson symmetric elliptic integral
    of the third kind or Carlson elliptic integral of the
    second kind `R_D(x,y,z) = R_J(x,y,z,z)`.

    See :func:`~mpmath.elliprj` for additional information.

    **Examples**

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> elliprd(1,2,3)
        0.2904602810289906442326534
        >>> elliprj(1,2,3,3)
        0.2904602810289906442326534

    The so-called *second lemniscate constant*, a transcendental number::

        >>> elliprd(0,2,1)/3
        0.5990701173677961037199612
        >>> extradps(25)(quad)(lambda t: t**2/sqrt(1-t**4), [0,1])
        0.5990701173677961037199612
        >>> gamma('3/4')**2/sqrt(2*pi)
        0.5990701173677961037199612

    """
    # R_D is R_J with the last two arguments coinciding
    return ctx.elliprj(x,y,z,z)
|
| 927 |
+
|
| 928 |
+
@defun
def elliprg(ctx, x, y, z):
    r"""
    Evaluates the Carlson completely symmetric elliptic integral
    of the second kind

    .. math ::

        R_G(x,y,z) = \frac{1}{4} \int_0^{\infty}
            \frac{t}{\sqrt{(t+x)(t+y)(t+z)}}
            \left( \frac{x}{t+x} + \frac{y}{t+y} + \frac{z}{t+z}\right) dt.

    **Examples**

    Evaluation for real and complex arguments::

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> elliprg(0,1,1)*4; +pi
        3.141592653589793238462643
        3.141592653589793238462643
        >>> elliprg(0,0.5,1)
        0.6753219405238377512600874
        >>> chop(elliprg(1+j, 1-j, 2))
        1.172431327676416604532822

    A double integral that can be evaluated in terms of `R_G`::

        >>> x,y,z = 2,3,4
        >>> def f(t,u):
        ...     st = fp.sin(t); ct = fp.cos(t)
        ...     su = fp.sin(u); cu = fp.cos(u)
        ...     return (x*(st*cu)**2 + y*(st*su)**2 + z*ct**2)**0.5 * st
        ...
        >>> nprint(mpf(fp.quad(f, [0,fp.pi], [0,2*fp.pi])/(4*fp.pi)), 13)
        1.725503028069
        >>> nprint(elliprg(x,y,z), 13)
        1.725503028069

    """
    x = ctx.convert(x)
    y = ctx.convert(y)
    z = ctx.convert(z)
    # Count zero arguments to dispatch the degenerate closed forms
    zeros = (not x) + (not y) + (not z)
    if zeros == 3:
        # R_G(0,0,0) = 0, with the type of the zero matching the inputs
        return (x+y+z)*0
    if zeros == 2:
        # Only one nonzero argument: R_G reduces to sqrt of it over 2
        if x: return 0.5*ctx.sqrt(x)
        if y: return 0.5*ctx.sqrt(y)
        return 0.5*ctx.sqrt(z)
    if zeros == 1:
        # Ensure z is nonzero, since the formula below divides by sqrt(z)
        if not z:
            x, z = z, x
    def terms():
        # R_G as a combination of R_F and R_D; summed with
        # sum_accurately to control cancellation between the terms
        T1 = 0.5*z*ctx.elliprf(x,y,z)
        T2 = -0.5*(x-z)*(y-z)*ctx.elliprd(x,y,z)/3
        T3 = 0.5*ctx.sqrt(x)*ctx.sqrt(y)/ctx.sqrt(z)
        return T1,T2,T3
    return ctx.sum_accurately(terms)
|
| 987 |
+
|
| 988 |
+
|
| 989 |
+
@defun_wrapped
def ellipf(ctx, phi, m):
    r"""
    Evaluates the Legendre incomplete elliptic integral of the first kind

    .. math ::

        F(\phi,m) = \int_0^{\phi} \frac{dt}{\sqrt{1-m \sin^2 t}}

    or equivalently

    .. math ::

        F(\phi,m) = \int_0^{\sin \phi}
        \frac{dt}{\left(\sqrt{1-t^2}\right)\left(\sqrt{1-mt^2}\right)}.

    The function reduces to a complete elliptic integral of the first kind
    (see :func:`~mpmath.ellipk`) when `\phi = \frac{\pi}{2}`; that is,

    .. math ::

        F\left(\frac{\pi}{2}, m\right) = K(m).

    In the defining integral, it is assumed that the principal branch
    of the square root is taken and that the path of integration avoids
    crossing any branch cuts. Outside `-\pi/2 \le \Re(\phi) \le \pi/2`,
    the function extends quasi-periodically as

    .. math ::

        F(\phi + n \pi, m) = 2 n K(m) + F(\phi,m), n \in \mathbb{Z}.

    **Plots**

    .. literalinclude :: /plots/ellipf.py
    .. image :: /plots/ellipf.png

    **Examples**

    Basic values and limits::

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> ellipf(0,1)
        0.0
        >>> ellipf(0,0)
        0.0
        >>> ellipf(1,0); ellipf(2+3j,0)
        1.0
        (2.0 + 3.0j)
        >>> ellipf(1,1); log(sec(1)+tan(1))
        1.226191170883517070813061
        1.226191170883517070813061
        >>> ellipf(pi/2, -0.5); ellipk(-0.5)
        1.415737208425956198892166
        1.415737208425956198892166
        >>> ellipf(pi/2+eps, 1); ellipf(-pi/2-eps, 1)
        +inf
        +inf
        >>> ellipf(1.5, 1)
        3.340677542798311003320813

    Comparing with numerical integration::

        >>> z,m = 0.5, 1.25
        >>> ellipf(z,m)
        0.5287219202206327872978255
        >>> quad(lambda t: (1-m*sin(t)**2)**(-0.5), [0,z])
        0.5287219202206327872978255

    The arguments may be complex numbers::

        >>> ellipf(3j, 0.5)
        (0.0 + 1.713602407841590234804143j)
        >>> ellipf(3+4j, 5-6j)
        (1.269131241950351323305741 - 0.3561052815014558335412538j)
        >>> z,m = 2+3j, 1.25
        >>> k = 1011
        >>> ellipf(z+pi*k,m); ellipf(z,m) + 2*k*ellipk(m)
        (4086.184383622179764082821 - 3003.003538923749396546871j)
        (4086.184383622179764082821 - 3003.003538923749396546871j)

    For `|\Re(z)| < \pi/2`, the function can be expressed as a
    hypergeometric series of two variables
    (see :func:`~mpmath.appellf1`)::

        >>> z,m = 0.5, 0.25
        >>> ellipf(z,m)
        0.5050887275786480788831083
        >>> sin(z)*appellf1(0.5,0.5,0.5,1.5,sin(z)**2,m*sin(z)**2)
        0.5050887275786480788831083

    """
    z = phi
    if not (ctx.isnormal(z) and ctx.isnormal(m)):
        # Degenerate/nonfinite cases; the arithmetic forms propagate
        # NaNs and produce correctly-typed zeros
        if m == 0:
            return z + m
        if z == 0:
            return z * m
        if m == ctx.inf or m == ctx.ninf: return z/m
        raise ValueError
    x = z.real
    # Extra precision to absorb the magnitude of the phase
    ctx.prec += max(0, ctx.mag(x))
    pi = +ctx.pi
    away = abs(x) > pi/2
    if m == 1:
        # K(1) diverges, so any full-period shift makes the result infinite
        if away:
            return ctx.inf
    if away:
        # Quasi-periodic reduction: F(z + d*pi, m) = 2*d*K(m) + F(z, m)
        d = ctx.nint(x/pi)
        z = z-pi*d
        P = 2*d*ctx.ellipk(m)
    else:
        P = 0
    # Legendre F via Carlson's R_F (DLMF 19.25.5)
    c, s = ctx.cos_sin(z)
    return s * ctx.elliprf(c**2, 1-m*s**2, 1) + P
|
| 1105 |
+
|
| 1106 |
+
@defun_wrapped
def ellipe(ctx, *args):
    r"""
    Called with a single argument `m`, evaluates the Legendre complete
    elliptic integral of the second kind, `E(m)`, defined by

        .. math :: E(m) = \int_0^{\pi/2} \sqrt{1-m \sin^2 t} \, dt \,=\,
            \frac{\pi}{2}
            \,_2F_1\left(\frac{1}{2}, -\frac{1}{2}, 1, m\right).

    Called with two arguments `\phi, m`, evaluates the incomplete elliptic
    integral of the second kind

    .. math ::

        E(\phi,m) = \int_0^{\phi} \sqrt{1-m \sin^2 t} \, dt =
                    \int_0^{\sin z}
                    \frac{\sqrt{1-mt^2}}{\sqrt{1-t^2}} \, dt.

    The incomplete integral reduces to a complete integral when
    `\phi = \frac{\pi}{2}`; that is,

    .. math ::

        E\left(\frac{\pi}{2}, m\right) = E(m).

    In the defining integral, it is assumed that the principal branch
    of the square root is taken and that the path of integration avoids
    crossing any branch cuts. Outside `-\pi/2 \le \Re(z) \le \pi/2`,
    the function extends quasi-periodically as

    .. math ::

        E(\phi + n \pi, m) = 2 n E(m) + E(\phi,m), n \in \mathbb{Z}.

    **Plots**

    .. literalinclude :: /plots/ellipe.py
    .. image :: /plots/ellipe.png

    **Examples for the complete integral**

    Basic values and limits::

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> ellipe(0)
        1.570796326794896619231322
        >>> ellipe(1)
        1.0
        >>> ellipe(-1)
        1.910098894513856008952381
        >>> ellipe(2)
        (0.5990701173677961037199612 + 0.5990701173677961037199612j)
        >>> ellipe(inf)
        (0.0 + +infj)
        >>> ellipe(-inf)
        +inf

    Verifying the defining integral and hypergeometric
    representation::

        >>> ellipe(0.5)
        1.350643881047675502520175
        >>> quad(lambda t: sqrt(1-0.5*sin(t)**2), [0, pi/2])
        1.350643881047675502520175
        >>> pi/2*hyp2f1(0.5,-0.5,1,0.5)
        1.350643881047675502520175

    Evaluation is supported for arbitrary complex `m`::

        >>> ellipe(0.5+0.25j)
        (1.360868682163129682716687 - 0.1238733442561786843557315j)
        >>> ellipe(3+4j)
        (1.499553520933346954333612 - 1.577879007912758274533309j)

    A definite integral::

        >>> quad(ellipe, [0,1])
        1.333333333333333333333333

    **Examples for the incomplete integral**

    Basic values and limits::

        >>> ellipe(0,1)
        0.0
        >>> ellipe(0,0)
        0.0
        >>> ellipe(1,0)
        1.0
        >>> ellipe(2+3j,0)
        (2.0 + 3.0j)
        >>> ellipe(1,1); sin(1)
        0.8414709848078965066525023
        0.8414709848078965066525023
        >>> ellipe(pi/2, -0.5); ellipe(-0.5)
        1.751771275694817862026502
        1.751771275694817862026502
        >>> ellipe(pi/2, 1); ellipe(-pi/2, 1)
        1.0
        -1.0
        >>> ellipe(1.5, 1)
        0.9974949866040544309417234

    Comparing with numerical integration::

        >>> z,m = 0.5, 1.25
        >>> ellipe(z,m)
        0.4740152182652628394264449
        >>> quad(lambda t: sqrt(1-m*sin(t)**2), [0,z])
        0.4740152182652628394264449

    The arguments may be complex numbers::

        >>> ellipe(3j, 0.5)
        (0.0 + 7.551991234890371873502105j)
        >>> ellipe(3+4j, 5-6j)
        (24.15299022574220502424466 + 75.2503670480325997418156j)
        >>> k = 35
        >>> z,m = 2+3j, 1.25
        >>> ellipe(z+pi*k,m); ellipe(z,m) + 2*k*ellipe(m)
        (48.30138799412005235090766 + 17.47255216721987688224357j)
        (48.30138799412005235090766 + 17.47255216721987688224357j)

    For `|\Re(z)| < \pi/2`, the function can be expressed as a
    hypergeometric series of two variables
    (see :func:`~mpmath.appellf1`)::

        >>> z,m = 0.5, 0.25
        >>> ellipe(z,m)
        0.4950017030164151928870375
        >>> sin(z)*appellf1(0.5,0.5,-0.5,1.5,sin(z)**2,m*sin(z)**2)
        0.4950017030164151928870376

    """
    if len(args) == 1:
        # Complete integral E(m): delegate to the core implementation
        return ctx._ellipe(args[0])
    else:
        phi, m = args
        z = phi
        if not (ctx.isnormal(z) and ctx.isnormal(m)):
            # Degenerate/nonfinite cases; the arithmetic forms propagate
            # NaNs and produce correctly-typed zeros
            if m == 0:
                return z + m
            if z == 0:
                return z * m
            if m == ctx.inf or m == ctx.ninf:
                return ctx.inf
            raise ValueError
        x = z.real
        # Extra precision to absorb the magnitude of the phase
        ctx.prec += max(0, ctx.mag(x))
        pi = +ctx.pi
        away = abs(x) > pi/2
        if away:
            # Quasi-periodic reduction: E(z + d*pi, m) = 2*d*E(m) + E(z, m)
            d = ctx.nint(x/pi)
            z = z-pi*d
            P = 2*d*ctx.ellipe(m)
        else:
            P = 0
        def terms():
            # Legendre E via Carlson's R_F and R_D (DLMF 19.25.9);
            # sum_accurately controls cancellation between the terms
            c, s = ctx.cos_sin(z)
            x = c**2
            y = 1-m*s**2
            RF = ctx.elliprf(x, y, 1)
            RD = ctx.elliprd(x, y, 1)
            return s*RF, -m*s**3*RD/3
        return ctx.sum_accurately(terms) + P
|
| 1273 |
+
|
| 1274 |
+
@defun_wrapped
def ellippi(ctx, *args):
    r"""
    Called with three arguments `n, \phi, m`, evaluates the Legendre
    incomplete elliptic integral of the third kind

    .. math ::

        \Pi(n; \phi, m) = \int_0^{\phi}
            \frac{dt}{(1-n \sin^2 t) \sqrt{1-m \sin^2 t}} =
            \int_0^{\sin \phi}
            \frac{dt}{(1-nt^2) \sqrt{1-t^2} \sqrt{1-mt^2}}.

    Called with two arguments `n, m`, evaluates the complete
    elliptic integral of the third kind
    `\Pi(n,m) = \Pi(n; \frac{\pi}{2},m)`.

    In the defining integral, it is assumed that the principal branch
    of the square root is taken and that the path of integration avoids
    crossing any branch cuts. Outside `-\pi/2 \le \Re(\phi) \le \pi/2`,
    the function extends quasi-periodically as

    .. math ::

        \Pi(n,\phi+k\pi,m) = 2k\Pi(n,m) + \Pi(n,\phi,m), k \in \mathbb{Z}.

    **Plots**

    .. literalinclude :: /plots/ellippi.py
    .. image :: /plots/ellippi.png

    **Examples for the complete integral**

    Some basic values and limits::

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> ellippi(0,-5); ellipk(-5)
        0.9555039270640439337379334
        0.9555039270640439337379334
        >>> ellippi(inf,2)
        0.0
        >>> ellippi(2,inf)
        0.0
        >>> abs(ellippi(1,5))
        +inf
        >>> abs(ellippi(0.25,1))
        +inf

    Evaluation in terms of simpler functions::

        >>> ellippi(0.25,0.25); ellipe(0.25)/(1-0.25)
        1.956616279119236207279727
        1.956616279119236207279727
        >>> ellippi(3,0); pi/(2*sqrt(-2))
        (0.0 - 1.11072073453959156175397j)
        (0.0 - 1.11072073453959156175397j)
        >>> ellippi(-3,0); pi/(2*sqrt(4))
        0.7853981633974483096156609
        0.7853981633974483096156609

    **Examples for the incomplete integral**

    Basic values and limits::

        >>> ellippi(0.25,-0.5); ellippi(0.25,pi/2,-0.5)
        1.622944760954741603710555
        1.622944760954741603710555
        >>> ellippi(1,0,1)
        0.0
        >>> ellippi(inf,0,1)
        0.0
        >>> ellippi(0,0.25,0.5); ellipf(0.25,0.5)
        0.2513040086544925794134591
        0.2513040086544925794134591
        >>> ellippi(1,1,1); (log(sec(1)+tan(1))+sec(1)*tan(1))/2
        2.054332933256248668692452
        2.054332933256248668692452
        >>> ellippi(0.25, 53*pi/2, 0.75); 53*ellippi(0.25,0.75)
        135.240868757890840755058
        135.240868757890840755058
        >>> ellippi(0.5,pi/4,0.5); 2*ellipe(pi/4,0.5)-1/sqrt(3)
        0.9190227391656969903987269
        0.9190227391656969903987269

    Complex arguments are supported::

        >>> ellippi(0.5, 5+6j-2*pi, -7-8j)
        (-0.3612856620076747660410167 + 0.5217735339984807829755815j)

    Some degenerate cases::

        >>> ellippi(1,1)
        +inf
        >>> ellippi(1,0)
        +inf
        >>> ellippi(1,2,0)
        +inf
        >>> ellippi(1,2,1)
        +inf
        >>> ellippi(1,0,1)
        0.0

    """
    if len(args) == 2:
        # Complete integral: fix the amplitude at pi/2
        n, m = args
        complete = True
        z = phi = ctx.pi/2
    else:
        n, phi, m = args
        complete = False
        z = phi
    if not (ctx.isnormal(n) and ctx.isnormal(z) and ctx.isnormal(m)):
        # Degenerate/nonfinite parameter combinations
        if ctx.isnan(n) or ctx.isnan(z) or ctx.isnan(m):
            raise ValueError
        if complete:
            if m == 0:
                if n == 1:
                    return ctx.inf
                return ctx.pi/(2*ctx.sqrt(1-n))
            if n == 0: return ctx.ellipk(m)
            if ctx.isinf(n) or ctx.isinf(m): return ctx.zero
        else:
            if z == 0: return z
            if ctx.isinf(n): return ctx.zero
            if ctx.isinf(m): return ctx.zero
        if ctx.isinf(n) or ctx.isinf(z) or ctx.isinf(m):
            raise ValueError
    if complete:
        if m == 1:
            if n == 1:
                return ctx.inf
            # Sign of the divergence depends on which side of n = 1 we are
            return -ctx.inf/ctx.sign(n-1)
        away = False
    else:
        x = z.real
        # Extra precision to absorb the magnitude of the phase
        ctx.prec += max(0, ctx.mag(x))
        pi = +ctx.pi
        away = abs(x) > pi/2
    if away:
        # Quasi-periodic reduction: Pi(n, z + d*pi, m) = 2*d*Pi(n,m) + Pi(n,z,m)
        d = ctx.nint(x/pi)
        z = z-pi*d
        P = 2*d*ctx.ellippi(n,m)
        if ctx.isinf(P):
            return ctx.inf
    else:
        P = 0
    def terms():
        # Legendre Pi via Carlson's R_F and R_J (DLMF 19.25.14);
        # sum_accurately controls cancellation between the terms
        if complete:
            c, s = ctx.zero, ctx.one
        else:
            c, s = ctx.cos_sin(z)
        x = c**2
        y = 1-m*s**2
        RF = ctx.elliprf(x, y, 1)
        RJ = ctx.elliprj(x, y, 1, 1-n*s**2)
        return s*RF, n*s**3*RJ/3
    return ctx.sum_accurately(terms) + P
|