Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_atan_ops.h +50 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/addcdiv.h +39 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d_backward_ops.h +39 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_backward_elemt_cuda_dispatch.h +23 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/celu_compositeexplicitautograd_dispatch.h +26 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cummin_native.h +24 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_copy_native.h +22 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_backward_native.h +22 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_compositeimplicitautograd_dispatch.h +25 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_backward.h +30 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool3d_ops.h +39 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_meta_dispatch.h +25 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_batch_norm_native.h +22 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mish_backward_native.h +22 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/rrelu.h +35 -0
- videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_bsr_tensor_compositeimplicitautograd_dispatch.h +26 -0
- vllm/lib/python3.10/site-packages/airportsdata/__init__.py +132 -0
- vllm/lib/python3.10/site-packages/airportsdata/__pycache__/__init__.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/git/db.py +71 -0
- vllm/lib/python3.10/site-packages/mpmath/__pycache__/__init__.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/mpmath/__pycache__/ctx_base.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/mpmath/__pycache__/ctx_fp.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/mpmath/__pycache__/ctx_iv.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/mpmath/__pycache__/ctx_mp.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/mpmath/__pycache__/ctx_mp_python.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/mpmath/__pycache__/identification.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/mpmath/__pycache__/math2.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/mpmath/__pycache__/rational.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/mpmath/__pycache__/usertools.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/mpmath/__pycache__/visualization.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/mpmath/calculus/__pycache__/__init__.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/mpmath/calculus/__pycache__/approximation.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/mpmath/calculus/__pycache__/calculus.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/mpmath/calculus/__pycache__/differentiation.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/mpmath/calculus/__pycache__/extrapolation.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/mpmath/calculus/__pycache__/inverselaplace.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/mpmath/calculus/__pycache__/odes.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/mpmath/calculus/__pycache__/optimization.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/mpmath/calculus/__pycache__/polynomials.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/mpmath/calculus/__pycache__/quadrature.cpython-310.pyc +0 -0
- vllm/lib/python3.10/site-packages/mpmath/calculus/approximation.py +246 -0
- vllm/lib/python3.10/site-packages/mpmath/calculus/calculus.py +6 -0
- vllm/lib/python3.10/site-packages/mpmath/calculus/extrapolation.py +2115 -0
- vllm/lib/python3.10/site-packages/mpmath/calculus/inverselaplace.py +973 -0
- vllm/lib/python3.10/site-packages/mpmath/calculus/odes.py +288 -0
- vllm/lib/python3.10/site-packages/mpmath/calculus/optimization.py +1102 -0
- vllm/lib/python3.10/site-packages/mpmath/calculus/polynomials.py +213 -0
- vllm/lib/python3.10/site-packages/mpmath/calculus/quadrature.py +1115 -0
- vllm/lib/python3.10/site-packages/mpmath/libmp/__init__.py +77 -0
- vllm/lib/python3.10/site-packages/mpmath/libmp/__pycache__/__init__.cpython-310.pyc +0 -0
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/_foreach_atan_ops.h
ADDED
|
@@ -0,0 +1,50 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API _foreach_atan {
|
| 18 |
+
using schema = ::std::vector<at::Tensor> (at::TensorList);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_atan")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_atan(Tensor[] self) -> Tensor[]")
|
| 24 |
+
static ::std::vector<at::Tensor> call(at::TensorList self);
|
| 25 |
+
static ::std::vector<at::Tensor> redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API _foreach_atan_ {
|
| 29 |
+
using schema = void (at::TensorList);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_atan_")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_atan_(Tensor(a!)[] self) -> ()")
|
| 35 |
+
static void call(at::TensorList self);
|
| 36 |
+
static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
struct TORCH_API _foreach_atan_out {
|
| 40 |
+
using schema = void (at::TensorList, at::TensorList);
|
| 41 |
+
using ptr_schema = schema*;
|
| 42 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 43 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::_foreach_atan")
|
| 44 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
|
| 45 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "_foreach_atan.out(Tensor[] self, *, Tensor(a!)[] out) -> ()")
|
| 46 |
+
static void call(at::TensorList self, at::TensorList out);
|
| 47 |
+
static void redispatch(c10::DispatchKeySet dispatchKeySet, at::TensorList self, at::TensorList out);
|
| 48 |
+
};
|
| 49 |
+
|
| 50 |
+
}} // namespace at::_ops
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/addcdiv.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/addcdiv_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
|
| 26 |
+
inline at::Tensor & addcdiv_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) {
|
| 27 |
+
return at::_ops::addcdiv_out::call(self, tensor1, tensor2, value, out);
|
| 28 |
+
}
|
| 29 |
+
// aten::addcdiv.out(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1, Tensor(a!) out) -> Tensor(a!)
|
| 30 |
+
inline at::Tensor & addcdiv_outf(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value, at::Tensor & out) {
|
| 31 |
+
return at::_ops::addcdiv_out::call(self, tensor1, tensor2, value, out);
|
| 32 |
+
}
|
| 33 |
+
|
| 34 |
+
// aten::addcdiv(Tensor self, Tensor tensor1, Tensor tensor2, *, Scalar value=1) -> Tensor
|
| 35 |
+
inline at::Tensor addcdiv(const at::Tensor & self, const at::Tensor & tensor1, const at::Tensor & tensor2, const at::Scalar & value=1) {
|
| 36 |
+
return at::_ops::addcdiv::call(self, tensor1, tensor2, value);
|
| 37 |
+
}
|
| 38 |
+
|
| 39 |
+
}
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/avg_pool2d_backward_ops.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API avg_pool2d_backward_grad_input {
|
| 18 |
+
using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional<int64_t>, at::Tensor &);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::avg_pool2d_backward")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "grad_input")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "avg_pool2d_backward.grad_input(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override, *, Tensor(a!) grad_input) -> Tensor(a!)")
|
| 24 |
+
static at::Tensor & call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & grad_input);
|
| 25 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override, at::Tensor & grad_input);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API avg_pool2d_backward {
|
| 29 |
+
using schema = at::Tensor (const at::Tensor &, const at::Tensor &, at::IntArrayRef, at::IntArrayRef, at::IntArrayRef, bool, bool, c10::optional<int64_t>);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::avg_pool2d_backward")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "avg_pool2d_backward(Tensor grad_output, Tensor self, int[2] kernel_size, int[2] stride, int[2] padding, bool ceil_mode, bool count_include_pad, int? divisor_override) -> Tensor")
|
| 35 |
+
static at::Tensor call(const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override);
|
| 36 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & grad_output, const at::Tensor & self, at::IntArrayRef kernel_size, at::IntArrayRef stride, at::IntArrayRef padding, bool ceil_mode, bool count_include_pad, c10::optional<int64_t> divisor_override);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
}} // namespace at::_ops
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/batch_norm_backward_elemt_cuda_dispatch.h
ADDED
|
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace cuda {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor batch_norm_backward_elemt(const at::Tensor & grad_out, const at::Tensor & input, const at::Tensor & mean, const at::Tensor & invstd, const c10::optional<at::Tensor> & weight, const at::Tensor & sum_dy, const at::Tensor & sum_dy_xmu, const at::Tensor & count);
|
| 21 |
+
|
| 22 |
+
} // namespace cuda
|
| 23 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/celu_compositeexplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeexplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor celu(const at::Tensor & self, const at::Scalar & alpha=1.0);
|
| 21 |
+
TORCH_API at::Tensor & celu_out(at::Tensor & out, const at::Tensor & self, const at::Scalar & alpha=1.0);
|
| 22 |
+
TORCH_API at::Tensor & celu_outf(const at::Tensor & self, const at::Scalar & alpha, at::Tensor & out);
|
| 23 |
+
TORCH_API at::Tensor & celu_(at::Tensor & self, const at::Scalar & alpha=1.0);
|
| 24 |
+
|
| 25 |
+
} // namespace compositeexplicitautograd
|
| 26 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/cummin_native.h
ADDED
|
@@ -0,0 +1,24 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API ::std::tuple<at::Tensor,at::Tensor> cummin(const at::Tensor & self, int64_t dim);
|
| 20 |
+
TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> cummin_out(const at::Tensor & self, int64_t dim, at::Tensor & values, at::Tensor & indices);
|
| 21 |
+
TORCH_API ::std::tuple<at::Tensor,at::Tensor> cummin(const at::Tensor & self, at::Dimname dim);
|
| 22 |
+
TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> cummin_out(const at::Tensor & self, at::Dimname dim, at::Tensor & values, at::Tensor & indices);
|
| 23 |
+
} // namespace native
|
| 24 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/diagonal_copy_native.h
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor & diagonal_copy_out(const at::Tensor & self, int64_t offset, int64_t dim1, int64_t dim2, at::Tensor & out);
|
| 20 |
+
TORCH_API at::Tensor diagonal_copy(const at::Tensor & self, int64_t offset=0, int64_t dim1=0, int64_t dim2=1);
|
| 21 |
+
} // namespace native
|
| 22 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/huber_loss_backward_native.h
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor huber_loss_backward(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta);
|
| 20 |
+
TORCH_API at::Tensor & huber_loss_backward_out(const at::Tensor & grad_output, const at::Tensor & self, const at::Tensor & target, int64_t reduction, double delta, at::Tensor & grad_input);
|
| 21 |
+
} // namespace native
|
| 22 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/linalg_ldl_factor_compositeimplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeimplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API ::std::tuple<at::Tensor,at::Tensor> linalg_ldl_factor(const at::Tensor & self, bool hermitian=false);
|
| 21 |
+
TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_ldl_factor_out(at::Tensor & LD, at::Tensor & pivots, const at::Tensor & self, bool hermitian=false);
|
| 22 |
+
TORCH_API ::std::tuple<at::Tensor &,at::Tensor &> linalg_ldl_factor_outf(const at::Tensor & self, bool hermitian, at::Tensor & LD, at::Tensor & pivots);
|
| 23 |
+
|
| 24 |
+
} // namespace compositeimplicitautograd
|
| 25 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/matrix_exp_backward.h
ADDED
|
@@ -0,0 +1,30 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/matrix_exp_backward_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::matrix_exp_backward(Tensor self, Tensor grad) -> Tensor
|
| 26 |
+
inline at::Tensor matrix_exp_backward(const at::Tensor & self, const at::Tensor & grad) {
|
| 27 |
+
return at::_ops::matrix_exp_backward::call(self, grad);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
}
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/max_unpool3d_ops.h
ADDED
|
@@ -0,0 +1,39 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Operator.h
|
| 4 |
+
|
| 5 |
+
#include <tuple>
|
| 6 |
+
#include <vector>
|
| 7 |
+
|
| 8 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 9 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 10 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 11 |
+
#include <ATen/core/ATen_fwd.h>
|
| 12 |
+
|
| 13 |
+
namespace at {
|
| 14 |
+
namespace _ops {
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
struct TORCH_API max_unpool3d_out {
|
| 18 |
+
using schema = at::Tensor & (const at::Tensor &, const at::Tensor &, c10::SymIntArrayRef, at::IntArrayRef, at::IntArrayRef, at::Tensor &);
|
| 19 |
+
using ptr_schema = schema*;
|
| 20 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 21 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::max_unpool3d")
|
| 22 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "out")
|
| 23 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "max_unpool3d.out(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding, *, Tensor(a!) out) -> Tensor(a!)")
|
| 24 |
+
static at::Tensor & call(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out);
|
| 25 |
+
static at::Tensor & redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding, at::Tensor & out);
|
| 26 |
+
};
|
| 27 |
+
|
| 28 |
+
struct TORCH_API max_unpool3d {
|
| 29 |
+
using schema = at::Tensor (const at::Tensor &, const at::Tensor &, c10::SymIntArrayRef, at::IntArrayRef, at::IntArrayRef);
|
| 30 |
+
using ptr_schema = schema*;
|
| 31 |
+
// See Note [static constexpr char* members for windows NVCC]
|
| 32 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(name, "aten::max_unpool3d")
|
| 33 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(overload_name, "")
|
| 34 |
+
STATIC_CONSTEXPR_STR_INL_EXCEPT_WIN_CUDA(schema_str, "max_unpool3d(Tensor self, Tensor indices, SymInt[3] output_size, int[3] stride, int[3] padding) -> Tensor")
|
| 35 |
+
static at::Tensor call(const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding);
|
| 36 |
+
static at::Tensor redispatch(c10::DispatchKeySet dispatchKeySet, const at::Tensor & self, const at::Tensor & indices, c10::SymIntArrayRef output_size, at::IntArrayRef stride, at::IntArrayRef padding);
|
| 37 |
+
};
|
| 38 |
+
|
| 39 |
+
}} // namespace at::_ops
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/maximum_meta_dispatch.h
ADDED
|
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace meta {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor maximum(const at::Tensor & self, const at::Tensor & other);
|
| 21 |
+
TORCH_API at::Tensor & maximum_out(at::Tensor & out, const at::Tensor & self, const at::Tensor & other);
|
| 22 |
+
TORCH_API at::Tensor & maximum_outf(const at::Tensor & self, const at::Tensor & other, at::Tensor & out);
|
| 23 |
+
|
| 24 |
+
} // namespace meta
|
| 25 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/miopen_batch_norm_native.h
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API ::std::tuple<at::Tensor &,at::Tensor &,at::Tensor &> miopen_batch_norm_out(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon, at::Tensor & out0, at::Tensor & out1, at::Tensor & out2);
|
| 20 |
+
TORCH_API ::std::tuple<at::Tensor,at::Tensor,at::Tensor> miopen_batch_norm(const at::Tensor & input, const at::Tensor & weight, const c10::optional<at::Tensor> & bias, const c10::optional<at::Tensor> & running_mean, const c10::optional<at::Tensor> & running_var, bool training, double exponential_average_factor, double epsilon);
|
| 21 |
+
} // namespace native
|
| 22 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/mish_backward_native.h
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from NativeFunction.h
|
| 4 |
+
|
| 5 |
+
#include <c10/core/Scalar.h>
|
| 6 |
+
#include <c10/core/Storage.h>
|
| 7 |
+
#include <c10/core/TensorOptions.h>
|
| 8 |
+
#include <c10/util/Deprecated.h>
|
| 9 |
+
#include <c10/util/Optional.h>
|
| 10 |
+
#include <c10/core/QScheme.h>
|
| 11 |
+
#include <ATen/core/Reduction.h>
|
| 12 |
+
#include <ATen/core/Tensor.h>
|
| 13 |
+
#include <tuple>
|
| 14 |
+
#include <vector>
|
| 15 |
+
|
| 16 |
+
|
| 17 |
+
namespace at {
|
| 18 |
+
namespace native {
|
| 19 |
+
TORCH_API at::Tensor math_mish_backward(const at::Tensor & grad_output, const at::Tensor & self);
|
| 20 |
+
TORCH_API at::Tensor mish_backward(const at::Tensor & grad_output, const at::Tensor & self);
|
| 21 |
+
} // namespace native
|
| 22 |
+
} // namespace at
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/rrelu.h
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
|
| 3 |
+
// @generated by torchgen/gen.py from Function.h
|
| 4 |
+
|
| 5 |
+
#include <ATen/Context.h>
|
| 6 |
+
#include <ATen/DeviceGuard.h>
|
| 7 |
+
#include <ATen/TensorUtils.h>
|
| 8 |
+
#include <ATen/TracerMode.h>
|
| 9 |
+
#include <ATen/core/Generator.h>
|
| 10 |
+
#include <ATen/core/Reduction.h>
|
| 11 |
+
#include <ATen/core/Tensor.h>
|
| 12 |
+
#include <c10/core/Scalar.h>
|
| 13 |
+
#include <c10/core/Storage.h>
|
| 14 |
+
#include <c10/core/TensorOptions.h>
|
| 15 |
+
#include <c10/util/Deprecated.h>
|
| 16 |
+
#include <c10/util/Optional.h>
|
| 17 |
+
|
| 18 |
+
|
| 19 |
+
|
| 20 |
+
#include <ATen/ops/rrelu_ops.h>
|
| 21 |
+
|
| 22 |
+
namespace at {
|
| 23 |
+
|
| 24 |
+
|
| 25 |
+
// aten::rrelu(Tensor self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor
|
| 26 |
+
inline at::Tensor rrelu(const at::Tensor & self, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, c10::optional<at::Generator> generator=c10::nullopt) {
|
| 27 |
+
return at::_ops::rrelu::call(self, lower, upper, training, generator);
|
| 28 |
+
}
|
| 29 |
+
|
| 30 |
+
// aten::rrelu_(Tensor(a!) self, Scalar lower=0.125, Scalar upper=0.3333333333333333, bool training=False, Generator? generator=None) -> Tensor(a!)
|
| 31 |
+
inline at::Tensor & rrelu_(at::Tensor & self, const at::Scalar & lower=0.125, const at::Scalar & upper=0.3333333333333333, bool training=false, c10::optional<at::Generator> generator=c10::nullopt) {
|
| 32 |
+
return at::_ops::rrelu_::call(self, lower, upper, training, generator);
|
| 33 |
+
}
|
| 34 |
+
|
| 35 |
+
}
|
videollama2/lib/python3.10/site-packages/torch/include/ATen/ops/sparse_bsr_tensor_compositeimplicitautograd_dispatch.h
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#pragma once
|
| 2 |
+
// @generated by torchgen/gen.py from DispatchKeyFunction.h
|
| 3 |
+
|
| 4 |
+
// NB: The implementing C++ file is RegisterDispatchKey.cpp
|
| 5 |
+
|
| 6 |
+
// The only #includes we need are for custom classes that have defaults in the C++ API
|
| 7 |
+
#include <c10/core/MemoryFormat.h>
|
| 8 |
+
#include <c10/core/Scalar.h>
|
| 9 |
+
#include <ATen/core/Reduction.h>
|
| 10 |
+
|
| 11 |
+
// Forward declarations of any types needed in the operator signatures.
|
| 12 |
+
// We can't directly include these classes because it will cause circular include dependencies.
|
| 13 |
+
// This file is included by TensorBody.h, which defines the Tensor class.
|
| 14 |
+
#include <ATen/core/ATen_fwd.h>
|
| 15 |
+
|
| 16 |
+
namespace at {
|
| 17 |
+
|
| 18 |
+
namespace compositeimplicitautograd {
|
| 19 |
+
|
| 20 |
+
TORCH_API at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, at::TensorOptions options);
|
| 21 |
+
TORCH_API at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::IntArrayRef size, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
|
| 22 |
+
TORCH_API at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, at::TensorOptions options);
|
| 23 |
+
TORCH_API at::Tensor sparse_bsr_tensor(const at::Tensor & crow_indices, const at::Tensor & col_indices, const at::Tensor & values, c10::optional<at::ScalarType> dtype, c10::optional<at::Layout> layout, c10::optional<at::Device> device, c10::optional<bool> pin_memory);
|
| 24 |
+
|
| 25 |
+
} // namespace compositeimplicitautograd
|
| 26 |
+
} // namespace at
|
vllm/lib/python3.10/site-packages/airportsdata/__init__.py
ADDED
|
@@ -0,0 +1,132 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
#!/usr/bin/env python3
|
| 2 |
+
|
| 3 |
+
"""
|
| 4 |
+
Extensive database of location and timezone data for nearly every airport and landing strip in the world.
|
| 5 |
+
"""
|
| 6 |
+
from __future__ import annotations
|
| 7 |
+
|
| 8 |
+
import csv
|
| 9 |
+
from pathlib import Path
|
| 10 |
+
from typing import Dict, Literal, TypedDict
|
| 11 |
+
|
| 12 |
+
__project_name__ = __package__
|
| 13 |
+
__min_python_version__ = (3, 9) # minimum version of Python required to run; supported until 4 October 2024
|
| 14 |
+
__version__ = '20241001' # numbering follows the release date
|
| 15 |
+
__author__ = 'Mike Borsetti <mike@borsetti.com>'
|
| 16 |
+
__copyright__ = 'Copyright 2020- Mike Borsetti'
|
| 17 |
+
__license__ = 'MIT'
|
| 18 |
+
__url__ = f'https://github.com/mborsetti/{__project_name__}'
|
| 19 |
+
|
| 20 |
+
Airport = TypedDict(
|
| 21 |
+
'Airport',
|
| 22 |
+
{
|
| 23 |
+
'icao': str,
|
| 24 |
+
'iata': str,
|
| 25 |
+
'name': str,
|
| 26 |
+
'city': str,
|
| 27 |
+
'subd': str,
|
| 28 |
+
'country': str,
|
| 29 |
+
'elevation': float,
|
| 30 |
+
'lat': float,
|
| 31 |
+
'lon': float,
|
| 32 |
+
'tz': str,
|
| 33 |
+
'lid': str,
|
| 34 |
+
},
|
| 35 |
+
)
|
| 36 |
+
CodeType = Literal['ICAO', 'IATA', 'LID']
|
| 37 |
+
IATAMAC = TypedDict('IATAMAC', {'name': str, 'country': str, 'airports': Dict[str, Airport]})
|
| 38 |
+
|
| 39 |
+
|
| 40 |
+
def load(code_type: CodeType = 'ICAO') -> Dict[str, 'Airport']:
|
| 41 |
+
"""Loads airport data into a dict
|
| 42 |
+
|
| 43 |
+
:param code_type: optional argument defining the key in the dictionary: 'ICAO' (default if omitted),
|
| 44 |
+
'IATA' (for IATA Location Codes) or 'LID' (for U.S. FAA Location Identifiers).
|
| 45 |
+
|
| 46 |
+
:return: a dict of dicts, each entry having the following keys:
|
| 47 |
+
'icao': ICAO 4-letter Location Indicator or 4-alphanumeric FAA/TC LID
|
| 48 |
+
'iata': IATA 3-letter Location Code or an empty string
|
| 49 |
+
'name': Official name (diacritized latin script)
|
| 50 |
+
'city': City
|
| 51 |
+
'subd': Subdivision (e.g. state, province, region, etc.)
|
| 52 |
+
'country': ISO 3166-1 alpha 2-code (plus 'XK' for Kosovo)
|
| 53 |
+
'elevation': MSL elevation (the highest point of the landing area) in feet
|
| 54 |
+
'lat': Latitude (decimal)
|
| 55 |
+
'lon': Longitude (decimal)
|
| 56 |
+
'tz': Timezone expressed as a tz database name (IANA-compliant) or empty string for country 'AQ' (Antarctica).
|
| 57 |
+
Originally sourced from [TimeZoneDB](https://timezonedb.com)
|
| 58 |
+
'lid': The FAA Location Identifier (for US country only; others is blank)
|
| 59 |
+
"""
|
| 60 |
+
# with open(os.path.join(dir, 'airports.json'), encoding='utf8') as f:
|
| 61 |
+
# airports = json.load(f)
|
| 62 |
+
# if code_type.lower() == 'icao':
|
| 63 |
+
# return airports
|
| 64 |
+
# else:
|
| 65 |
+
# return {airport['iata']: airport for airport in dict(airports).values() if airport['iata']}
|
| 66 |
+
#
|
| 67 |
+
#
|
| 68 |
+
key = code_type.lower()
|
| 69 |
+
if key not in ('icao', 'iata', 'lid'):
|
| 70 |
+
raise ValueError(f'code_type must be one of ICAO, IATA or LID; received {code_type}')
|
| 71 |
+
this_dir = Path(__file__).parent
|
| 72 |
+
airports: Dict[str, Airport] = {}
|
| 73 |
+
with this_dir.joinpath('airports.csv').open(encoding='utf8') as f:
|
| 74 |
+
reader = csv.DictReader(f, quoting=csv.QUOTE_NONNUMERIC)
|
| 75 |
+
for row in reader:
|
| 76 |
+
airports[row[key]] = row # type: ignore[assignment]
|
| 77 |
+
airports.pop('', None)
|
| 78 |
+
return airports
|
| 79 |
+
|
| 80 |
+
|
| 81 |
+
def load_iata_macs() -> dict[str, IATAMAC]:
|
| 82 |
+
"""Loads IATA's Multi Airport Cities (for the "purpose of pricing, fare construction and mileage creation")
|
| 83 |
+
data into a dict.
|
| 84 |
+
|
| 85 |
+
:return: a dict of dicts, each entry having the following keys:
|
| 86 |
+
'name': The IATA city name,
|
| 87 |
+
'country': The IATA country code,
|
| 88 |
+
'airports': a dict with the same data returned by load() for each airport that makes up the Multi Airport
|
| 89 |
+
City, where the key is the airport's IATA code.
|
| 90 |
+
"""
|
| 91 |
+
# with open(os.path.join(dir, 'airports.json'), encoding='utf8') as f:
|
| 92 |
+
# airports = json.load(f)
|
| 93 |
+
# if code_type.lower() == 'icao':
|
| 94 |
+
# return airports
|
| 95 |
+
# else:
|
| 96 |
+
# return {airport['iata']: airport for airport in dict(airports).values() if airport['iata']}
|
| 97 |
+
#
|
| 98 |
+
#
|
| 99 |
+
airports = load('IATA')
|
| 100 |
+
this_dir = Path(__file__).parent
|
| 101 |
+
iata_macs: dict[str, IATAMAC] = {}
|
| 102 |
+
row_d: dict[str, str]
|
| 103 |
+
with this_dir.joinpath('iata_macs.csv').open(encoding='utf8') as f:
|
| 104 |
+
reader = csv.DictReader(f, quoting=csv.QUOTE_NONNUMERIC)
|
| 105 |
+
for row_d in reader:
|
| 106 |
+
for key, value in row_d.items():
|
| 107 |
+
if key == 'Country':
|
| 108 |
+
country = value
|
| 109 |
+
elif key == 'City Code':
|
| 110 |
+
multi_airport_city_code = value
|
| 111 |
+
elif key == 'City Name':
|
| 112 |
+
name = value
|
| 113 |
+
elif key == 'Airport Code':
|
| 114 |
+
airport = value
|
| 115 |
+
if multi_airport_city_code not in iata_macs:
|
| 116 |
+
iata_macs[multi_airport_city_code] = { # type: ignore[assignment]
|
| 117 |
+
'name': name,
|
| 118 |
+
'country': country,
|
| 119 |
+
'airports': {airport: airports[airport]},
|
| 120 |
+
}
|
| 121 |
+
else:
|
| 122 |
+
iata_macs[multi_airport_city_code]['airports'][airport] = airports[airport]
|
| 123 |
+
return iata_macs
|
| 124 |
+
|
| 125 |
+
|
| 126 |
+
# Python 3.9 code used to save the dict to CSV:
|
| 127 |
+
# with open('airports.csv', 'w', newline='') as f:
|
| 128 |
+
# fieldnames = airports[list(airports.keys())[0]].keys()
|
| 129 |
+
# writer = csv.DictWriter(f, fieldnames=fieldnames, quoting=csv.QUOTE_NONNUMERIC)
|
| 130 |
+
# writer.writeheader()
|
| 131 |
+
# for data in airports.values():
|
| 132 |
+
# writer.writerow(data)
|
vllm/lib/python3.10/site-packages/airportsdata/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (3.79 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/git/db.py
ADDED
|
@@ -0,0 +1,71 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# This module is part of GitPython and is released under the
|
| 2 |
+
# 3-Clause BSD License: https://opensource.org/license/bsd-3-clause/
|
| 3 |
+
|
| 4 |
+
"""Module with our own gitdb implementation - it uses the git command."""
|
| 5 |
+
|
| 6 |
+
__all__ = ["GitCmdObjectDB", "GitDB"]
|
| 7 |
+
|
| 8 |
+
from gitdb.base import OInfo, OStream
|
| 9 |
+
from gitdb.db import GitDB, LooseObjectDB
|
| 10 |
+
from gitdb.exc import BadObject
|
| 11 |
+
|
| 12 |
+
from git.util import bin_to_hex, hex_to_bin
|
| 13 |
+
from git.exc import GitCommandError
|
| 14 |
+
|
| 15 |
+
# typing-------------------------------------------------
|
| 16 |
+
|
| 17 |
+
from typing import TYPE_CHECKING
|
| 18 |
+
|
| 19 |
+
from git.types import PathLike
|
| 20 |
+
|
| 21 |
+
if TYPE_CHECKING:
|
| 22 |
+
from git.cmd import Git
|
| 23 |
+
|
| 24 |
+
# --------------------------------------------------------
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
class GitCmdObjectDB(LooseObjectDB):
|
| 28 |
+
"""A database representing the default git object store, which includes loose
|
| 29 |
+
objects, pack files and an alternates file.
|
| 30 |
+
|
| 31 |
+
It will create objects only in the loose object database.
|
| 32 |
+
"""
|
| 33 |
+
|
| 34 |
+
def __init__(self, root_path: PathLike, git: "Git") -> None:
|
| 35 |
+
"""Initialize this instance with the root and a git command."""
|
| 36 |
+
super().__init__(root_path)
|
| 37 |
+
self._git = git
|
| 38 |
+
|
| 39 |
+
def info(self, binsha: bytes) -> OInfo:
|
| 40 |
+
"""Get a git object header (using git itself)."""
|
| 41 |
+
hexsha, typename, size = self._git.get_object_header(bin_to_hex(binsha))
|
| 42 |
+
return OInfo(hex_to_bin(hexsha), typename, size)
|
| 43 |
+
|
| 44 |
+
def stream(self, binsha: bytes) -> OStream:
|
| 45 |
+
"""Get git object data as a stream supporting ``read()`` (using git itself)."""
|
| 46 |
+
hexsha, typename, size, stream = self._git.stream_object_data(bin_to_hex(binsha))
|
| 47 |
+
return OStream(hex_to_bin(hexsha), typename, size, stream)
|
| 48 |
+
|
| 49 |
+
# { Interface
|
| 50 |
+
|
| 51 |
+
def partial_to_complete_sha_hex(self, partial_hexsha: str) -> bytes:
|
| 52 |
+
"""
|
| 53 |
+
:return:
|
| 54 |
+
Full binary 20 byte sha from the given partial hexsha
|
| 55 |
+
|
| 56 |
+
:raise gitdb.exc.AmbiguousObjectName:
|
| 57 |
+
|
| 58 |
+
:raise gitdb.exc.BadObject:
|
| 59 |
+
|
| 60 |
+
:note:
|
| 61 |
+
Currently we only raise :exc:`~gitdb.exc.BadObject` as git does not
|
| 62 |
+
communicate ambiguous objects separately.
|
| 63 |
+
"""
|
| 64 |
+
try:
|
| 65 |
+
hexsha, _typename, _size = self._git.get_object_header(partial_hexsha)
|
| 66 |
+
return hex_to_bin(hexsha)
|
| 67 |
+
except (GitCommandError, ValueError) as e:
|
| 68 |
+
raise BadObject(partial_hexsha) from e
|
| 69 |
+
# END handle exceptions
|
| 70 |
+
|
| 71 |
+
# } END interface
|
vllm/lib/python3.10/site-packages/mpmath/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (8.26 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/mpmath/__pycache__/ctx_base.cpython-310.pyc
ADDED
|
Binary file (16.3 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/mpmath/__pycache__/ctx_fp.cpython-310.pyc
ADDED
|
Binary file (7.81 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/mpmath/__pycache__/ctx_iv.cpython-310.pyc
ADDED
|
Binary file (20.5 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/mpmath/__pycache__/ctx_mp.cpython-310.pyc
ADDED
|
Binary file (43 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/mpmath/__pycache__/ctx_mp_python.cpython-310.pyc
ADDED
|
Binary file (34.5 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/mpmath/__pycache__/identification.cpython-310.pyc
ADDED
|
Binary file (28.3 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/mpmath/__pycache__/math2.cpython-310.pyc
ADDED
|
Binary file (15 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/mpmath/__pycache__/rational.cpython-310.pyc
ADDED
|
Binary file (5.96 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/mpmath/__pycache__/usertools.cpython-310.pyc
ADDED
|
Binary file (3.59 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/mpmath/__pycache__/visualization.cpython-310.pyc
ADDED
|
Binary file (9.7 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/mpmath/calculus/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (342 Bytes). View file
|
|
|
vllm/lib/python3.10/site-packages/mpmath/calculus/__pycache__/approximation.cpython-310.pyc
ADDED
|
Binary file (9.11 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/mpmath/calculus/__pycache__/calculus.cpython-310.pyc
ADDED
|
Binary file (466 Bytes). View file
|
|
|
vllm/lib/python3.10/site-packages/mpmath/calculus/__pycache__/differentiation.cpython-310.pyc
ADDED
|
Binary file (20.4 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/mpmath/calculus/__pycache__/extrapolation.cpython-310.pyc
ADDED
|
Binary file (69.6 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/mpmath/calculus/__pycache__/inverselaplace.cpython-310.pyc
ADDED
|
Binary file (30.7 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/mpmath/calculus/__pycache__/odes.cpython-310.pyc
ADDED
|
Binary file (10.2 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/mpmath/calculus/__pycache__/optimization.cpython-310.pyc
ADDED
|
Binary file (29.9 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/mpmath/calculus/__pycache__/polynomials.cpython-310.pyc
ADDED
|
Binary file (7.72 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/mpmath/calculus/__pycache__/quadrature.cpython-310.pyc
ADDED
|
Binary file (39.5 kB). View file
|
|
|
vllm/lib/python3.10/site-packages/mpmath/calculus/approximation.py
ADDED
|
@@ -0,0 +1,246 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ..libmp.backend import xrange
|
| 2 |
+
from .calculus import defun
|
| 3 |
+
|
| 4 |
+
#----------------------------------------------------------------------------#
|
| 5 |
+
# Approximation methods #
|
| 6 |
+
#----------------------------------------------------------------------------#
|
| 7 |
+
|
| 8 |
+
# The Chebyshev approximation formula is given at:
|
| 9 |
+
# http://mathworld.wolfram.com/ChebyshevApproximationFormula.html
|
| 10 |
+
|
| 11 |
+
# The only major changes in the following code is that we return the
|
| 12 |
+
# expanded polynomial coefficients instead of Chebyshev coefficients,
|
| 13 |
+
# and that we automatically transform [a,b] -> [-1,1] and back
|
| 14 |
+
# for convenience.
|
| 15 |
+
|
| 16 |
+
# Coefficient in Chebyshev approximation
|
| 17 |
+
def chebcoeff(ctx,f,a,b,j,N):
|
| 18 |
+
s = ctx.mpf(0)
|
| 19 |
+
h = ctx.mpf(0.5)
|
| 20 |
+
for k in range(1, N+1):
|
| 21 |
+
t = ctx.cospi((k-h)/N)
|
| 22 |
+
s += f(t*(b-a)*h + (b+a)*h) * ctx.cospi(j*(k-h)/N)
|
| 23 |
+
return 2*s/N
|
| 24 |
+
|
| 25 |
+
# Generate Chebyshev polynomials T_n(ax+b) in expanded form
|
| 26 |
+
def chebT(ctx, a=1, b=0):
|
| 27 |
+
Tb = [1]
|
| 28 |
+
yield Tb
|
| 29 |
+
Ta = [b, a]
|
| 30 |
+
while 1:
|
| 31 |
+
yield Ta
|
| 32 |
+
# Recurrence: T[n+1](ax+b) = 2*(ax+b)*T[n](ax+b) - T[n-1](ax+b)
|
| 33 |
+
Tmp = [0] + [2*a*t for t in Ta]
|
| 34 |
+
for i, c in enumerate(Ta): Tmp[i] += 2*b*c
|
| 35 |
+
for i, c in enumerate(Tb): Tmp[i] -= c
|
| 36 |
+
Ta, Tb = Tmp, Ta
|
| 37 |
+
|
| 38 |
+
@defun
|
| 39 |
+
def chebyfit(ctx, f, interval, N, error=False):
|
| 40 |
+
r"""
|
| 41 |
+
Computes a polynomial of degree `N-1` that approximates the
|
| 42 |
+
given function `f` on the interval `[a, b]`. With ``error=True``,
|
| 43 |
+
:func:`~mpmath.chebyfit` also returns an accurate estimate of the
|
| 44 |
+
maximum absolute error; that is, the maximum value of
|
| 45 |
+
`|f(x) - P(x)|` for `x \in [a, b]`.
|
| 46 |
+
|
| 47 |
+
:func:`~mpmath.chebyfit` uses the Chebyshev approximation formula,
|
| 48 |
+
which gives a nearly optimal solution: that is, the maximum
|
| 49 |
+
error of the approximating polynomial is very close to
|
| 50 |
+
the smallest possible for any polynomial of the same degree.
|
| 51 |
+
|
| 52 |
+
Chebyshev approximation is very useful if one needs repeated
|
| 53 |
+
evaluation of an expensive function, such as function defined
|
| 54 |
+
implicitly by an integral or a differential equation. (For
|
| 55 |
+
example, it could be used to turn a slow mpmath function
|
| 56 |
+
into a fast machine-precision version of the same.)
|
| 57 |
+
|
| 58 |
+
**Examples**
|
| 59 |
+
|
| 60 |
+
Here we use :func:`~mpmath.chebyfit` to generate a low-degree approximation
|
| 61 |
+
of `f(x) = \cos(x)`, valid on the interval `[1, 2]`::
|
| 62 |
+
|
| 63 |
+
>>> from mpmath import *
|
| 64 |
+
>>> mp.dps = 15; mp.pretty = True
|
| 65 |
+
>>> poly, err = chebyfit(cos, [1, 2], 5, error=True)
|
| 66 |
+
>>> nprint(poly)
|
| 67 |
+
[0.00291682, 0.146166, -0.732491, 0.174141, 0.949553]
|
| 68 |
+
>>> nprint(err, 12)
|
| 69 |
+
1.61351758081e-5
|
| 70 |
+
|
| 71 |
+
The polynomial can be evaluated using ``polyval``::
|
| 72 |
+
|
| 73 |
+
>>> nprint(polyval(poly, 1.6), 12)
|
| 74 |
+
-0.0291858904138
|
| 75 |
+
>>> nprint(cos(1.6), 12)
|
| 76 |
+
-0.0291995223013
|
| 77 |
+
|
| 78 |
+
Sampling the true error at 1000 points shows that the error
|
| 79 |
+
estimate generated by ``chebyfit`` is remarkably good::
|
| 80 |
+
|
| 81 |
+
>>> error = lambda x: abs(cos(x) - polyval(poly, x))
|
| 82 |
+
>>> nprint(max([error(1+n/1000.) for n in range(1000)]), 12)
|
| 83 |
+
1.61349954245e-5
|
| 84 |
+
|
| 85 |
+
**Choice of degree**
|
| 86 |
+
|
| 87 |
+
The degree `N` can be set arbitrarily high, to obtain an
|
| 88 |
+
arbitrarily good approximation. As a rule of thumb, an
|
| 89 |
+
`N`-term Chebyshev approximation is good to `N/(b-a)` decimal
|
| 90 |
+
places on a unit interval (although this depends on how
|
| 91 |
+
well-behaved `f` is). The cost grows accordingly: ``chebyfit``
|
| 92 |
+
evaluates the function `(N^2)/2` times to compute the
|
| 93 |
+
coefficients and an additional `N` times to estimate the error.
|
| 94 |
+
|
| 95 |
+
**Possible issues**
|
| 96 |
+
|
| 97 |
+
One should be careful to use a sufficiently high working
|
| 98 |
+
precision both when calling ``chebyfit`` and when evaluating
|
| 99 |
+
the resulting polynomial, as the polynomial is sometimes
|
| 100 |
+
ill-conditioned. It is for example difficult to reach
|
| 101 |
+
15-digit accuracy when evaluating the polynomial using
|
| 102 |
+
machine precision floats, no matter the theoretical
|
| 103 |
+
accuracy of the polynomial. (The option to return the
|
| 104 |
+
coefficients in Chebyshev form should be made available
|
| 105 |
+
in the future.)
|
| 106 |
+
|
| 107 |
+
It is important to note the Chebyshev approximation works
|
| 108 |
+
poorly if `f` is not smooth. A function containing singularities,
|
| 109 |
+
rapid oscillation, etc can be approximated more effectively by
|
| 110 |
+
multiplying it by a weight function that cancels out the
|
| 111 |
+
nonsmooth features, or by dividing the interval into several
|
| 112 |
+
segments.
|
| 113 |
+
"""
|
| 114 |
+
a, b = ctx._as_points(interval)
|
| 115 |
+
orig = ctx.prec
|
| 116 |
+
try:
|
| 117 |
+
ctx.prec = orig + int(N**0.5) + 20
|
| 118 |
+
c = [chebcoeff(ctx,f,a,b,k,N) for k in range(N)]
|
| 119 |
+
d = [ctx.zero] * N
|
| 120 |
+
d[0] = -c[0]/2
|
| 121 |
+
h = ctx.mpf(0.5)
|
| 122 |
+
T = chebT(ctx, ctx.mpf(2)/(b-a), ctx.mpf(-1)*(b+a)/(b-a))
|
| 123 |
+
for (k, Tk) in zip(range(N), T):
|
| 124 |
+
for i in range(len(Tk)):
|
| 125 |
+
d[i] += c[k]*Tk[i]
|
| 126 |
+
d = d[::-1]
|
| 127 |
+
# Estimate maximum error
|
| 128 |
+
err = ctx.zero
|
| 129 |
+
for k in range(N):
|
| 130 |
+
x = ctx.cos(ctx.pi*k/N) * (b-a)*h + (b+a)*h
|
| 131 |
+
err = max(err, abs(f(x) - ctx.polyval(d, x)))
|
| 132 |
+
finally:
|
| 133 |
+
ctx.prec = orig
|
| 134 |
+
if error:
|
| 135 |
+
return d, +err
|
| 136 |
+
else:
|
| 137 |
+
return d
|
| 138 |
+
|
| 139 |
+
@defun
|
| 140 |
+
def fourier(ctx, f, interval, N):
|
| 141 |
+
r"""
|
| 142 |
+
Computes the Fourier series of degree `N` of the given function
|
| 143 |
+
on the interval `[a, b]`. More precisely, :func:`~mpmath.fourier` returns
|
| 144 |
+
two lists `(c, s)` of coefficients (the cosine series and sine
|
| 145 |
+
series, respectively), such that
|
| 146 |
+
|
| 147 |
+
.. math ::
|
| 148 |
+
|
| 149 |
+
f(x) \sim \sum_{k=0}^N
|
| 150 |
+
c_k \cos(k m x) + s_k \sin(k m x)
|
| 151 |
+
|
| 152 |
+
where `m = 2 \pi / (b-a)`.
|
| 153 |
+
|
| 154 |
+
Note that many texts define the first coefficient as `2 c_0` instead
|
| 155 |
+
of `c_0`. The easiest way to evaluate the computed series correctly
|
| 156 |
+
is to pass it to :func:`~mpmath.fourierval`.
|
| 157 |
+
|
| 158 |
+
**Examples**
|
| 159 |
+
|
| 160 |
+
The function `f(x) = x` has a simple Fourier series on the standard
|
| 161 |
+
interval `[-\pi, \pi]`. The cosine coefficients are all zero (because
|
| 162 |
+
the function has odd symmetry), and the sine coefficients are
|
| 163 |
+
rational numbers::
|
| 164 |
+
|
| 165 |
+
>>> from mpmath import *
|
| 166 |
+
>>> mp.dps = 15; mp.pretty = True
|
| 167 |
+
>>> c, s = fourier(lambda x: x, [-pi, pi], 5)
|
| 168 |
+
>>> nprint(c)
|
| 169 |
+
[0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
|
| 170 |
+
>>> nprint(s)
|
| 171 |
+
[0.0, 2.0, -1.0, 0.666667, -0.5, 0.4]
|
| 172 |
+
|
| 173 |
+
This computes a Fourier series of a nonsymmetric function on
|
| 174 |
+
a nonstandard interval::
|
| 175 |
+
|
| 176 |
+
>>> I = [-1, 1.5]
|
| 177 |
+
>>> f = lambda x: x**2 - 4*x + 1
|
| 178 |
+
>>> cs = fourier(f, I, 4)
|
| 179 |
+
>>> nprint(cs[0])
|
| 180 |
+
[0.583333, 1.12479, -1.27552, 0.904708, -0.441296]
|
| 181 |
+
>>> nprint(cs[1])
|
| 182 |
+
[0.0, -2.6255, 0.580905, 0.219974, -0.540057]
|
| 183 |
+
|
| 184 |
+
It is instructive to plot a function along with its truncated
|
| 185 |
+
Fourier series::
|
| 186 |
+
|
| 187 |
+
>>> plot([f, lambda x: fourierval(cs, I, x)], I) #doctest: +SKIP
|
| 188 |
+
|
| 189 |
+
Fourier series generally converge slowly (and may not converge
|
| 190 |
+
pointwise). For example, if `f(x) = \cosh(x)`, a 10-term Fourier
|
| 191 |
+
series gives an `L^2` error corresponding to 2-digit accuracy::
|
| 192 |
+
|
| 193 |
+
>>> I = [-1, 1]
|
| 194 |
+
>>> cs = fourier(cosh, I, 9)
|
| 195 |
+
>>> g = lambda x: (cosh(x) - fourierval(cs, I, x))**2
|
| 196 |
+
>>> nprint(sqrt(quad(g, I)))
|
| 197 |
+
0.00467963
|
| 198 |
+
|
| 199 |
+
:func:`~mpmath.fourier` uses numerical quadrature. For nonsmooth functions,
|
| 200 |
+
the accuracy (and speed) can be improved by including all singular
|
| 201 |
+
points in the interval specification::
|
| 202 |
+
|
| 203 |
+
>>> nprint(fourier(abs, [-1, 1], 0), 10)
|
| 204 |
+
([0.5000441648], [0.0])
|
| 205 |
+
>>> nprint(fourier(abs, [-1, 0, 1], 0), 10)
|
| 206 |
+
([0.5], [0.0])
|
| 207 |
+
|
| 208 |
+
"""
|
| 209 |
+
interval = ctx._as_points(interval)
|
| 210 |
+
a = interval[0]
|
| 211 |
+
b = interval[-1]
|
| 212 |
+
L = b-a
|
| 213 |
+
cos_series = []
|
| 214 |
+
sin_series = []
|
| 215 |
+
cutoff = ctx.eps*10
|
| 216 |
+
for n in xrange(N+1):
|
| 217 |
+
m = 2*n*ctx.pi/L
|
| 218 |
+
an = 2*ctx.quadgl(lambda t: f(t)*ctx.cos(m*t), interval)/L
|
| 219 |
+
bn = 2*ctx.quadgl(lambda t: f(t)*ctx.sin(m*t), interval)/L
|
| 220 |
+
if n == 0:
|
| 221 |
+
an /= 2
|
| 222 |
+
if abs(an) < cutoff: an = ctx.zero
|
| 223 |
+
if abs(bn) < cutoff: bn = ctx.zero
|
| 224 |
+
cos_series.append(an)
|
| 225 |
+
sin_series.append(bn)
|
| 226 |
+
return cos_series, sin_series
|
| 227 |
+
|
| 228 |
+
@defun
|
| 229 |
+
def fourierval(ctx, series, interval, x):
|
| 230 |
+
"""
|
| 231 |
+
Evaluates a Fourier series (in the format computed by
|
| 232 |
+
by :func:`~mpmath.fourier` for the given interval) at the point `x`.
|
| 233 |
+
|
| 234 |
+
The series should be a pair `(c, s)` where `c` is the
|
| 235 |
+
cosine series and `s` is the sine series. The two lists
|
| 236 |
+
need not have the same length.
|
| 237 |
+
"""
|
| 238 |
+
cs, ss = series
|
| 239 |
+
ab = ctx._as_points(interval)
|
| 240 |
+
a = interval[0]
|
| 241 |
+
b = interval[-1]
|
| 242 |
+
m = 2*ctx.pi/(ab[-1]-ab[0])
|
| 243 |
+
s = ctx.zero
|
| 244 |
+
s += ctx.fsum(cs[n]*ctx.cos(m*n*x) for n in xrange(len(cs)) if cs[n])
|
| 245 |
+
s += ctx.fsum(ss[n]*ctx.sin(m*n*x) for n in xrange(len(ss)) if ss[n])
|
| 246 |
+
return s
|
vllm/lib/python3.10/site-packages/mpmath/calculus/calculus.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
class CalculusMethods(object):
|
| 2 |
+
pass
|
| 3 |
+
|
| 4 |
+
def defun(f):
|
| 5 |
+
setattr(CalculusMethods, f.__name__, f)
|
| 6 |
+
return f
|
vllm/lib/python3.10/site-packages/mpmath/calculus/extrapolation.py
ADDED
|
@@ -0,0 +1,2115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
try:
|
| 2 |
+
from itertools import izip
|
| 3 |
+
except ImportError:
|
| 4 |
+
izip = zip
|
| 5 |
+
|
| 6 |
+
from ..libmp.backend import xrange
|
| 7 |
+
from .calculus import defun
|
| 8 |
+
|
| 9 |
+
try:
|
| 10 |
+
next = next
|
| 11 |
+
except NameError:
|
| 12 |
+
next = lambda _: _.next()
|
| 13 |
+
|
| 14 |
+
@defun
def richardson(ctx, seq):
    r"""
    Given a list ``seq`` of the first `N` elements of a slowly convergent
    infinite sequence, :func:`~mpmath.richardson` computes the `N`-term
    Richardson extrapolate for the limit.

    :func:`~mpmath.richardson` returns `(v, c)` where `v` is the estimated
    limit and `c` is the magnitude of the largest weight used during the
    computation. The weight provides an estimate of the precision
    lost to cancellation. Due to cancellation effects, the sequence must
    typically be computed at a much higher precision than the target
    accuracy of the extrapolation.

    **Applicability and issues**

    The `N`-step Richardson extrapolation algorithm used by
    :func:`~mpmath.richardson` is described in [1].

    Richardson extrapolation only works for a specific type of sequence,
    namely one converging like partial sums of
    `P(1)/Q(1) + P(2)/Q(2) + \ldots` where `P` and `Q` are polynomials.
    When the sequence does not converge at such a rate
    :func:`~mpmath.richardson` generally produces garbage.

    Richardson extrapolation has the advantage of being fast: the `N`-term
    extrapolate requires only `O(N)` arithmetic operations, and usually
    produces an estimate that is accurate to `O(N)` digits. Contrast with
    the Shanks transformation (see :func:`~mpmath.shanks`), which requires
    `O(N^2)` operations.

    :func:`~mpmath.richardson` is unable to produce an estimate for the
    approximation error. One way to estimate the error is to perform
    two extrapolations with slightly different `N` and comparing the
    results.

    Richardson extrapolation does not work for oscillating sequences.
    As a simple workaround, :func:`~mpmath.richardson` detects if the last
    three elements do not differ monotonically, and in that case
    applies extrapolation only to the even-index elements.

    **Example**

    Applying Richardson extrapolation to the Leibniz series for `\pi`::

        >>> from mpmath import *
        >>> mp.dps = 30; mp.pretty = True
        >>> S = [4*sum(mpf(-1)**n/(2*n+1) for n in range(m))
        ...     for m in range(1,30)]
        >>> v, c = richardson(S[:10])
        >>> v
        3.2126984126984126984126984127
        >>> nprint([v-pi, c])
        [0.0711058, 2.0]

        >>> v, c = richardson(S[:30])
        >>> v
        3.14159265468624052829954206226
        >>> nprint([v-pi, c])
        [1.09645e-9, 20833.3]

    **References**

    1. [BenderOrszag]_ pp. 375-376

    """
    if len(seq) < 3:
        raise ValueError("seq should be of minimum length 3")
    # Oscillation workaround: if the last three elements are not monotone,
    # restrict the extrapolation to the even-index subsequence.
    if ctx.sign(seq[-1]-seq[-2]) != ctx.sign(seq[-2]-seq[-3]):
        seq = seq[::2]
    # Use the last N+1 elements seq[N], ..., seq[2N] of the (possibly
    # thinned) sequence for the N-term extrapolate.
    N = len(seq)//2-1
    s = ctx.zero
    # The general weight is c[k] = (N+k)**N * (-1)**(k+N) / k! / (N-k)!
    # To avoid repeated factorials, we simplify the quotient
    # of successive weights to obtain a recurrence relation
    c = (-1)**N * N**N / ctx.mpf(ctx._ifac(N))
    maxc = 1
    for k in xrange(N+1):
        s += c * seq[N+k]
        # Track the largest weight magnitude: it estimates precision
        # lost to cancellation in the alternating sum.
        maxc = max(abs(c), maxc)
        # Advance c[k] -> c[k+1] via the simplified weight ratio.
        c *= (k-N)*ctx.mpf(k+N+1)**N
        c /= ((1+k)*ctx.mpf(k+N)**N)
    return s, maxc
|
| 97 |
+
|
| 98 |
+
@defun
def shanks(ctx, seq, table=None, randomized=False):
    r"""
    Given a list ``seq`` of the first `N` elements of a slowly
    convergent infinite sequence `(A_k)`, :func:`~mpmath.shanks` computes the iterated
    Shanks transformation `S(A), S(S(A)), \ldots, S^{N/2}(A)`. The Shanks
    transformation often provides strong convergence acceleration,
    especially if the sequence is oscillating.

    The iterated Shanks transformation is computed using the Wynn
    epsilon algorithm (see [1]). :func:`~mpmath.shanks` returns the full
    epsilon table generated by Wynn's algorithm, which can be read
    off as follows:

    * The table is a list of lists forming a lower triangular matrix,
      where higher row and column indices correspond to more accurate
      values.
    * The columns with even index hold dummy entries (required for the
      computation) and the columns with odd index hold the actual
      extrapolates.
    * The last element in the last row is typically the most
      accurate estimate of the limit.
    * The difference to the third last element in the last row
      provides an estimate of the approximation error.
    * The magnitude of the second last element provides an estimate
      of the numerical accuracy lost to cancellation.

    For convenience, the extrapolation is stopped at an odd index
    so that ``shanks(seq)[-1][-1]`` always gives an estimate of the
    limit.

    Optionally, an existing table can be passed to :func:`~mpmath.shanks`.
    This can be used to efficiently extend a previous computation after
    new elements have been appended to the sequence. The table will
    then be updated in-place.

    **The Shanks transformation**

    The Shanks transformation is defined as follows (see [2]): given
    the input sequence `(A_0, A_1, \ldots)`, the transformed sequence is
    given by

    .. math ::

        S(A_k) = \frac{A_{k+1}A_{k-1}-A_k^2}{A_{k+1}+A_{k-1}-2 A_k}

    The Shanks transformation gives the exact limit `A_{\infty}` in a
    single step if `A_k = A + a q^k`. Note in particular that it
    extrapolates the exact sum of a geometric series in a single step.

    Applying the Shanks transformation once often improves convergence
    substantially for an arbitrary sequence, but the optimal effect is
    obtained by applying it iteratively:
    `S(S(A_k)), S(S(S(A_k))), \ldots`.

    Wynn's epsilon algorithm provides an efficient way to generate
    the table of iterated Shanks transformations. It reduces the
    computation of each element to essentially a single division, at
    the cost of requiring dummy elements in the table. See [1] for
    details.

    **Precision issues**

    Due to cancellation effects, the sequence must typically be
    computed at a much higher precision than the target accuracy
    of the extrapolation.

    If the Shanks transformation converges to the exact limit (such
    as if the sequence is a geometric series), then a division by
    zero occurs. By default, :func:`~mpmath.shanks` handles this case by
    terminating the iteration and returning the table it has
    generated so far. With *randomized=True*, it will instead
    replace the zero by a pseudorandom number close to zero.
    (TODO: find a better solution to this problem.)

    **Examples**

    We illustrate by applying Shanks transformation to the Leibniz
    series for `\pi`::

        >>> from mpmath import *
        >>> mp.dps = 50
        >>> S = [4*sum(mpf(-1)**n/(2*n+1) for n in range(m))
        ...     for m in range(1,30)]
        >>>
        >>> T = shanks(S[:7])
        >>> for row in T:
        ...     nprint(row)
        ...
        [-0.75]
        [1.25, 3.16667]
        [-1.75, 3.13333, -28.75]
        [2.25, 3.14524, 82.25, 3.14234]
        [-2.75, 3.13968, -177.75, 3.14139, -969.937]
        [3.25, 3.14271, 327.25, 3.14166, 3515.06, 3.14161]

    The extrapolated accuracy is about 4 digits, and about 4 digits
    may have been lost due to cancellation::

        >>> L = T[-1]
        >>> nprint([abs(L[-1] - pi), abs(L[-1] - L[-3]), abs(L[-2])])
        [2.22532e-5, 4.78309e-5, 3515.06]

    Now we extend the computation::

        >>> T = shanks(S[:25], T)
        >>> L = T[-1]
        >>> nprint([abs(L[-1] - pi), abs(L[-1] - L[-3]), abs(L[-2])])
        [3.75527e-19, 1.48478e-19, 2.96014e+17]

    The value for pi is now accurate to 18 digits. About 18 digits may
    also have been lost to cancellation.

    Here is an example with a geometric series, where the convergence
    is immediate (the sum is exactly 1)::

        >>> mp.dps = 15
        >>> for row in shanks([0.5, 0.75, 0.875, 0.9375, 0.96875]):
        ...     nprint(row)
        [4.0]
        [8.0, 1.0]

    **References**

    1. [GravesMorris]_

    2. [BenderOrszag]_ pp. 368-375

    """
    if len(seq) < 2:
        raise ValueError("seq should be of minimum length 2")
    # When extending an existing table, resume where it left off;
    # otherwise start a fresh (empty) epsilon table.
    if table:
        START = len(table)
    else:
        START = 0
        table = []
    STOP = len(seq) - 1
    # Stop at an even row count so the final row ends in an odd column,
    # i.e. table[-1][-1] is an actual extrapolate, not a dummy entry.
    if STOP & 1:
        STOP -= 1
    one = ctx.one
    eps = +ctx.eps
    if randomized:
        from random import Random
        rnd = Random()
        # Seed deterministically from the resume point so repeated
        # calls with the same inputs reproduce the same table.
        rnd.seed(START)
    for i in xrange(START, STOP):
        row = []
        for j in xrange(i+1):
            # Wynn's epsilon recurrence: eps[i][j] = eps[i-1][j-2]
            # + 1/(eps[i][j-1] - eps[i-1][j-1]), with the j == 0
            # column seeded from first differences of seq.
            if j == 0:
                a, b = 0, seq[i+1]-seq[i]
            else:
                if j == 1:
                    a = seq[i]
                else:
                    a = table[i-1][j-2]
                b = row[j-1] - table[i-1][j-1]
            if not b:
                # Division by zero: the transform has hit the limit
                # exactly. Either perturb (randomized) or bail out,
                # dropping an incomplete final row if i is odd.
                if randomized:
                    b = (1 + rnd.getrandbits(10))*eps
                elif i & 1:
                    return table[:-1]
                else:
                    return table
            row.append(a + one/b)
        table.append(row)
    return table
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
class levin_class:
|
| 267 |
+
# levin: Copyright 2013 Timo Hartmann (thartmann15 at gmail.com)
|
| 268 |
+
r"""
|
| 269 |
+
This interface implements Levin's (nonlinear) sequence transformation for
|
| 270 |
+
convergence acceleration and summation of divergent series. It performs
|
| 271 |
+
better than the Shanks/Wynn-epsilon algorithm for logarithmic convergent
|
| 272 |
+
or alternating divergent series.
|
| 273 |
+
|
| 274 |
+
Let *A* be the series we want to sum:
|
| 275 |
+
|
| 276 |
+
.. math ::
|
| 277 |
+
|
| 278 |
+
A = \sum_{k=0}^{\infty} a_k
|
| 279 |
+
|
| 280 |
+
Attention: all `a_k` must be non-zero!
|
| 281 |
+
|
| 282 |
+
Let `s_n` be the partial sums of this series:
|
| 283 |
+
|
| 284 |
+
.. math ::
|
| 285 |
+
|
| 286 |
+
s_n = \sum_{k=0}^n a_k.
|
| 287 |
+
|
| 288 |
+
**Methods**
|
| 289 |
+
|
| 290 |
+
Calling ``levin`` returns an object with the following methods.
|
| 291 |
+
|
| 292 |
+
``update(...)`` works with the list of individual terms `a_k` of *A*, and
|
| 293 |
+
``update_step(...)`` works with the list of partial sums `s_k` of *A*:
|
| 294 |
+
|
| 295 |
+
.. code ::
|
| 296 |
+
|
| 297 |
+
v, e = ...update([a_0, a_1,..., a_k])
|
| 298 |
+
v, e = ...update_psum([s_0, s_1,..., s_k])
|
| 299 |
+
|
| 300 |
+
``step(...)`` works with the individual terms `a_k` and ``step_psum(...)``
|
| 301 |
+
works with the partial sums `s_k`:
|
| 302 |
+
|
| 303 |
+
.. code ::
|
| 304 |
+
|
| 305 |
+
v, e = ...step(a_k)
|
| 306 |
+
v, e = ...step_psum(s_k)
|
| 307 |
+
|
| 308 |
+
*v* is the current estimate for *A*, and *e* is an error estimate which is
|
| 309 |
+
simply the difference between the current estimate and the last estimate.
|
| 310 |
+
One should not mix ``update``, ``update_psum``, ``step`` and ``step_psum``.
|
| 311 |
+
|
| 312 |
+
**A word of caution**
|
| 313 |
+
|
| 314 |
+
One can only hope for good results (i.e. convergence acceleration or
|
| 315 |
+
resummation) if the `s_n` have some well defined asymptotic behavior for
|
| 316 |
+
large `n` and are not erratic or random. Furthermore one usually needs very
|
| 317 |
+
high working precision because of the numerical cancellation. If the working
|
| 318 |
+
precision is insufficient, levin may produce silently numerical garbage.
|
| 319 |
+
Furthermore even if the Levin-transformation converges, in the general case
|
| 320 |
+
there is no proof that the result is mathematically sound. Only for very
|
| 321 |
+
special classes of problems one can prove that the Levin-transformation
|
| 322 |
+
converges to the expected result (for example Stieltjes-type integrals).
|
| 323 |
+
Furthermore the Levin-transform is quite expensive (i.e. slow) in comparison
|
| 324 |
+
to Shanks/Wynn-epsilon, Richardson & co.
|
| 325 |
+
In summary one can say that the Levin-transformation is powerful but
|
| 326 |
+
unreliable and that it may need a copious amount of working precision.
|
| 327 |
+
|
| 328 |
+
The Levin transform has several variants differing in the choice of weights.
|
| 329 |
+
Some variants are better suited for the possible flavours of convergence
|
| 330 |
+
behaviour of *A* than other variants:
|
| 331 |
+
|
| 332 |
+
.. code ::
|
| 333 |
+
|
| 334 |
+
convergence behaviour levin-u levin-t levin-v shanks/wynn-epsilon
|
| 335 |
+
|
| 336 |
+
logarithmic + - + -
|
| 337 |
+
linear + + + +
|
| 338 |
+
alternating divergent + + + +
|
| 339 |
+
|
| 340 |
+
"+" means the variant is suitable,"-" means the variant is not suitable;
|
| 341 |
+
for comparison the Shanks/Wynn-epsilon transform is listed, too.
|
| 342 |
+
|
| 343 |
+
The variant is controlled though the variant keyword (i.e. ``variant="u"``,
|
| 344 |
+
``variant="t"`` or ``variant="v"``). Overall "u" is probably the best choice.
|
| 345 |
+
|
| 346 |
+
Finally it is possible to use the Sidi-S transform instead of the Levin transform
|
| 347 |
+
by using the keyword ``method='sidi'``. The Sidi-S transform works better than the
|
| 348 |
+
Levin transformation for some divergent series (see the examples).
|
| 349 |
+
|
| 350 |
+
Parameters:
|
| 351 |
+
|
| 352 |
+
.. code ::
|
| 353 |
+
|
| 354 |
+
method "levin" or "sidi" chooses either the Levin or the Sidi-S transformation
|
| 355 |
+
variant "u","t" or "v" chooses the weight variant.
|
| 356 |
+
|
| 357 |
+
The Levin transform is also accessible through the nsum interface.
|
| 358 |
+
``method="l"`` or ``method="levin"`` select the normal Levin transform while
|
| 359 |
+
``method="sidi"``
|
| 360 |
+
selects the Sidi-S transform. The variant is in both cases selected through the
|
| 361 |
+
levin_variant keyword. The stepsize in :func:`~mpmath.nsum` must not be chosen too large, otherwise
|
| 362 |
+
it will miss the point where the Levin transform converges resulting in numerical
|
| 363 |
+
overflow/garbage. For highly divergent series a copious amount of working precision
|
| 364 |
+
must be chosen.
|
| 365 |
+
|
| 366 |
+
**Examples**
|
| 367 |
+
|
| 368 |
+
First we sum the zeta function::
|
| 369 |
+
|
| 370 |
+
>>> from mpmath import mp
|
| 371 |
+
>>> mp.prec = 53
|
| 372 |
+
>>> eps = mp.mpf(mp.eps)
|
| 373 |
+
>>> with mp.extraprec(2 * mp.prec): # levin needs a high working precision
|
| 374 |
+
... L = mp.levin(method = "levin", variant = "u")
|
| 375 |
+
... S, s, n = [], 0, 1
|
| 376 |
+
... while 1:
|
| 377 |
+
... s += mp.one / (n * n)
|
| 378 |
+
... n += 1
|
| 379 |
+
... S.append(s)
|
| 380 |
+
... v, e = L.update_psum(S)
|
| 381 |
+
... if e < eps:
|
| 382 |
+
... break
|
| 383 |
+
... if n > 1000: raise RuntimeError("iteration limit exceeded")
|
| 384 |
+
>>> print(mp.chop(v - mp.pi ** 2 / 6))
|
| 385 |
+
0.0
|
| 386 |
+
>>> w = mp.nsum(lambda n: 1 / (n*n), [1, mp.inf], method = "levin", levin_variant = "u")
|
| 387 |
+
>>> print(mp.chop(v - w))
|
| 388 |
+
0.0
|
| 389 |
+
|
| 390 |
+
Now we sum the zeta function outside its range of convergence
|
| 391 |
+
(attention: This does not work at the negative integers!)::
|
| 392 |
+
|
| 393 |
+
>>> eps = mp.mpf(mp.eps)
|
| 394 |
+
>>> with mp.extraprec(2 * mp.prec): # levin needs a high working precision
|
| 395 |
+
... L = mp.levin(method = "levin", variant = "v")
|
| 396 |
+
... A, n = [], 1
|
| 397 |
+
... while 1:
|
| 398 |
+
... s = mp.mpf(n) ** (2 + 3j)
|
| 399 |
+
... n += 1
|
| 400 |
+
... A.append(s)
|
| 401 |
+
... v, e = L.update(A)
|
| 402 |
+
... if e < eps:
|
| 403 |
+
... break
|
| 404 |
+
... if n > 1000: raise RuntimeError("iteration limit exceeded")
|
| 405 |
+
>>> print(mp.chop(v - mp.zeta(-2-3j)))
|
| 406 |
+
0.0
|
| 407 |
+
>>> w = mp.nsum(lambda n: n ** (2 + 3j), [1, mp.inf], method = "levin", levin_variant = "v")
|
| 408 |
+
>>> print(mp.chop(v - w))
|
| 409 |
+
0.0
|
| 410 |
+
|
| 411 |
+
Now we sum the divergent asymptotic expansion of an integral related to the
|
| 412 |
+
exponential integral (see also [2] p.373). The Sidi-S transform works best here::
|
| 413 |
+
|
| 414 |
+
>>> z = mp.mpf(10)
|
| 415 |
+
>>> exact = mp.quad(lambda x: mp.exp(-x)/(1+x/z),[0,mp.inf])
|
| 416 |
+
>>> # exact = z * mp.exp(z) * mp.expint(1,z) # this is the symbolic expression for the integral
|
| 417 |
+
>>> eps = mp.mpf(mp.eps)
|
| 418 |
+
>>> with mp.extraprec(2 * mp.prec): # high working precisions are mandatory for divergent resummation
|
| 419 |
+
... L = mp.levin(method = "sidi", variant = "t")
|
| 420 |
+
... n = 0
|
| 421 |
+
... while 1:
|
| 422 |
+
... s = (-1)**n * mp.fac(n) * z ** (-n)
|
| 423 |
+
... v, e = L.step(s)
|
| 424 |
+
... n += 1
|
| 425 |
+
... if e < eps:
|
| 426 |
+
... break
|
| 427 |
+
... if n > 1000: raise RuntimeError("iteration limit exceeded")
|
| 428 |
+
>>> print(mp.chop(v - exact))
|
| 429 |
+
0.0
|
| 430 |
+
>>> w = mp.nsum(lambda n: (-1) ** n * mp.fac(n) * z ** (-n), [0, mp.inf], method = "sidi", levin_variant = "t")
|
| 431 |
+
>>> print(mp.chop(v - w))
|
| 432 |
+
0.0
|
| 433 |
+
|
| 434 |
+
Another highly divergent integral is also summable::
|
| 435 |
+
|
| 436 |
+
>>> z = mp.mpf(2)
|
| 437 |
+
>>> eps = mp.mpf(mp.eps)
|
| 438 |
+
>>> exact = mp.quad(lambda x: mp.exp( -x * x / 2 - z * x ** 4), [0,mp.inf]) * 2 / mp.sqrt(2 * mp.pi)
|
| 439 |
+
>>> # exact = mp.exp(mp.one / (32 * z)) * mp.besselk(mp.one / 4, mp.one / (32 * z)) / (4 * mp.sqrt(z * mp.pi)) # this is the symbolic expression for the integral
|
| 440 |
+
>>> with mp.extraprec(7 * mp.prec): # we need copious amount of precision to sum this highly divergent series
|
| 441 |
+
... L = mp.levin(method = "levin", variant = "t")
|
| 442 |
+
... n, s = 0, 0
|
| 443 |
+
... while 1:
|
| 444 |
+
... s += (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n))
|
| 445 |
+
... n += 1
|
| 446 |
+
... v, e = L.step_psum(s)
|
| 447 |
+
... if e < eps:
|
| 448 |
+
... break
|
| 449 |
+
... if n > 1000: raise RuntimeError("iteration limit exceeded")
|
| 450 |
+
>>> print(mp.chop(v - exact))
|
| 451 |
+
0.0
|
| 452 |
+
>>> w = mp.nsum(lambda n: (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)),
|
| 453 |
+
... [0, mp.inf], method = "levin", levin_variant = "t", workprec = 8*mp.prec, steps = [2] + [1 for x in xrange(1000)])
|
| 454 |
+
>>> print(mp.chop(v - w))
|
| 455 |
+
0.0
|
| 456 |
+
|
| 457 |
+
These examples run with 15-20 decimal digits precision. For higher precision the
|
| 458 |
+
working precision must be raised.
|
| 459 |
+
|
| 460 |
+
**Examples for nsum**
|
| 461 |
+
|
| 462 |
+
Here we calculate Euler's constant as the constant term in the Laurent
|
| 463 |
+
expansion of `\zeta(s)` at `s=1`. This sum converges extremely slowly because of
|
| 464 |
+
the logarithmic convergence behaviour of the Dirichlet series for zeta::
|
| 465 |
+
|
| 466 |
+
>>> mp.dps = 30
|
| 467 |
+
>>> z = mp.mpf(10) ** (-10)
|
| 468 |
+
>>> a = mp.nsum(lambda n: n**(-(1+z)), [1, mp.inf], method = "l") - 1 / z
|
| 469 |
+
>>> print(mp.chop(a - mp.euler, tol = 1e-10))
|
| 470 |
+
0.0
|
| 471 |
+
|
| 472 |
+
The Sidi-S transform performs excellently for the alternating series of `\log(2)`::
|
| 473 |
+
|
| 474 |
+
>>> a = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "sidi")
|
| 475 |
+
>>> print(mp.chop(a - mp.log(2)))
|
| 476 |
+
0.0
|
| 477 |
+
|
| 478 |
+
Hypergeometric series can also be summed outside their range of convergence.
|
| 479 |
+
The stepsize in :func:`~mpmath.nsum` must not be chosen too large, otherwise it will miss the
|
| 480 |
+
point where the Levin transform converges resulting in numerical overflow/garbage::
|
| 481 |
+
|
| 482 |
+
>>> z = 2 + 1j
|
| 483 |
+
>>> exact = mp.hyp2f1(2 / mp.mpf(3), 4 / mp.mpf(3), 1 / mp.mpf(3), z)
|
| 484 |
+
>>> f = lambda n: mp.rf(2 / mp.mpf(3), n) * mp.rf(4 / mp.mpf(3), n) * z**n / (mp.rf(1 / mp.mpf(3), n) * mp.fac(n))
|
| 485 |
+
>>> v = mp.nsum(f, [0, mp.inf], method = "levin", steps = [10 for x in xrange(1000)])
|
| 486 |
+
>>> print(mp.chop(exact-v))
|
| 487 |
+
0.0
|
| 488 |
+
|
| 489 |
+
References:
|
| 490 |
+
|
| 491 |
+
[1] E.J. Weniger - "Nonlinear Sequence Transformations for the Acceleration of
|
| 492 |
+
Convergence and the Summation of Divergent Series" arXiv:math/0306302
|
| 493 |
+
|
| 494 |
+
[2] A. Sidi - "Practical Extrapolation Methods"
|
| 495 |
+
|
| 496 |
+
[3] H.H.H. Homeier - "Scalar Levin-Type Sequence Transformations" arXiv:math/0005209
|
| 497 |
+
|
| 498 |
+
"""
|
| 499 |
+
|
| 500 |
+
def __init__(self, method = "levin", variant = "u"):
|
| 501 |
+
self.variant = variant
|
| 502 |
+
self.n = 0
|
| 503 |
+
self.a0 = 0
|
| 504 |
+
self.theta = 1
|
| 505 |
+
self.A = []
|
| 506 |
+
self.B = []
|
| 507 |
+
self.last = 0
|
| 508 |
+
self.last_s = False
|
| 509 |
+
|
| 510 |
+
if method == "levin":
|
| 511 |
+
self.factor = self.factor_levin
|
| 512 |
+
elif method == "sidi":
|
| 513 |
+
self.factor = self.factor_sidi
|
| 514 |
+
else:
|
| 515 |
+
raise ValueError("levin: unknown method \"%s\"" % method)
|
| 516 |
+
|
| 517 |
+
def factor_levin(self, i):
    """Recurrence factor of the original Levin transformation.

    See [1] p.50, eq. 7.5-7 (with n-j replaced by i).
    """
    t = self.theta
    n = self.n
    num = (t + i) * (t + n - 1) ** (n - i - 2)
    den = self.ctx.mpf(t + n) ** (n - i - 1)
    return num / den
|
| 521 |
+
|
| 522 |
+
def factor_sidi(self, i):
    """Recurrence factor of Sidi's analogue of the Levin transformation
    (factorial series); see [1] p.59, eq. 8.3-16 (with n-j replaced by i).
    """
    tn = self.theta + self.n
    ti = self.theta + 2 * self.n - i
    return (tn - 1) * (tn - 2) / self.ctx.mpf((ti - 2) * (ti - 3))
|
| 526 |
+
|
| 527 |
+
def run(self, s, a0, a1 = 0):
    """Feed one term into the transformation table.

    s  -- current partial sum
    a0 -- current term (input to the remainder estimate)
    a1 -- next term; only consulted by the "v" variant

    Updates the numerator table A and denominator table B in place and
    increments the term counter n.  The current limit estimate after the
    update is A[0]/B[0].  Raises ValueError when the remainder estimate
    (weight) vanishes, since the tables would then be singular.
    """
    # Remainder estimate w for the chosen variant ([1] ch. 7).
    variant = self.variant
    if variant == "t":
        w = a0
    elif variant == "u":
        w = a0 * (self.theta + self.n)
    elif variant == "v":
        w = a0 * a1 / (a0 - a1)
    else:
        assert False, "unknown variant"

    if w == 0:
        raise ValueError("levin: zero weight")

    # Start a new entry of the table.
    self.A.append(s / w)
    self.B.append(1 / w)

    # Run the recurrence backwards over the existing entries; the newest
    # pair uses factor 1, older pairs use the method-specific factor.
    for i in range(self.n - 1, -1, -1):
        f = 1 if i == self.n - 1 else self.factor(i)
        self.A[i] = self.A[i + 1] - f * self.A[i]
        self.B[i] = self.B[i + 1] - f * self.B[i]

    self.n += 1
|
| 556 |
+
|
| 557 |
+
###########################################################################
|
| 558 |
+
|
| 559 |
+
def update_psum(self,S):
    """
    This routine applies the convergence acceleration to the list of partial sums.

    A = sum(a_k, k = 0..infinity)
    s_n = sum(a_k, k = 0..n)

    v, e = ...update_psum([s_0, s_1,..., s_k])

    output:
    v current estimate of the series A
    e an error estimate which is simply the difference between the current
    estimate and the last estimate.
    """

    if self.variant!="v":
        # The "t" and "u" variants only need the current term
        # a_n = s_n - s_(n-1) alongside each partial sum.
        if self.n==0:
            # First entry: s_0 is itself the first term a_0.
            self.run(S[0],S[0])
        while self.n<len(S):
            # Feed every not-yet-consumed partial sum (the object keeps
            # its position in self.n, so S may simply grow between calls).
            self.run(S[self.n],S[self.n]-S[self.n-1])
    else:
        # The "v" variant also needs the *next* term a_(n+1), so it always
        # lags one entry behind the supplied list.
        if len(S)==1:
            self.last=0
            return S[0],abs(S[0])

        if self.n==0:
            # a_1 = s_1 - s_0 is needed for the first weight.
            self.a1=S[1]-S[0]
            self.run(S[0],S[0],self.a1)

        while self.n<len(S)-1:
            na1=S[self.n+1]-S[self.n]
            self.run(S[self.n],self.a1,na1)
            self.a1=na1

    # Current rational estimate of the limit; the error estimate is simply
    # the change since the previous call.
    value=self.A[0]/self.B[0]
    err=abs(value-self.last)
    self.last=value

    return value,err
|
| 598 |
+
|
| 599 |
+
def update(self,X):
    """
    This routine applies the convergence acceleration to the list of individual terms.

    A = sum(a_k, k = 0..infinity)

    v, e = ...update([a_0, a_1,..., a_k])

    output:
    v current estimate of the series A
    e an error estimate which is simply the difference between the current
    estimate and the last estimate.
    """

    if self.variant!="v":
        # Accumulate the partial sum in self.s and feed (s_n, a_n) pairs.
        if self.n==0:
            self.s=X[0]
            self.run(self.s,X[0])
        while self.n<len(X):
            # Consume every not-yet-processed term; self.n tracks position,
            # so X may simply grow between calls.
            self.s+=X[self.n]
            self.run(self.s,X[self.n])
    else:
        # The "v" variant needs the next term a_(n+1) as well, so it lags
        # one entry behind the supplied list.
        if len(X)==1:
            self.last=0
            return X[0],abs(X[0])

        if self.n==0:
            self.s=X[0]
            self.run(self.s,X[0],X[1])

        while self.n<len(X)-1:
            self.s+=X[self.n]
            self.run(self.s,X[self.n],X[self.n+1])

    # Current rational estimate of the limit; the error estimate is simply
    # the change since the previous call.
    value=self.A[0]/self.B[0]
    err=abs(value-self.last)
    self.last=value

    return value,err
|
| 638 |
+
|
| 639 |
+
###########################################################################
|
| 640 |
+
|
| 641 |
+
def step_psum(self,s):
    """
    This routine applies the convergence acceleration to the partial sums.

    A = sum(a_k, k = 0..infinity)
    s_n = sum(a_k, k = 0..n)

    v, e = ...step_psum(s_k)

    output:
    v current estimate of the series A
    e an error estimate which is simply the difference between the current
    estimate and the last estimate.
    """

    if self.variant!="v":
        # "t"/"u": the term is recovered as the difference to the previous
        # partial sum, which is remembered in self.last_s.
        if self.n==0:
            self.last_s=s
            self.run(s,s)
        else:
            self.run(s,s-self.last_s)
            self.last_s=s
    else:
        # "v" needs the next term too, so processing lags one step behind.
        # self.last_s is initialized to False in __init__, hence the bool
        # check detects the very first call.
        if isinstance(self.last_s,bool):
            self.last_s=s
            self.last_w=s
            self.last=0
            return s,abs(s)

        # na1 is the newest term a_(n+1); last_w holds the previous term.
        na1=s-self.last_s
        self.run(self.last_s,self.last_w,na1)
        self.last_w=na1
        self.last_s=s

    # Current rational estimate of the limit; the error estimate is simply
    # the change since the previous call.
    value=self.A[0]/self.B[0]
    err=abs(value-self.last)
    self.last=value

    return value,err
|
| 680 |
+
|
| 681 |
+
def step(self,x):
    """
    This routine applies the convergence acceleration to the individual terms.

    A = sum(a_k, k = 0..infinity)

    v, e = ...step(a_k)

    output:
    v current estimate of the series A
    e an error estimate which is simply the difference between the current
    estimate and the last estimate.
    """

    if self.variant!="v":
        # "t"/"u": accumulate the partial sum and feed (s_n, a_n) directly.
        if self.n==0:
            self.s=x
            self.run(self.s,x)
        else:
            self.s+=x
            self.run(self.s,x)
    else:
        # "v" needs the next term too, so processing lags one step behind:
        # the previous term is remembered in self.last_s and only fed once
        # its successor x is known.  self.last_s is initialized to False in
        # __init__, hence the bool check detects the very first call.
        if isinstance(self.last_s,bool):
            self.last_s=x
            self.s=0
            self.last=0
            return x,abs(x)

        # self.s catches up to the partial sum ending at the previous term.
        self.s+=self.last_s
        self.run(self.s,self.last_s,x)
        self.last_s=x

    # Current rational estimate of the limit; the error estimate is simply
    # the change since the previous call.
    value=self.A[0]/self.B[0]
    err=abs(value-self.last)
    self.last=value

    return value,err
|
| 718 |
+
|
| 719 |
+
def levin(ctx, method = "levin", variant = "u"):
    """Return a Levin/Sidi transformation object bound to the context ctx."""
    transform = levin_class(method = method, variant = variant)
    transform.ctx = ctx
    return transform

levin.__doc__ = levin_class.__doc__
defun(levin)
|
| 726 |
+
|
| 727 |
+
|
| 728 |
+
class cohen_alt_class:
    # cohen_alt: Copyright 2013 Timo Hartmann (thartmann15 at gmail.com)
    r"""
    This interface implements the convergence acceleration of alternating series
    as described in H. Cohen, F.R. Villegas, D. Zagier - "Convergence Acceleration
    of Alternating Series". This series transformation works well only if the
    individual terms of the series have an alternating sign. It belongs to the
    class of linear series transformations (in contrast to the Shanks/Wynn-epsilon
    or Levin transform). This series transformation is also able to sum some types
    of divergent series. See the paper under which conditions this resummation is
    mathematically sound.

    Let *A* be the series we want to sum:

    .. math ::

        A = \sum_{k=0}^{\infty} a_k

    Let `s_n` be the partial sums of this series:

    .. math ::

        s_n = \sum_{k=0}^n a_k.


    **Interface**

    Calling ``cohen_alt`` returns an object with the following methods.

    Then ``update(...)`` works with the list of individual terms `a_k` and
    ``update_psum(...)`` works with the list of partial sums `s_k`:

    .. code ::

        v, e = ...update([a_0, a_1,..., a_k])
        v, e = ...update_psum([s_0, s_1,..., s_k])

    *v* is the current estimate for *A*, and *e* is an error estimate which is
    simply the difference between the current estimate and the last estimate.

    **Examples**

    Here we compute the alternating zeta function using ``update_psum``::

        >>> from mpmath import mp
        >>> AC = mp.cohen_alt()
        >>> S, s, n = [], 0, 1
        >>> while 1:
        ...     s += -((-1) ** n) * mp.one / (n * n)
        ...     n += 1
        ...     S.append(s)
        ...     v, e = AC.update_psum(S)
        ...     if e < mp.eps:
        ...         break
        ...     if n > 1000: raise RuntimeError("iteration limit exceeded")
        >>> print(mp.chop(v - mp.pi ** 2 / 12))
        0.0

    Here we compute the product `\prod_{n=1}^{\infty} \Gamma(1+1/(2n-1)) / \Gamma(1+1/(2n))`::

        >>> A = []
        >>> AC = mp.cohen_alt()
        >>> n = 1
        >>> while 1:
        ...     A.append( mp.loggamma(1 + mp.one / (2 * n - 1)))
        ...     A.append(-mp.loggamma(1 + mp.one / (2 * n)))
        ...     n += 1
        ...     v, e = AC.update(A)
        ...     if e < mp.eps:
        ...         break
        ...     if n > 1000: raise RuntimeError("iteration limit exceeded")
        >>> v = mp.exp(v)
        >>> print(mp.chop(v - 1.06215090557106, tol = 1e-12))
        0.0

    ``cohen_alt`` is also accessible through the :func:`~mpmath.nsum` interface::

        >>> v = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "a")
        >>> print(mp.chop(v - mp.log(2)))
        0.0
        >>> v = mp.nsum(lambda n: (-1)**n / (2 * n + 1), [0, mp.inf], method = "a")
        >>> print(mp.chop(v - mp.pi / 4))
        0.0
        >>> v = mp.nsum(lambda n: (-1)**n * mp.log(n) * n, [1, mp.inf], method = "a")
        >>> print(mp.chop(v - mp.diff(lambda s: mp.altzeta(s), -1)))
        0.0

    """

    def __init__(self):
        # Previous estimate, used to derive the error estimate.
        self.last = 0

    def update(self, A):
        """
        This routine applies the convergence acceleration to the list of individual terms.

        A = sum(a_k, k = 0..infinity)

        v, e = ...update([a_0, a_1,..., a_k])

        output:
        v current estimate of the series A
        e an error estimate which is simply the difference between the current
        estimate and the last estimate.
        """

        n = len(A)
        # Normalization d = ((3+sqrt(8))^n + (3+sqrt(8))^-n) / 2, taken from
        # the Cohen-Villegas-Zagier paper.
        d = (3 + self.ctx.sqrt(8)) ** n
        d = (d + 1 / d) / 2
        b = -self.ctx.one
        c = -d
        s = 0

        for k in xrange(n):
            # Weight recurrence from the paper; the (-1)^k factor below
            # folds the alternating sign into the weighted sum.
            c = b - c
            term = c * A[k]
            if k % 2 == 0:
                s += term
            else:
                s -= term
            b = 2 * (k + n) * (k - n) * b / ((2 * k + 1) * (k + self.ctx.one))

        value = s / d

        # Error estimate: change relative to the previous call.
        err = abs(value - self.last)
        self.last = value

        return value, err

    def update_psum(self, S):
        """
        This routine applies the convergence acceleration to the list of partial sums.

        A = sum(a_k, k = 0..infinity)
        s_n = sum(a_k, k = 0..n)

        v, e = ...update_psum([s_0, s_1,..., s_k])

        output:
        v current estimate of the series A
        e an error estimate which is simply the difference between the current
        estimate and the last estimate.
        """

        n = len(S)
        # Same normalization as in update(); here the weights b multiply the
        # partial sums directly.
        d = (3 + self.ctx.sqrt(8)) ** n
        d = (d + 1 / d) / 2
        b = self.ctx.one
        s = 0

        for k in xrange(n):
            b = 2 * (n + k) * (n - k) * b / ((2 * k + 1) * (k + self.ctx.one))
            s = s + b * S[k]

        value = s / d

        # Error estimate: change relative to the previous call.
        err = abs(value - self.last)
        self.last = value

        return value, err
|
| 887 |
+
|
| 888 |
+
def cohen_alt(ctx):
    """Return a Cohen-Villegas-Zagier accelerator bound to the context ctx."""
    accel = cohen_alt_class()
    accel.ctx = ctx
    return accel

cohen_alt.__doc__ = cohen_alt_class.__doc__
defun(cohen_alt)
|
| 895 |
+
|
| 896 |
+
|
| 897 |
+
@defun
def sumap(ctx, f, interval, integral=None, error=False):
    r"""
    Evaluates an infinite series of an analytic summand *f* using the
    Abel-Plana formula

    .. math ::

        \sum_{k=0}^{\infty} f(k) = \int_0^{\infty} f(t) dt + \frac{1}{2} f(0) +
            i \int_0^{\infty} \frac{f(it)-f(-it)}{e^{2\pi t}-1} dt.

    Unlike the Euler-Maclaurin formula (see :func:`~mpmath.sumem`),
    the Abel-Plana formula does not require derivatives. However,
    it only works when `|f(it)-f(-it)|` does not
    increase too rapidly with `t`.

    **Examples**

    The Abel-Plana formula is particularly useful when the summand
    decreases like a power of `k`; for example when the sum is a pure
    zeta function::

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> sumap(lambda k: 1/k**2.5, [1,inf])
        1.34148725725091717975677
        >>> zeta(2.5)
        1.34148725725091717975677
        >>> sumap(lambda k: 1/(k+1j)**(2.5+2.5j), [1,inf])
        (-3.385361068546473342286084 - 0.7432082105196321803869551j)
        >>> zeta(2.5+2.5j, 1+1j)
        (-3.385361068546473342286084 - 0.7432082105196321803869551j)

    If the series is alternating, numerical quadrature along the real
    line is likely to give poor results, so it is better to evaluate
    the first term symbolically whenever possible:

        >>> n=3; z=-0.75
        >>> I = expint(n,-log(z))
        >>> chop(sumap(lambda k: z**k / k**n, [1,inf], integral=I))
        -0.6917036036904594510141448
        >>> polylog(n,z)
        -0.6917036036904594510141448

    """
    prec = ctx.prec
    try:
        # Work at slightly raised precision; restored in the finally block.
        ctx.prec += 10
        a, b = interval
        # Only sums over [a, inf) are supported by this formula.
        if b != ctx.inf:
            raise ValueError("b should be equal to ctx.inf")
        # Shift the summand so the sum effectively starts at k = 0.
        g = lambda x: f(x+a)
        # First Abel-Plana piece: integral of f along the real axis
        # (either computed numerically or supplied symbolically).
        if integral is None:
            i1, err1 = ctx.quad(g, [0,ctx.inf], error=True)
        else:
            i1, err1 = integral, 0
        j = ctx.j
        p = ctx.pi * 2
        # Second piece: the boundary-correction integral along the
        # imaginary axis.  For a real-valued summand, f(it)-f(-it) reduces
        # to 2i*Im(f(it)); expm1 avoids cancellation for small t.
        if ctx._is_real_type(i1):
            h = lambda t: -2 * ctx.im(g(j*t)) / ctx.expm1(p*t)
        else:
            h = lambda t: j*(g(j*t)-g(-j*t)) / ctx.expm1(p*t)
        i2, err2 = ctx.quad(h, [0,ctx.inf], error=True)
        err = err1+err2
        # Abel-Plana: integral + boundary integral + f(0)/2.
        v = i1+i2+0.5*g(ctx.mpf(0))
    finally:
        ctx.prec = prec
    # Unary + rounds the result to the restored working precision.
    if error:
        return +v, err
    return +v
|
| 967 |
+
|
| 968 |
+
|
| 969 |
+
@defun
def sumem(ctx, f, interval, tol=None, reject=10, integral=None,
    adiffs=None, bdiffs=None, verbose=False, error=False,
    _fast_abort=False):
    r"""
    Uses the Euler-Maclaurin formula to compute an approximation accurate
    to within ``tol`` (which defaults to the present epsilon) of the sum

    .. math ::

        S = \sum_{k=a}^b f(k)

    where `(a,b)` are given by ``interval`` and `a` or `b` may be
    infinite. The approximation is

    .. math ::

        S \sim \int_a^b f(x) \,dx + \frac{f(a)+f(b)}{2} +
        \sum_{k=1}^{\infty} \frac{B_{2k}}{(2k)!}
        \left(f^{(2k-1)}(b)-f^{(2k-1)}(a)\right).

    The last sum in the Euler-Maclaurin formula is not generally
    convergent (a notable exception is if `f` is a polynomial, in
    which case Euler-Maclaurin actually gives an exact result).

    The summation is stopped as soon as the quotient between two
    consecutive terms falls below *reject*. That is, by default
    (*reject* = 10), the summation is continued as long as each
    term adds at least one decimal.

    Although not convergent, convergence to a given tolerance can
    often be "forced" if `b = \infty` by summing up to `a+N` and then
    applying the Euler-Maclaurin formula to the sum over the range
    `(a+N+1, \ldots, \infty)`. This procedure is implemented by
    :func:`~mpmath.nsum`.

    By default numerical quadrature and differentiation is used.
    If the symbolic values of the integral and endpoint derivatives
    are known, it is more efficient to pass the value of the
    integral explicitly as ``integral`` and the derivatives
    explicitly as ``adiffs`` and ``bdiffs``. The derivatives
    should be given as iterables that yield
    `f(a), f'(a), f''(a), \ldots` (and the equivalent for `b`).

    **Examples**

    Summation of an infinite series, with automatic and symbolic
    integral and derivative values (the second should be much faster)::

        >>> from mpmath import *
        >>> mp.dps = 50; mp.pretty = True
        >>> sumem(lambda n: 1/n**2, [32, inf])
        0.03174336652030209012658168043874142714132886413417
        >>> I = mpf(1)/32
        >>> D = adiffs=((-1)**n*fac(n+1)*32**(-2-n) for n in range(999))
        >>> sumem(lambda n: 1/n**2, [32, inf], integral=I, adiffs=D)
        0.03174336652030209012658168043874142714132886413417

    An exact evaluation of a finite polynomial sum::

        >>> sumem(lambda n: n**5-12*n**2+3*n, [-100000, 200000])
        10500155000624963999742499550000.0
        >>> print(sum(n**5-12*n**2+3*n for n in range(-100000, 200001)))
        10500155000624963999742499550000

    """
    tol = tol or +ctx.eps
    interval = ctx._as_points(interval)
    a = ctx.convert(interval[0])
    b = ctx.convert(interval[-1])
    err = ctx.zero
    prev = 0
    # Hard cap on the number of derivative terms considered.
    M = 10000
    # At an infinite endpoint all derivatives are taken to vanish;
    # otherwise use the supplied derivatives or differentiate numerically.
    if a == ctx.ninf: adiffs = (0 for n in xrange(M))
    else: adiffs = adiffs or ctx.diffs(f, a)
    if b == ctx.inf: bdiffs = (0 for n in xrange(M))
    else: bdiffs = bdiffs or ctx.diffs(f, b)
    orig = ctx.prec
    #verbose = 1
    try:
        # Guard digits for the correction sum; restored in finally.
        ctx.prec += 10
        s = ctx.zero
        for k, (da, db) in enumerate(izip(adiffs, bdiffs)):
            # Only odd-order derivatives enter the Euler-Maclaurin sum
            # (even-index Bernoulli numbers pair with odd derivatives).
            if k & 1:
                term = (db-da) * ctx.bernoulli(k+1) / ctx.factorial(k+1)
                mag = abs(term)
                if verbose:
                    print("term", k, "magnitude =", ctx.nstr(mag))
                # Converged: the term is below the tolerance.
                if k > 4 and mag < tol:
                    s += term
                    break
                # Asymptotic divergence detected: consecutive terms no
                # longer shrink by at least a factor of `reject`.
                elif k > 4 and abs(prev) / mag < reject:
                    err += mag
                    if _fast_abort:
                        # Caller asked for an immediate bail-out.
                        return [s, (s, err)][error]
                    if verbose:
                        print("Failed to converge")
                    break
                else:
                    s += term
                prev = term
        # Endpoint correction
        if a != ctx.ninf: s += f(a)/2
        if b != ctx.inf: s += f(b)/2
        # Tail integral
        if verbose:
            print("Integrating f(x) from x = %s to %s" % (ctx.nstr(a), ctx.nstr(b)))
        if integral:
            s += integral
        else:
            integral, ierr = ctx.quad(f, interval, error=True)
            if verbose:
                print("Integration error:", ierr)
            s += integral
            err += ierr
    finally:
        ctx.prec = orig
    if error:
        return s, err
    else:
        return s
|
| 1090 |
+
|
| 1091 |
+
@defun
def adaptive_extrapolation(ctx, update, emfun, kwargs):
    """
    Driver used by nsum: repeatedly extends a list of partial sums (via the
    `update` callback) and tries several extrapolation methods (Richardson,
    Shanks, Levin/Sidi/Cohen accelerators, Euler-Maclaurin via `emfun`)
    until one of them reaches the target tolerance.  Returns the first
    estimate whose error estimate falls below `tol`, otherwise the best
    estimate found; raises ctx.NoConvergence if the 'strict' option is set.
    """
    option = kwargs.get
    # Target tolerance: looser in fixed-precision mode, tighter otherwise.
    if ctx._fixed_precision:
        tol = option('tol', ctx.eps*2**10)
    else:
        tol = option('tol', ctx.eps/2**10)
    verbose = option('verbose', False)
    maxterms = option('maxterms', ctx.dps*10)
    # Methods are given as a '+'-separated string, e.g. "r+s+e".
    method = set(option('method', 'r+s').split('+'))
    skip = option('skip', 0)
    # Batch sizes for adding new terms; defaults to 10, 20, 30, ...
    steps = iter(option('steps', xrange(10, 10**9, 10)))
    strict = option('strict')
    #steps = (10 for i in xrange(1000))
    # Sequence-transformation objects (Levin/Sidi/Cohen) to try each round.
    summer=[]
    if 'd' in method or 'direct' in method:
        TRY_RICHARDSON = TRY_SHANKS = TRY_EULER_MACLAURIN = False
    else:
        TRY_RICHARDSON = ('r' in method) or ('richardson' in method)
        TRY_SHANKS = ('s' in method) or ('shanks' in method)
        TRY_EULER_MACLAURIN = ('e' in method) or \
            ('euler-maclaurin' in method)

    def init_levin(m):
        # Instantiate one accelerator per requested weight variant;
        # "all" expands to the three standard variants.
        variant = kwargs.get("levin_variant", "u")
        if isinstance(variant, str):
            if variant == "all":
                variant = ["u", "v", "t"]
            else:
                variant = [variant]
        for s in variant:
            L = levin_class(method = m, variant = s)
            L.ctx = ctx
            L.name = m + "(" + s + ")"
            summer.append(L)

    if ('l' in method) or ('levin' in method):
        init_levin("levin")

    if ('sidi' in method):
        init_levin("sidi")

    if ('a' in method) or ('alternating' in method):
        L = cohen_alt_class()
        L.ctx = ctx
        L.name = "alternating"
        summer.append(L)

    last_richardson_value = 0
    shanks_table = []
    index = 0
    step = 10
    partial = []
    best = ctx.zero
    orig = ctx.prec
    try:
        # Extrapolation methods suffer heavy cancellation, so work at a
        # multiple of the target precision; 'workprec' overrides this.
        if 'workprec' in kwargs:
            ctx.prec = kwargs['workprec']
        elif TRY_RICHARDSON or TRY_SHANKS or len(summer)!=0:
            ctx.prec = (ctx.prec+10) * 4
        else:
            ctx.prec += 30
        while 1:
            if index >= maxterms:
                break

            # Get new batch of terms
            try:
                step = next(steps)
            except StopIteration:
                # Keep reusing the last step size once the iterator runs dry.
                pass
            if verbose:
                print("-"*70)
                print("Adding terms #%i-#%i" % (index, index+step))
            update(partial, xrange(index, index+step))
            index += step

            # Check direct error
            # NOTE(review): assumes each batch appends at least two partial
            # sums, so partial[-2] exists on the first pass.
            best = partial[-1]
            error = abs(best - partial[-2])
            if verbose:
                print("Direct error: %s" % ctx.nstr(error))
            if error <= tol:
                return best

            # Check each extrapolation method
            if TRY_RICHARDSON:
                value, maxc = ctx.richardson(partial)
                # Convergence
                richardson_error = abs(value - last_richardson_value)
                if verbose:
                    print("Richardson error: %s" % ctx.nstr(richardson_error))
                # Convergence
                if richardson_error <= tol:
                    return value
                last_richardson_value = value
                # Unreliable due to cancellation
                if ctx.eps*maxc > tol:
                    if verbose:
                        print("Ran out of precision for Richardson")
                    TRY_RICHARDSON = False
                if richardson_error < error:
                    error = richardson_error
                    best = value
            if TRY_SHANKS:
                # Reuse the table between rounds; only new rows are computed.
                shanks_table = ctx.shanks(partial, shanks_table, randomized=True)
                row = shanks_table[-1]
                if len(row) == 2:
                    est1 = row[-1]
                    shanks_error = 0
                else:
                    est1, maxc, est2 = row[-1], abs(row[-2]), row[-3]
                    shanks_error = abs(est1-est2)
                if verbose:
                    print("Shanks error: %s" % ctx.nstr(shanks_error))
                if shanks_error <= tol:
                    return est1
                # Unreliable due to cancellation
                if ctx.eps*maxc > tol:
                    if verbose:
                        print("Ran out of precision for Shanks")
                    TRY_SHANKS = False
                if shanks_error < error:
                    error = shanks_error
                    best = est1
            # Try each configured sequence transformation (Levin/Sidi/Cohen).
            for L in summer:
                est, lerror = L.update_psum(partial)
                if verbose:
                    print("%s error: %s" % (L.name, ctx.nstr(lerror)))
                if lerror <= tol:
                    return est
                if lerror < error:
                    error = lerror
                    best = est
            if TRY_EULER_MACLAURIN:
                # Alternating terms make the quadrature inside emfun fail,
                # so detect a sign flip between the last two partial sums.
                if ctx.almosteq(ctx.mpc(ctx.sign(partial[-1]) / ctx.sign(partial[-2])), -1):
                    if verbose:
                        print ("NOT using Euler-Maclaurin: the series appears"
                            " to be alternating, so numerical\n quadrature"
                            " will most likely fail")
                    TRY_EULER_MACLAURIN = False
                else:
                    # emfun estimates the tail beyond the computed terms.
                    value, em_error = emfun(index, tol)
                    value += partial[-1]
                    if verbose:
                        print("Euler-Maclaurin error: %s" % ctx.nstr(em_error))
                    if em_error <= tol:
                        return value
                    if em_error < error:
                        best = value
    finally:
        ctx.prec = orig
    if strict:
        raise ctx.NoConvergence
    if verbose:
        print("Warning: failed to converge to target accuracy")
    return best
|
| 1247 |
+
|
| 1248 |
+
@defun
def nsum(ctx, f, *intervals, **options):
    r"""
    Computes the sum

    .. math :: S = \sum_{k=a}^b f(k)

    where `(a, b)` = *interval*, and where `a = -\infty` and/or
    `b = \infty` are allowed, or more generally

    .. math :: S = \sum_{k_1=a_1}^{b_1} \cdots
                   \sum_{k_n=a_n}^{b_n} f(k_1,\ldots,k_n)

    if multiple intervals are given.

    When appropriate, :func:`~mpmath.nsum` applies convergence
    acceleration to accurately estimate the sums of slowly convergent
    series. If the series is finite, :func:`~mpmath.nsum` simply calls
    :func:`~mpmath.fsum`. Multidimensional infinite series are reduced
    to a single-dimensional series over expanding hypercubes; finite
    ranges are moved innermost.

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> nsum(lambda n: 1/fac(n), [0, inf])
        2.71828182845905
        >>> nsum(lambda n: 1/n**2, [1, inf])
        1.64493406684823

    **Options**

    *tol*
        Desired maximum final error. Defaults roughly to the
        epsilon of the working precision.
    *method*
        Which summation algorithm to use (see below).
        Default: ``'richardson+shanks'``.
    *maxterms*
        Cancel after at most this many terms. Default: 10*dps.
    *steps*
        An iterable giving the number of terms to add between each
        extrapolation attempt. Default: [10, 20, 30, 40, ...].
    *verbose*
        Print details about progress.
    *ignore*
        If enabled, any term raising ``ArithmeticError`` or
        ``ValueError`` (e.g. division by zero) is replaced by zero.
        Convenient for lattice sums with a singular term at the origin.

    **Methods**

    ``'richardson'`` / ``'r'``:
        Richardson extrapolation; effective when `f(k) \sim P(k)/Q(k)`
        or `f(k) \sim (-1)^k P(k)/Q(k)` for polynomials `P`, `Q`.
        See :func:`~mpmath.richardson`.
    ``'shanks'`` / ``'s'``:
        Shanks transformation; effective when `f(k) \sim c^k` or when
        successive terms alternate signs. Can sum some divergent
        series. See :func:`~mpmath.shanks`.
    ``'levin'`` / ``'l'``:
        Levin transformation; better than Shanks for logarithmically
        convergent or alternating divergent series. The variant is
        chosen with ``levin_variant`` ("u", "t", "v" or "all"). The
        related Sidi-S transform is selected with ``'sidi'``.
        See :func:`~mpmath.levin`.
    ``'alternating'`` / ``'a'``:
        Cohen-Villegas-Zagier acceleration of alternating series.
        See :func:`~mpmath.cohen_alt`.
    ``'euler-maclaurin'`` / ``'e'``:
        Euler-Maclaurin summation; works regardless of the decay rate
        of `f`, as long as `f` is smooth, but requires high-order
        numerical derivatives and quadrature. See :func:`~mpmath.sumem`.
    ``'direct'`` / ``'d'``:
        No extrapolation; only for rapidly convergent series.

    The default ``'r+s'`` tries both Richardson and Shanks; ``'r+s+e'``
    is slower but handles more cases.
    """
    # Reduce to a finite sum, or a 1D series g(k) over [0, inf).
    is_infinite, term = standardize(ctx, f, intervals, options)
    if not is_infinite:
        return +term()

    def update(partial_sums, indices):
        # Extend the list of partial sums with the requested terms.
        running = partial_sums[-1] if partial_sums else ctx.zero
        for k in indices:
            running = running + term(ctx.mpf(k))
        partial_sums.append(running)

    prec = ctx.prec

    def emfun(point, tol):
        # Tail estimate via Euler-Maclaurin, at slightly raised precision.
        saved = ctx.prec
        ctx.prec = prec + 10
        result = ctx.sumem(term, [point, ctx.inf], tol, error=1)
        ctx.prec = saved
        return result

    return +ctx.adaptive_extrapolation(update, emfun, options)
|
| 1719 |
+
|
| 1720 |
+
|
| 1721 |
+
def wrapsafe(f):
    """Wrap *f* so that any ``ArithmeticError`` or ``ValueError`` raised
    by a call yields a zero term instead of propagating. Used for the
    ``ignore`` option of nsum/nprod (e.g. singular lattice-sum terms)."""
    def guarded(*call_args):
        try:
            result = f(*call_args)
        except (ArithmeticError, ValueError):
            result = 0
        return result
    return guarded
|
| 1728 |
+
|
| 1729 |
+
def standardize(ctx, f, intervals, options):
    """Normalize a (possibly multidimensional) summation problem.

    Returns ``(infinite, g)``: if *infinite* is False the sum is finite
    and ``g()`` evaluates it directly; otherwise ``g(k)`` gives the k-th
    term of an equivalent one-dimensional series over [0, inf).
    """
    if options.get("ignore"):
        # Replace failing terms with zeros.
        f = wrapsafe(f)
    finite_dims = []
    infinite_dims = []
    for dim, points in enumerate(intervals):
        a, b = ctx._as_points(points)
        if b < a:
            # Empty range: the whole sum is zero.
            return False, (lambda: ctx.zero)
        if a == ctx.ninf or b == ctx.inf:
            infinite_dims.append((dim, (a, b)))
        else:
            finite_dims.append((dim, (int(a), int(b))))
    if finite_dims:
        # Sum out the finite dimensions explicitly (moved innermost).
        f = fold_finite(ctx, f, finite_dims)
        if not infinite_dims:
            return False, lambda: f(*([0] * len(intervals)))
    if infinite_dims:
        # Map each infinite interval onto [0, inf], then collapse all
        # infinite dimensions into the leading one.
        f = standardize_infinite(ctx, f, infinite_dims)
        f = fold_infinite(ctx, f, infinite_dims)
        args = [0] * len(intervals)
        lead = infinite_dims[0][0]
        def g(k):
            args[lead] = k
            return f(*args)
        return True, g
|
| 1755 |
+
|
| 1756 |
+
# backwards compatible itertools.product
|
| 1757 |
+
def cartesian_product(args):
    """Yield tuples forming the Cartesian product of the given
    iterables (a backwards compatible stand-in for itertools.product)."""
    combos = [[]]
    for pool in args:
        choices = tuple(pool)
        combos = [prefix + [item] for prefix in combos for item in choices]
    for combo in combos:
        yield tuple(combo)
|
| 1764 |
+
|
| 1765 |
+
def fold_finite(ctx, f, intervals):
    """Return a function in which the listed finite summation
    dimensions of *f* have been summed out explicitly.

    *intervals* is a list of ``(dim, (a, b))`` pairs with inclusive
    integer endpoints.
    """
    if not intervals:
        return f
    dims = [entry[0] for entry in intervals]
    bounds = [entry[1] for entry in intervals]
    ranges = [xrange(lo, hi + 1) for (lo, hi) in bounds]
    def g(*args):
        args = list(args)
        total = ctx.zero
        # Accumulate f over every combination of the finite indices.
        for combo in cartesian_product(ranges):
            for dim, value in zip(dims, combo):
                args[dim] = ctx.mpf(value)
            total += f(*args)
        return total
    return g
|
| 1781 |
+
|
| 1782 |
+
# Standardize each interval to [0,inf]
|
| 1783 |
+
# Standardize each interval to [0,inf]
def standardize_infinite(ctx, f, intervals):
    """Recursively remap each infinite interval onto [0, inf].

    A [-inf, inf] dimension is folded symmetrically (term k covers both
    +k and -k, with k = 0 counted once); [-inf, b] is reflected;
    [a, inf] is shifted.
    """
    if not intervals:
        return f
    dim, (a, b) = intervals[-1]
    if a == ctx.ninf:
        if b == ctx.inf:
            def g(*args):
                args = list(args)
                k = args[dim]
                if k:
                    # Pair the terms at +k and -k.
                    total = f(*args)
                    args[dim] = -k
                    total += f(*args)
                    return total
                else:
                    return f(*args)
        else:
            def g(*args):
                # Reflect (-inf, b] onto [0, inf).
                args = list(args)
                args[dim] = b - args[dim]
                return f(*args)
    else:
        def g(*args):
            # Shift [a, inf) onto [0, inf).
            args = list(args)
            args[dim] += a
            return f(*args)
    return standardize_infinite(ctx, g, intervals[:-1])
|
| 1811 |
+
|
| 1812 |
+
def fold_infinite(ctx, f, intervals):
    """Collapse the two innermost [0, inf] dimensions into one.

    Term n of the folded series sums f over the hypercube shell
    max(x, y) == n, so partial sums over n cover expanding squares.
    Assumes every interval has already been standardized to [0, inf].
    """
    if len(intervals) < 2:
        return f
    dim1 = intervals[-2][0]
    dim2 = intervals[-1][0]
    def g(*args):
        args = list(args)
        n = int(args[dim1])
        total = ctx.zero
        # Top edge of the shell: y = n, x = 0..n.
        args[dim2] = ctx.mpf(n)
        for x in xrange(n + 1):
            args[dim1] = ctx.mpf(x)
            total += f(*args)
        # Right edge: x = n, y = 0..n-1 (corner already counted above).
        args[dim1] = ctx.mpf(n)
        for y in xrange(n):
            args[dim2] = ctx.mpf(y)
            total += f(*args)
        return total
    return fold_infinite(ctx, g, intervals[:-1])
|
| 1835 |
+
|
| 1836 |
+
@defun
def nprod(ctx, f, interval, nsum=False, **kwargs):
    r"""
    Computes the product

    .. math ::

        P = \prod_{k=a}^b f(k)

    where `(a, b)` = *interval*, and where `a = -\infty` and/or
    `b = \infty` are allowed.

    By default, :func:`~mpmath.nprod` uses the same extrapolation
    methods as :func:`~mpmath.nsum`, applied to the partial products,
    and accepts the same keyword options. If ``nsum=True`` (also used
    automatically when Euler-Maclaurin summation is requested), the
    product is instead computed via :func:`~mpmath.nsum` as

    .. math ::

        P = \exp\left( \sum_{k=a}^b \log(f(k)) \right),

    which is slower but can sometimes yield better results.

    **Examples**

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> nprod(lambda k: k, [1, 4])
        24.0
        >>> 2*nprod(lambda k: (4*k**2)/(4*k**2-1), [1, inf])
        3.141592653589793238462643
        >>> nprod(lambda k: (1-1/k**2), [2, inf])
        0.5
        >>> nprod(lambda k: exp(1/(1+k**2)), [-inf, inf])
        23.41432688231864337420035

    **References**

    1. [Weisstein]_ http://mathworld.wolfram.com/InfiniteProduct.html

    """
    if nsum or ('e' in kwargs.get('method', '')):
        saved_prec = ctx.prec
        try:
            # TODO: we evaluate log(1+eps) -> eps, which is inaccurate.
            # This currently works because nsum greatly increases the
            # working precision, but the precision should really be
            # managed here.
            ctx.prec += 10
            logsum = ctx.nsum(lambda n: ctx.ln(f(n)), interval, **kwargs)
        finally:
            ctx.prec = saved_prec
        return +ctx.exp(logsum)

    a, b = ctx._as_points(interval)
    if a == ctx.ninf:
        if b == ctx.inf:
            # Doubly infinite: f(0) times the product of paired terms.
            return f(0) * ctx.nprod(lambda k: f(-k) * f(k), [1, ctx.inf], **kwargs)
        # (-inf, b]: reflect onto [-b, inf).
        return ctx.nprod(f, [-b, ctx.inf], **kwargs)
    elif b != ctx.inf:
        # Finite product: multiply terms directly.
        return ctx.fprod(f(ctx.mpf(k)) for k in xrange(int(a), int(b) + 1))

    a = int(a)

    def update(partial_products, indices):
        # Extend the list of partial products with the requested terms.
        running = partial_products[-1] if partial_products else ctx.one
        for k in indices:
            running = running * f(a + ctx.mpf(k))
        partial_products.append(running)

    return +ctx.adaptive_extrapolation(update, None, kwargs)
|
| 1999 |
+
|
| 2000 |
+
|
| 2001 |
+
@defun
def limit(ctx, f, x, direction=1, exp=False, **kwargs):
    r"""
    Computes an estimate of the limit

    .. math ::

        \lim_{t \to x} f(t)

    where `x` may be finite or infinite.

    For finite `x`, :func:`~mpmath.limit` evaluates `f(x + d/n)` for
    consecutive integer values of `n`, where the approach direction
    `d` may be specified using the *direction* keyword argument.
    For infinite `x`, :func:`~mpmath.limit` evaluates values of
    `f(\mathrm{sign}(x) \cdot n)`.

    If the approach to the limit is not sufficiently fast to give
    an accurate estimate directly, :func:`~mpmath.limit` attempts to find
    the limit using Richardson extrapolation or the Shanks
    transformation. You can select between these methods using
    the *method* keyword (see documentation of :func:`~mpmath.nsum` for
    more information).

    **Options**

    The following options are available with essentially the
    same meaning as for :func:`~mpmath.nsum`: *tol*, *method*, *maxterms*,
    *steps*, *verbose*.

    If the option *exp=True* is set, `f` will be
    sampled at exponentially spaced points `n = 2^1, 2^2, 2^3, \ldots`
    instead of the linearly spaced points `n = 1, 2, 3, \ldots`.
    This can sometimes improve the rate of convergence so that
    :func:`~mpmath.limit` may return a more accurate answer (and faster).
    However, do note that this can only be used if `f`
    supports fast and accurate evaluation for arguments that
    are extremely close to the limit point (or if infinite,
    very large arguments).

    **Examples**

    A basic evaluation of a removable singularity::

        >>> from mpmath import *
        >>> mp.dps = 30; mp.pretty = True
        >>> limit(lambda x: (x-sin(x))/x**3, 0)
        0.166666666666666666666666666667

    Computing the exponential function using its limit definition::

        >>> limit(lambda n: (1+3/n)**n, inf)
        20.0855369231876677409285296546
        >>> exp(3)
        20.0855369231876677409285296546

    A limit for `\pi`::

        >>> f = lambda n: 2**(4*n+1)*fac(n)**4/(2*n+1)/fac(2*n)**2
        >>> limit(f, inf)
        3.14159265358979323846264338328

    Calculating the coefficient in Stirling's formula::

        >>> limit(lambda n: fac(n) / (sqrt(n)*(n/e)**n), inf)
        2.50662827463100050241576528481
        >>> sqrt(2*pi)
        2.50662827463100050241576528481

    Evaluating Euler's constant `\gamma` using the limit representation

    .. math ::

        \gamma = \lim_{n \rightarrow \infty } \left[ \left(
        \sum_{k=1}^n \frac{1}{k} \right) - \log(n) \right]

    (which converges notoriously slowly)::

        >>> f = lambda n: sum([mpf(1)/k for k in range(1,int(n)+1)]) - log(n)
        >>> limit(f, inf)
        0.577215664901532860606512090082
        >>> +euler
        0.577215664901532860606512090082

    With default settings, the following limit converges too slowly
    to be evaluated accurately. Changing to exponential sampling
    however gives a perfect result::

        >>> f = lambda x: sqrt(x**3+x**2)/(sqrt(x**3)+x)
        >>> limit(f, inf)
        0.992831158558330281129249686491
        >>> limit(f, inf, exp=True)
        1.0

    """

    if ctx.isinf(x):
        # Approach +inf or -inf along integer multiples of sign(x):
        # sample f(1*d), f(2*d), f(3*d), ...
        direction = ctx.sign(x)
        g = lambda k: f(ctx.mpf(k+1)*direction)
    else:
        # Approach the finite point x from the side given by *direction*:
        # sample f(x + d/1), f(x + d/2), ...  (coerce direction to mpf/mpc)
        direction *= ctx.one
        g = lambda k: f(x + direction/(k+1))
    if exp:
        # Exponential sampling: reindex so the k-th sample is taken at
        # n = 2^k instead of n = k, accelerating slowly converging limits.
        h = g
        g = lambda k: h(2**k)

    def update(values, indices):
        # Callback for the extrapolation driver: append further sampled
        # values of g for each requested index.
        for k in indices:
            values.append(g(k+1))

    # XXX: steps used by nsum don't work well
    if not 'steps' in kwargs:
        kwargs['steps'] = [10]

    # Unary + rounds the extrapolated result to the current precision.
    return +ctx.adaptive_extrapolation(update, None, kwargs)
|
vllm/lib/python3.10/site-packages/mpmath/calculus/inverselaplace.py
ADDED
|
@@ -0,0 +1,973 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# contributed to mpmath by Kristopher L. Kuhlman, February 2017
|
| 2 |
+
# contributed to mpmath by Guillermo Navas-Palencia, February 2022
|
| 3 |
+
|
| 4 |
+
class InverseLaplaceTransform(object):
    r"""
    Common infrastructure shared by the numerical inverse Laplace
    transform algorithms, collected here to keep the individual
    method implementations small.

    To provide a custom inversion algorithm, derive from
    :class:`InverseLaplaceTransform`, implement the two methods
    below, and hand the subclass to :func:`~mpmath.invertlaplace`
    via its *method* argument.
    """

    def __init__(self, ctx):
        # The working context supplies all arithmetic and precision
        # (dps) control for the algorithm.
        self.ctx = ctx

    def calc_laplace_parameter(self, t, **kwargs):
        r"""
        Determine the vector of Laplace parameter values required by
        the algorithm. The values depend on which algorithm is used
        (de Hoog is the default), on any algorithm-specific options
        passed (or their defaults), and on the desired time.
        """
        raise NotImplementedError

    def calc_time_domain_solution(self, fp):
        r"""
        Compute the time-domain solution from the Laplace-space
        function evaluations at the abscissa the algorithm requires.
        Abscissa computed for one algorithm are typically not useful
        for another algorithm.
        """
        raise NotImplementedError
|
| 37 |
+
|
| 38 |
+
|
| 39 |
+
class FixedTalbot(InverseLaplaceTransform):

    def calc_laplace_parameter(self, t, **kwargs):
        r"""The "fixed" Talbot method deforms the Bromwich contour towards
        `-\infty` in the shape of a parabola. Traditionally the Talbot
        algorithm has adjustable parameters, but the "fixed" version
        does not. The `r` parameter could be passed in as a parameter,
        if you want to override the default given by (Abate & Valko,
        2004).

        The Laplace parameter is sampled along a parabola opening
        along the negative imaginary axis, with the base of the
        parabola along the real axis at
        `p=\frac{r}{t_\mathrm{max}}`. As the number of terms used in
        the approximation (degree) grows, the abscissa required for
        function evaluation tend towards `-\infty`, requiring high
        precision to prevent overflow. If any poles, branch cuts or
        other singularities exist such that the deformed Bromwich
        contour lies to the left of the singularity, the method will
        fail.

        **Optional arguments**

        :class:`~mpmath.calculus.inverselaplace.FixedTalbot.calc_laplace_parameter`
        recognizes the following keywords

        *tmax*
            maximum time associated with vector of times
            (typically just the time requested)
        *degree*
            integer order of approximation (M = number of terms)
        *r*
            abscissa for `p_0` (otherwise computed using rule
            of thumb `2M/5`)

        The working precision will be increased according to a rule of
        thumb. If 'degree' is not specified, the working precision and
        degree are chosen to hopefully achieve the dps of the calling
        context. If 'degree' is specified, the working precision is
        chosen to achieve maximum resulting precision for the
        specified degree.

        .. math ::

            p_0=\frac{r}{t}

        .. math ::

            p_i=\frac{i r \pi}{Mt_\mathrm{max}}\left[\cot\left(
            \frac{i\pi}{M}\right) + j \right] \qquad 1\le i <M

        where `j=\sqrt{-1}`, `r=2M/5`, and `t_\mathrm{max}` is the
        maximum specified time.

        """

        # required
        # ------------------------------
        # time of desired approximation
        self.t = self.ctx.convert(t)

        # optional
        # ------------------------------
        # maximum time desired (used for scaling) default is requested
        # time.
        self.tmax = self.ctx.convert(kwargs.get('tmax', self.t))

        # empirical relationships used here based on a linear fit of
        # requested and delivered dps for exponentially decaying time
        # functions for requested dps up to 512.

        if 'degree' in kwargs:
            # user-specified degree: use it directly as the dps goal
            self.degree = kwargs['degree']
            self.dps_goal = self.degree
        else:
            # derive working precision from the calling context's dps,
            # then pick a degree large enough to reach it (min 12 terms)
            self.dps_goal = int(1.72*self.ctx.dps)
            self.degree = max(12, int(1.38*self.dps_goal))

        M = self.degree

        # this is adjusting the dps of the calling context; hopefully
        # the caller doesn't monkey around with it between calling
        # this routine and calc_time_domain_solution()
        self.dps_orig = self.ctx.dps
        self.ctx.dps = self.dps_goal

        # Abate & Valko rule of thumb for r parameter
        self.r = kwargs.get('r', self.ctx.fraction(2, 5)*M)

        # M+1 equally spaced angles on [0, pi]; theta[0]=0 is unused below
        self.theta = self.ctx.linspace(0.0, self.ctx.pi, M+1)

        self.cot_theta = self.ctx.matrix(M, 1)
        self.cot_theta[0] = 0  # not used

        # all but time-dependent part of p
        self.delta = self.ctx.matrix(M, 1)
        self.delta[0] = self.r

        for i in range(1, M):
            self.cot_theta[i] = self.ctx.cot(self.theta[i])
            self.delta[i] = self.r*self.theta[i]*(self.cot_theta[i] + 1j)

        # abscissa: p_i = delta_i / tmax (elementwise matrix division)
        self.p = self.ctx.matrix(M, 1)
        self.p = self.delta/self.tmax

        # NB: p is complex (mpc)

    def calc_time_domain_solution(self, fp, t, manual_prec=False):
        r"""The fixed Talbot time-domain solution is computed from the
        Laplace-space function evaluations using

        .. math ::

            f(t,M)=\frac{2}{5t}\sum_{k=0}^{M-1}\Re \left[
            \gamma_k \bar{f}(p_k)\right]

        where

        .. math ::

            \gamma_0 = \frac{1}{2}e^{r}\bar{f}(p_0)

        .. math ::

            \gamma_k = e^{tp_k}\left\lbrace 1 + \frac{jk\pi}{M}\left[1 +
            \cot \left( \frac{k \pi}{M} \right)^2 \right] - j\cot\left(
            \frac{k \pi}{M}\right)\right \rbrace \qquad 1\le k<M.

        Again, `j=\sqrt{-1}`.

        Before calling this function, call
        :class:`~mpmath.calculus.inverselaplace.FixedTalbot.calc_laplace_parameter`
        to set the parameters and compute the required coefficients.

        **References**

        1. Abate, J., P. Valko (2004). Multi-precision Laplace
           transform inversion. *International Journal for Numerical
           Methods in Engineering* 60:979-993,
           http://dx.doi.org/10.1002/nme.995
        2. Talbot, A. (1979). The accurate numerical inversion of
           Laplace transforms. *IMA Journal of Applied Mathematics*
           23(1):97, http://dx.doi.org/10.1093/imamat/23.1.97
        """

        # required
        # ------------------------------
        self.t = self.ctx.convert(t)

        # assume fp was computed from p matrix returned from
        # calc_laplace_parameter(), so is already a list or matrix of
        # mpmath 'mpc' types

        # these were computed in previous call to
        # calc_laplace_parameter()
        # NOTE(review): p and r are bound here but not referenced below;
        # presumably kept for symmetry/debugging.
        theta = self.theta
        delta = self.delta
        M = self.degree
        p = self.p
        r = self.r

        # per-term contributions gamma_k * fbar(p_k); first term halved
        ans = self.ctx.matrix(M, 1)
        ans[0] = self.ctx.exp(delta[0])*fp[0]/2

        for i in range(1, M):
            ans[i] = self.ctx.exp(delta[i])*fp[i]*(
                1 + 1j*theta[i]*(1 + self.cot_theta[i]**2) -
                1j*self.cot_theta[i])

        # f(t,M) = (2/5) * sum(ans) / t
        result = self.ctx.fraction(2, 5)*self.ctx.fsum(ans)/self.t

        # setting dps back to value when calc_laplace_parameter was
        # called, unless flag is set.
        if not manual_prec:
            self.ctx.dps = self.dps_orig

        # discard the (ideally tiny) imaginary part
        return result.real
|
| 216 |
+
|
| 217 |
+
|
| 218 |
+
# ****************************************
|
| 219 |
+
|
| 220 |
+
class Stehfest(InverseLaplaceTransform):

    def calc_laplace_parameter(self, t, **kwargs):
        r"""
        The Gaver-Stehfest method is a discrete approximation of the
        Widder-Post inversion algorithm, rather than a direct
        approximation of the Bromwich contour integral.

        The method uses abscissa along the real axis only, and
        therefore has issues inverting oscillatory functions (which
        have poles in pairs away from the real axis).

        The working precision will be increased according to a rule of
        thumb. If 'degree' is not specified, the working precision and
        degree are chosen to hopefully achieve the dps of the calling
        context. If 'degree' is specified, the working precision is
        chosen to achieve maximum resulting precision for the
        specified degree.

        .. math ::

            p_k = \frac{k \log 2}{t} \qquad 1 \le k \le M
        """

        # required
        # ------------------------------
        # time of desired approximation
        self.t = self.ctx.convert(t)

        # optional
        # ------------------------------

        # empirical relationships used here based on a linear fit of
        # requested and delivered dps for exponentially decaying time
        # functions for requested dps up to 512.

        if 'degree' in kwargs:
            # user-specified degree: derive the working precision from it
            self.degree = kwargs['degree']
            self.dps_goal = int(1.38*self.degree)
        else:
            # derive working precision from the context dps, then pick a
            # matching degree (min 16 terms)
            self.dps_goal = int(2.93*self.ctx.dps)
            self.degree = max(16, self.dps_goal)

        # _coeff routine requires even degree
        if self.degree % 2 > 0:
            self.degree += 1

        M = self.degree

        # this is adjusting the dps of the calling context;
        # hopefully the caller doesn't monkey around with it
        # between calling this routine and calc_time_domain_solution()
        self.dps_orig = self.ctx.dps
        self.ctx.dps = self.dps_goal

        # Salzer/Stehfest weights V_k (depend only on degree & precision)
        self.V = self._coeff()
        # abscissa: p_k = k*ln(2)/t for k = 1..M
        self.p = self.ctx.matrix(self.ctx.arange(1, M+1))*self.ctx.ln2/self.t

        # NB: p is real (mpf)

    def _coeff(self):
        r"""Salzer summation weights (aka, "Stehfest coefficients")
        only depend on the approximation order (M) and the precision."""

        M = self.degree
        M2 = int(M/2)  # checked earlier that M is even

        V = self.ctx.matrix(M, 1)

        # Salzer summation weights
        # get very large in magnitude and oscillate in sign;
        # if the precision is not high enough, there will be
        # catastrophic cancellation
        for k in range(1, M+1):
            # partial terms of the inner sum over i = ceil(k/2)..min(k, M/2)
            z = self.ctx.matrix(min(k, M2)+1, 1)
            for j in range(int((k+1)/2), min(k, M2)+1):
                z[j] = (self.ctx.power(j, M2)*self.ctx.fac(2*j)/
                        (self.ctx.fac(M2-j)*self.ctx.fac(j)*
                         self.ctx.fac(j-1)*self.ctx.fac(k-j)*
                         self.ctx.fac(2*j-k)))
            # alternating sign factor (-1)^(k + M/2)
            V[k-1] = self.ctx.power(-1, k+M2)*self.ctx.fsum(z)

        return V

    def calc_time_domain_solution(self, fp, t, manual_prec=False):
        r"""Compute time-domain Stehfest algorithm solution.

        .. math ::

            f(t,M) = \frac{\log 2}{t} \sum_{k=1}^{M} V_k \bar{f}\left(
            p_k \right)

        where

        .. math ::

            V_k = (-1)^{k + N/2} \sum^{\min(k,N/2)}_{i=\lfloor(k+1)/2 \rfloor}
            \frac{i^{\frac{N}{2}}(2i)!}{\left(\frac{N}{2}-i \right)! \, i! \,
            \left(i-1 \right)! \, \left(k-i\right)! \, \left(2i-k \right)!}

        As the degree increases, the abscissa (`p_k`) only increase
        linearly towards `\infty`, but the Stehfest coefficients
        (`V_k`) alternate in sign and increase rapidly in magnitude,
        requiring high precision to prevent overflow or loss of
        significance when evaluating the sum.

        **References**

        1. Widder, D. (1941). *The Laplace Transform*. Princeton.
        2. Stehfest, H. (1970). Algorithm 368: numerical inversion of
           Laplace transforms. *Communications of the ACM* 13(1):47-49,
           http://dx.doi.org/10.1145/361953.361969

        """

        # required
        self.t = self.ctx.convert(t)

        # assume fp was computed from p matrix returned from
        # calc_laplace_parameter(), so is already
        # a list or matrix of mpmath 'mpf' types

        # weighted sum: f(t) ~ ln(2)/t * sum_k V_k * fbar(p_k)
        result = self.ctx.fdot(self.V, fp)*self.ctx.ln2/self.t

        # setting dps back to value when calc_laplace_parameter was called
        if not manual_prec:
            self.ctx.dps = self.dps_orig

        # ignore any small imaginary part
        return result.real
|
| 350 |
+
|
| 351 |
+
|
| 352 |
+
# ****************************************
|
| 353 |
+
|
| 354 |
+
class deHoog(InverseLaplaceTransform):

    def calc_laplace_parameter(self, t, **kwargs):
        r"""The de Hoog, Knight & Stokes algorithm is an
        accelerated form of the Fourier series numerical
        inverse Laplace transform algorithms.

        .. math ::

            p_k = \gamma + \frac{jk}{T} \qquad 0 \le k < 2M+1

        where

        .. math ::

            \gamma = \alpha - \frac{\log \mathrm{tol}}{2T},

        `j=\sqrt{-1}`, `T = 2t_\mathrm{max}` is a scaled time,
        `\alpha=10^{-\mathrm{dps\_goal}}` is the real part of the
        rightmost pole or singularity, which is chosen based on the
        desired accuracy (assuming the rightmost singularity is 0),
        and `\mathrm{tol}=10\alpha` is the desired tolerance, which is
        chosen in relation to `\alpha`.

        When increasing the degree, the abscissa increase towards
        `j\infty`, but more slowly than the fixed Talbot
        algorithm. The de Hoog et al. algorithm typically does better
        with oscillatory functions of time, and less well-behaved
        functions. The method tends to be slower than the Talbot and
        Stehfest algorithms, especially so at very high precision
        (e.g., `>500` digits precision).

        """

        # required
        # ------------------------------
        self.t = self.ctx.convert(t)

        # optional
        # ------------------------------
        # maximum time desired (used for scaling); default is requested time
        self.tmax = kwargs.get('tmax', self.t)

        # empirical relationships used here based on a linear fit of
        # requested and delivered dps for exponentially decaying time
        # functions for requested dps up to 512.

        if 'degree' in kwargs:
            # user-specified degree: derive working precision from it
            self.degree = kwargs['degree']
            self.dps_goal = int(1.38*self.degree)
        else:
            # derive working precision from the context dps, then pick a
            # matching degree (min 10)
            self.dps_goal = int(self.ctx.dps*1.36)
            self.degree = max(10, self.dps_goal)

        # 2*M+1 terms in approximation
        M = self.degree

        # adjust alpha component of abscissa of convergence for higher
        # precision
        tmp = self.ctx.power(10.0, -self.dps_goal)
        self.alpha = self.ctx.convert(kwargs.get('alpha', tmp))

        # desired tolerance (here simply related to alpha)
        self.tol = self.ctx.convert(kwargs.get('tol', self.alpha*10.0))
        self.np = 2*self.degree+1  # number of terms in approximation

        # this is adjusting the dps of the calling context;
        # hopefully the caller doesn't monkey around with it
        # between calling this routine and calc_time_domain_solution()
        self.dps_orig = self.ctx.dps
        self.ctx.dps = self.dps_goal

        # scaling factor (likely tunable, but 2 is typical)
        self.scale = kwargs.get('scale', 2)
        self.T = self.ctx.convert(kwargs.get('T', self.scale*self.tmax))

        # abscissa: equally spaced points along a vertical contour at
        # real part gamma
        self.p = self.ctx.matrix(2*M+1, 1)
        self.gamma = self.alpha - self.ctx.log(self.tol)/(self.scale*self.T)
        self.p = (self.gamma + self.ctx.pi*
                  self.ctx.matrix(self.ctx.arange(self.np))/self.T*1j)

        # NB: p is complex (mpc)

    def calc_time_domain_solution(self, fp, t, manual_prec=False):
        r"""Calculate time-domain solution for
        de Hoog, Knight & Stokes algorithm.

        The un-accelerated Fourier series approach is:

        .. math ::

            f(t,2M+1) = \frac{e^{\gamma t}}{T} \sum_{k=0}^{2M}{}^{'}
            \Re\left[\bar{f}\left( p_k \right)
            e^{i\pi t/T} \right],

        where the prime on the summation indicates the first term is halved.

        This simplistic approach requires so many function evaluations
        that it is not practical. Non-linear acceleration is
        accomplished via Pade-approximation and an analytic expression
        for the remainder of the continued fraction. See the original
        paper (reference 2 below) for a detailed description of the
        numerical approach.

        **References**

        1. Davies, B. (2005). *Integral Transforms and their
           Applications*, Third Edition. Springer.
        2. de Hoog, F., J. Knight, A. Stokes (1982). An improved
           method for numerical inversion of Laplace transforms. *SIAM
           Journal of Scientific and Statistical Computing* 3:357-366,
           http://dx.doi.org/10.1137/0903022

        """

        M = self.degree
        np = self.np
        T = self.T

        self.t = self.ctx.convert(t)

        # would it be useful to try re-using
        # space between e&q and A&B?
        e = self.ctx.zeros(np, M+1)
        q = self.ctx.matrix(2*M, M)
        d = self.ctx.matrix(np, 1)
        A = self.ctx.zeros(np+1, 1)
        B = self.ctx.ones(np+1, 1)

        # initialize Q-D table
        e[:, 0] = 0.0 + 0j
        # first column of q: ratios of successive fp values, with the
        # leading term halved (the "primed" first term of the series)
        q[0, 0] = fp[1]/(fp[0]/2)
        for i in range(1, 2*M):
            q[i, 0] = fp[i+1]/fp[i]

        # rhombus rule for filling triangular Q-D table (e & q)
        for r in range(1, M+1):
            # start with e, column 1, 0:2*M-2
            mr = 2*(M-r) + 1
            e[0:mr, r] = q[1:mr+1, r-1] - q[0:mr, r-1] + e[1:mr+1, r-1]
            if not r == M:
                rq = r+1
                mr = 2*(M-rq)+1 + 2
                for i in range(mr):
                    q[i, rq-1] = q[i+1, rq-2]*e[i+1, rq-1]/e[i, rq-1]

        # build up continued fraction coefficients (d)
        d[0] = fp[0]/2
        for r in range(1, M+1):
            d[2*r-1] = -q[0, r-1]  # even terms
            d[2*r] = -e[0, r]  # odd terms

        # seed A and B for recurrence
        A[0] = 0.0 + 0.0j
        A[1] = d[0]
        B[0:2] = 1.0 + 0.0j

        # base of the power series
        z = self.ctx.expjpi(self.t/T)  # i*pi is already in fcn

        # coefficients of Pade approximation (A & B)
        # using recurrence for all but last term
        for i in range(1, 2*M):
            A[i+1] = A[i] + d[i]*A[i-1]*z
            B[i+1] = B[i] + d[i]*B[i-1]*z

        # "improved remainder" to continued fraction
        brem = (1 + (d[2*M-1] - d[2*M])*z)/2
        # powm1(x,y) computes x^y - 1 more accurately near zero
        rem = brem*self.ctx.powm1(1 + d[2*M]*z/brem,
                                  self.ctx.fraction(1, 2))

        # last term of recurrence using new remainder
        A[np] = A[2*M] + rem*A[2*M-1]
        B[np] = B[2*M] + rem*B[2*M-1]

        # diagonal Pade approximation
        # F=A/B represents accelerated trapezoid rule
        result = self.ctx.exp(self.gamma*self.t)/T*(A[np]/B[np]).real

        # setting dps back to value when calc_laplace_parameter was called
        if not manual_prec:
            self.ctx.dps = self.dps_orig

        return result
|
| 538 |
+
|
| 539 |
+
|
| 540 |
+
# ****************************************
|
| 541 |
+
|
| 542 |
+
class Cohen(InverseLaplaceTransform):

    def calc_laplace_parameter(self, t, **kwargs):
        r"""The Cohen algorithm accelerates the convergence of the nearly
        alternating series resulting from the application of the trapezoidal
        rule to the Bromwich contour inversion integral.

        .. math ::

            p_k = \frac{\gamma}{2 t} + \frac{\pi i k}{t} \qquad 0 \le k < M

        where

        .. math ::

            \gamma = \frac{2}{3} (d + \log(10) + \log(2 t)),

        `d = \mathrm{dps\_goal}`, which is chosen based on the desired
        accuracy using the method developed in [1] to improve numerical
        stability. The Cohen algorithm shows robustness similar to the de Hoog
        et al. algorithm, but it is faster than the fixed Talbot algorithm.

        **Optional arguments**

        *degree*
            integer order of the approximation (M = number of terms)
        *alpha*
            abscissa for `p_0` (controls the discretization error)

        The working precision will be increased according to a rule of
        thumb. If 'degree' is not specified, the working precision and
        degree are chosen to hopefully achieve the dps of the calling
        context. If 'degree' is specified, the working precision is
        chosen to achieve maximum resulting precision for the
        specified degree.

        **References**

        1. P. Glasserman, J. Ruiz-Mata (2006). Computing the credit loss
        distribution in the Gaussian copula model: a comparison of methods.
        *Journal of Credit Risk* 2(4):33-66, 10.21314/JCR.2006.057

        """
        # time of desired solution; the abscissa below all depend on it
        self.t = self.ctx.convert(t)

        # rule-of-thumb coupling between series degree and working precision:
        # either the requested degree fixes the achievable dps, or the
        # context's dps fixes the degree
        if 'degree' in kwargs:
            self.degree = kwargs['degree']
            self.dps_goal = int(1.5 * self.degree)
        else:
            self.dps_goal = int(self.ctx.dps * 1.74)
            self.degree = max(22, int(1.31 * self.dps_goal))

        # M = number of terms = number of Laplace-space abscissa
        M = self.degree + 1

        # this is adjusting the dps of the calling context hopefully
        # the caller doesn't monkey around with it between calling
        # this routine and calc_time_domain_solution()
        self.dps_orig = self.ctx.dps
        self.ctx.dps = self.dps_goal

        # default abscissa shift: (2/3) * (dps*ln(10) + ln(2*t));
        # 'alpha' kwarg overrides it
        ttwo = 2 * self.t
        tmp = self.ctx.dps * self.ctx.log(10) + self.ctx.log(ttwo)
        tmp = self.ctx.fraction(2, 3) * tmp
        self.alpha = self.ctx.convert(kwargs.get('alpha', tmp))

        # all but time-dependent part of p
        a_t = self.alpha / ttwo
        p_t = self.ctx.pi * 1j / self.t

        # p[0] is purely real; the rest march up the imaginary axis in
        # steps of pi*i/t
        self.p = self.ctx.matrix(M, 1)
        self.p[0] = a_t

        for i in range(1, M):
            self.p[i] = a_t + i * p_t

    def calc_time_domain_solution(self, fp, t, manual_prec=False):
        r"""Calculate time-domain solution for Cohen algorithm.

        The accelerated nearly alternating series is:

        .. math ::

            f(t, M) = \frac{e^{\gamma / 2}}{t} \left[\frac{1}{2}
            \Re\left(\bar{f}\left(\frac{\gamma}{2t}\right) \right) -
            \sum_{k=0}^{M-1}\frac{c_{M,k}}{d_M}\Re\left(\bar{f}
            \left(\frac{\gamma + 2(k+1) \pi i}{2t}\right)\right)\right],

        where coefficients `\frac{c_{M, k}}{d_M}` are described in [1].

        1. H. Cohen, F. Rodriguez Villegas, D. Zagier (2000). Convergence
        acceleration of alternating series. *Experiment. Math* 9(1):3-12

        """
        self.t = self.ctx.convert(t)

        n = self.degree
        M = n + 1

        # keep only the real parts of the Laplace-space samples
        # (the time-domain solution is assumed real-valued)
        A = self.ctx.matrix(M, 1)
        for i in range(M):
            A[i] = fp[i].real

        # normalization d_M = ((3+sqrt(8))^n + (3+sqrt(8))^-n) / 2
        # from the Cohen/Rodriguez Villegas/Zagier acceleration scheme
        d = (3 + self.ctx.sqrt(8)) ** n
        d = (d + 1 / d) / 2
        b = -self.ctx.one
        c = -d
        s = 0

        # accumulate the accelerated alternating sum; b carries the
        # coefficient recurrence (c_{M,k}) between iterations, and the
        # statement order inside this loop is essential
        for k in range(n):
            c = b - c
            s = s + c * A[k + 1]
            b = 2 * (k + n) * (k - n) * b / ((2 * k + 1) * (k + self.ctx.one))

        result = self.ctx.exp(self.alpha / 2) / self.t * (A[0] / 2 - s / d)

        # setting dps back to value when calc_laplace_parameter was
        # called, unless flag is set.
        if not manual_prec:
            self.ctx.dps = self.dps_orig

        return result
|
| 663 |
+
|
| 664 |
+
|
| 665 |
+
# ****************************************
|
| 666 |
+
|
| 667 |
+
class LaplaceTransformInversionMethods(object):
    """Context mixin providing numerical inverse Laplace transforms.

    One reusable instance of each inversion rule is attached to the
    context at construction time, so repeated calls to
    :func:`invertlaplace` do not rebuild the rule objects.
    """

    def __init__(ctx, *args, **kwargs):
        ctx._fixed_talbot = FixedTalbot(ctx)
        ctx._stehfest = Stehfest(ctx)
        ctx._de_hoog = deHoog(ctx)
        ctx._cohen = Cohen(ctx)

    def invertlaplace(ctx, f, t, **kwargs):
        r"""Computes the numerical inverse Laplace transform for a
        Laplace-space function at a given time.  The function being
        evaluated is assumed to be a real-valued function of time.

        The user must supply a Laplace-space function `\bar{f}(p)`,
        and a desired time at which to estimate the time-domain
        solution `f(t)`.

        A few basic examples of Laplace-space functions with known
        inverses (see references [1,2]) :

        .. math ::

            \mathcal{L}\left\lbrace f(t) \right\rbrace=\bar{f}(p)

        .. math ::

            \mathcal{L}^{-1}\left\lbrace \bar{f}(p) \right\rbrace = f(t)

        .. math ::

            \bar{f}(p) = \frac{1}{(p+1)^2}

        .. math ::

            f(t) = t e^{-t}

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> tt = [0.001, 0.01, 0.1, 1, 10]
        >>> fp = lambda p: 1/(p+1)**2
        >>> ft = lambda t: t*exp(-t)
        >>> ft(tt[0]),ft(tt[0])-invertlaplace(fp,tt[0],method='talbot')
        (0.000999000499833375, 8.57923043561212e-20)
        >>> ft(tt[1]),ft(tt[1])-invertlaplace(fp,tt[1],method='talbot')
        (0.00990049833749168, 3.27007646698047e-19)
        >>> ft(tt[2]),ft(tt[2])-invertlaplace(fp,tt[2],method='talbot')
        (0.090483741803596, -1.75215800052168e-18)
        >>> ft(tt[3]),ft(tt[3])-invertlaplace(fp,tt[3],method='talbot')
        (0.367879441171442, 1.2428864009344e-17)
        >>> ft(tt[4]),ft(tt[4])-invertlaplace(fp,tt[4],method='talbot')
        (0.000453999297624849, 4.04513489306658e-20)

        The methods also work for higher precision:

        >>> mp.dps = 100; mp.pretty = True
        >>> nstr(ft(tt[0]),15),nstr(ft(tt[0])-invertlaplace(fp,tt[0],method='talbot'),15)
        ('0.000999000499833375', '-4.96868310693356e-105')
        >>> nstr(ft(tt[1]),15),nstr(ft(tt[1])-invertlaplace(fp,tt[1],method='talbot'),15)
        ('0.00990049833749168', '1.23032291513122e-104')

        .. math ::

            \bar{f}(p) = \frac{1}{p^2+1}

        .. math ::

            f(t) = \mathrm{J}_0(t)

        >>> mp.dps = 15; mp.pretty = True
        >>> fp = lambda p: 1/sqrt(p*p + 1)
        >>> ft = lambda t: besselj(0,t)
        >>> ft(tt[0]),ft(tt[0])-invertlaplace(fp,tt[0],method='dehoog')
        (0.999999750000016, -6.09717765032273e-18)
        >>> ft(tt[1]),ft(tt[1])-invertlaplace(fp,tt[1],method='dehoog')
        (0.99997500015625, -5.61756281076169e-17)

        .. math ::

            \bar{f}(p) = \frac{\log p}{p}

        .. math ::

            f(t) = -\gamma -\log t

        >>> mp.dps = 15; mp.pretty = True
        >>> fp = lambda p: log(p)/p
        >>> ft = lambda t: -euler-log(t)
        >>> ft(tt[0]),ft(tt[0])-invertlaplace(fp,tt[0],method='stehfest')
        (6.3305396140806, -1.92126634837863e-16)
        >>> ft(tt[1]),ft(tt[1])-invertlaplace(fp,tt[1],method='stehfest')
        (4.02795452108656, -4.81486093200704e-16)

        **Options**

        :func:`~mpmath.invertlaplace` recognizes the following optional
        keywords valid for all methods:

        *method*
            Chooses numerical inverse Laplace transform algorithm
            (described below).
        *degree*
            Number of terms used in the approximation

        **Algorithms**

        Mpmath implements four numerical inverse Laplace transform
        algorithms, attributed to: Talbot, Stehfest, Cohen, and de Hoog,
        Knight and Stokes. These can be selected by using
        *method='talbot'*, *method='stehfest'*, *method='dehoog'* or
        *method='cohen'* or by passing the classes *method=FixedTalbot*,
        *method=Stehfest*, *method=deHoog*, or *method=Cohen*. The functions
        :func:`~mpmath.invlaptalbot`, :func:`~mpmath.invlapstehfest`,
        :func:`~mpmath.invlapdehoog`, and :func:`~mpmath.invlapcohen`
        are also available as shortcuts.

        All four algorithms implement a heuristic balance between the
        requested precision and the precision used internally for the
        calculations. This has been tuned for a typical exponentially
        decaying function and precision up to few hundred decimal
        digits.

        The Laplace transform converts the variable time (i.e., along
        a line) into a parameter given by the right half of the
        complex `p`-plane. Singularities, poles, and branch cuts in
        the complex `p`-plane contain all the information regarding
        the time behavior of the corresponding function. Any numerical
        method must therefore sample `p`-plane "close enough" to the
        singularities to accurately characterize them, while not
        getting too close to have catastrophic cancellation, overflow,
        or underflow issues. Most significantly, if one or more of the
        singularities in the `p`-plane is not on the left side of the
        Bromwich contour, its effects will be left out of the computed
        solution, and the answer will be completely wrong.

        *Talbot*

        The fixed Talbot method is high accuracy and fast, but the
        method can catastrophically fail for certain classes of time-domain
        behavior, including a Heaviside step function for positive
        time (e.g., `H(t-2)`), or some oscillatory behaviors. The
        Talbot method usually has adjustable parameters, but the
        "fixed" variety implemented here does not. This method
        deforms the Bromwich integral contour in the shape of a
        parabola towards `-\infty`, which leads to problems
        when the solution has a decaying exponential in it (e.g., a
        Heaviside step function is equivalent to multiplying by a
        decaying exponential in Laplace space).

        *Stehfest*

        The Stehfest algorithm only uses abscissa along the real axis
        of the complex `p`-plane to estimate the time-domain
        function. Oscillatory time-domain functions have poles away
        from the real axis, so this method does not work well with
        oscillatory functions, especially high-frequency ones. This
        method also depends on summation of terms in a series that
        grows very large, and will have catastrophic cancellation
        during summation if the working precision is too low.

        *de Hoog et al.*

        The de Hoog, Knight, and Stokes method is essentially a
        Fourier-series quadrature-type approximation to the Bromwich
        contour integral, with non-linear series acceleration and an
        analytical expression for the remainder term. This method is
        typically one of the most robust. This method also involves the
        greatest amount of overhead, so it is typically the slowest of the
        four methods at high precision.

        *Cohen*

        The Cohen method is a trapezoidal rule approximation to the Bromwich
        contour integral, with linear acceleration for alternating
        series. This method is as robust as the de Hoog et al method and the
        fastest of the four methods at high precision, and is therefore the
        default method.

        **Singularities**

        All numerical inverse Laplace transform methods have problems
        at large time when the Laplace-space function has poles,
        singularities, or branch cuts to the right of the origin in
        the complex plane. For simple poles in `\bar{f}(p)` at the
        `p`-plane origin, the time function is constant in time (e.g.,
        `\mathcal{L}\left\lbrace 1 \right\rbrace=1/p` has a pole at
        `p=0`). A pole in `\bar{f}(p)` to the left of the origin is a
        decreasing function of time (e.g., `\mathcal{L}\left\lbrace
        e^{-t/2} \right\rbrace=1/(p+1/2)` has a pole at `p=-1/2`), and
        a pole to the right of the origin leads to an increasing
        function in time (e.g., `\mathcal{L}\left\lbrace t e^{t/4}
        \right\rbrace = 1/(p-1/4)^2` has a pole at `p=1/4`). When
        singularities occur off the real `p` axis, the time-domain
        function is oscillatory. For example `\mathcal{L}\left\lbrace
        \mathrm{J}_0(t) \right\rbrace=1/\sqrt{p^2+1}` has a branch cut
        starting at `p=j=\sqrt{-1}` and is a decaying oscillatory
        function. This range of behaviors is illustrated in Duffy [3]
        Figure 4.10.4, p. 228.

        In general as `p \rightarrow \infty` `t \rightarrow 0` and
        vice-versa. All numerical inverse Laplace transform methods
        require their abscissa to shift closer to the origin for
        larger times. If the abscissa shift left of the rightmost
        singularity in the Laplace domain, the answer will be
        completely wrong (the effect of singularities to the right of
        the Bromwich contour are not included in the results).

        For example, the following exponentially growing function has
        a pole at `p=3`:

        .. math ::

            \bar{f}(p)=\frac{1}{p^2-9}

        .. math ::

            f(t)=\frac{1}{3}\sinh 3t

        >>> mp.dps = 15; mp.pretty = True
        >>> fp = lambda p: 1/(p*p-9)
        >>> ft = lambda t: sinh(3*t)/3
        >>> tt = [0.01,0.1,1.0,10.0]
        >>> ft(tt[0]),invertlaplace(fp,tt[0],method='talbot')
        (0.0100015000675014, 0.0100015000675014)
        >>> ft(tt[1]),invertlaplace(fp,tt[1],method='talbot')
        (0.101506764482381, 0.101506764482381)
        >>> ft(tt[2]),invertlaplace(fp,tt[2],method='talbot')
        (3.33929164246997, 3.33929164246997)
        >>> ft(tt[3]),invertlaplace(fp,tt[3],method='talbot')
        (1781079096920.74, -1.61331069624091e-14)

        **References**

        1. [DLMF]_ section 1.14 (http://dlmf.nist.gov/1.14T4)
        2. Cohen, A.M. (2007). Numerical Methods for Laplace Transform
           Inversion, Springer.
        3. Duffy, D.G. (1998). Advanced Engineering Mathematics, CRC Press.

        **Numerical Inverse Laplace Transform Reviews**

        1. Bellman, R., R.E. Kalaba, J.A. Lockett (1966). *Numerical
           inversion of the Laplace transform: Applications to Biology,
           Economics, Engineering, and Physics*. Elsevier.
        2. Davies, B., B. Martin (1979). Numerical inversion of the
           Laplace transform: a survey and comparison of methods. *Journal
           of Computational Physics* 33:1-32,
           http://dx.doi.org/10.1016/0021-9991(79)90025-1
        3. Duffy, D.G. (1993). On the numerical inversion of Laplace
           transforms: Comparison of three new methods on characteristic
           problems from applications. *ACM Transactions on Mathematical
           Software* 19(3):333-359, http://dx.doi.org/10.1145/155743.155788
        4. Kuhlman, K.L., (2013). Review of Inverse Laplace Transform
           Algorithms for Laplace-Space Numerical Approaches, *Numerical
           Algorithms*, 63(2):339-355.
           http://dx.doi.org/10.1007/s11075-012-9625-3

        """

        # resolve the requested method: either a name (matched
        # case-insensitively) or a rule class to be instantiated on the fly
        rule = kwargs.get('method', 'cohen')
        if isinstance(rule, str):
            lrule = rule.lower()
            if lrule == 'talbot':
                rule = ctx._fixed_talbot
            elif lrule == 'stehfest':
                rule = ctx._stehfest
            elif lrule == 'dehoog':
                rule = ctx._de_hoog
            # BUG FIX: compare the lowercased name (previously this compared
            # the original string, so method='Cohen' raised ValueError while
            # 'Talbot'/'Stehfest'/'DeHoog' worked case-insensitively)
            elif lrule == 'cohen':
                rule = ctx._cohen
            else:
                raise ValueError("unknown invlap algorithm: %s" % rule)
        else:
            rule = rule(ctx)

        # determine the vector of Laplace-space parameter
        # needed for the requested method and desired time
        rule.calc_laplace_parameter(t, **kwargs)

        # compute the Laplace-space function evaluations
        # at the required abscissa.
        fp = [f(p) for p in rule.p]

        # compute the time-domain solution from the
        # Laplace-space function evaluations
        return rule.calc_time_domain_solution(fp, t)

    # shortcuts for the above function for specific methods
    def invlaptalbot(ctx, *args, **kwargs):
        kwargs['method'] = 'talbot'
        return ctx.invertlaplace(*args, **kwargs)

    def invlapstehfest(ctx, *args, **kwargs):
        kwargs['method'] = 'stehfest'
        return ctx.invertlaplace(*args, **kwargs)

    def invlapdehoog(ctx, *args, **kwargs):
        kwargs['method'] = 'dehoog'
        return ctx.invertlaplace(*args, **kwargs)

    def invlapcohen(ctx, *args, **kwargs):
        kwargs['method'] = 'cohen'
        return ctx.invertlaplace(*args, **kwargs)
|
| 967 |
+
|
| 968 |
+
|
| 969 |
+
# ****************************************
|
| 970 |
+
|
| 971 |
+
# Running this module directly executes the docstring examples above.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
|
vllm/lib/python3.10/site-packages/mpmath/calculus/odes.py
ADDED
|
@@ -0,0 +1,288 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from bisect import bisect
|
| 2 |
+
from ..libmp.backend import xrange
|
| 3 |
+
|
| 4 |
+
# Mixin namespace for ODE solvers: odefun is attached to this class at the
# bottom of this module, and mpmath context classes inherit from it.
class ODEMethods(object):
    pass
|
| 6 |
+
|
| 7 |
+
def ode_taylor(ctx, derivs, x0, y0, tol_prec, n):
    """Compute a local degree-n Taylor series for the ODE solution at x0.

    derivs(x, y) returns the vector of first derivatives; y0 is the value
    vector at x0; tol_prec sets the tolerance tol = 2**(-tol_prec), which
    is also used as the sampling step h.

    Returns (ser, x0+radius) where ser[d] is the list of Taylor
    coefficients for solution component d, and radius estimates how far
    the truncated series can be evaluated while staying within tol.
    """
    # step size and tolerance are deliberately the same power of two
    h = tol = ctx.ldexp(1, -tol_prec)
    dim = len(y0)
    xs = [x0]
    ys = [y0]
    x = x0
    y = y0
    orig = ctx.prec
    try:
        # the difference sums below cancel heavily, so work at boosted
        # precision proportional to the series degree
        ctx.prec = orig*(1+n)
        # Use n steps with Euler's method to get
        # evaluation points for derivatives
        for i in range(n):
            fxy = derivs(x, y)
            y = [y[i]+h*fxy[i] for i in xrange(len(y))]
            x += h
            xs.append(x)
            ys.append(y)
        # Compute derivatives
        ser = [[] for d in range(dim)]
        for j in range(n+1):
            # j-th Taylor coefficient from an alternating binomial
            # (forward-difference) sum of the sampled values
            s = [0]*dim
            b = (-1) ** (j & 1)
            k = 1
            for i in range(j+1):
                for d in range(dim):
                    s[d] += b * ys[i][d]
                # advance b to the next signed binomial coefficient of row j
                b = (b * (j-k+1)) // (-k)
                k += 1
            # divide the j-th difference by h^j * j! to get the coefficient
            scale = h**(-j) / ctx.fac(j)
            for d in range(dim):
                s[d] = s[d] * scale
                ser[d].append(s[d])
    finally:
        # always restore the caller's precision
        ctx.prec = orig
    # Estimate radius for which we can get full accuracy.
    # XXX: do this right for zeros
    radius = ctx.one
    for ts in ser:
        if ts[-1]:
            radius = min(radius, ctx.nthroot(tol/abs(ts[-1]), n))
    radius /= 2 # XXX
    return ser, x0+radius
|
| 50 |
+
|
| 51 |
+
def odefun(ctx, F, x0, y0, tol=None, degree=None, method='taylor', verbose=False):
    r"""
    Returns a function `y(x) = [y_0(x), y_1(x), \ldots, y_n(x)]`
    that is a numerical solution of the `n+1`-dimensional first-order
    ordinary differential equation (ODE) system

    .. math ::

        y_0'(x) = F_0(x, [y_0(x), y_1(x), \ldots, y_n(x)])

        y_1'(x) = F_1(x, [y_0(x), y_1(x), \ldots, y_n(x)])

        \vdots

        y_n'(x) = F_n(x, [y_0(x), y_1(x), \ldots, y_n(x)])

    The derivatives are specified by the vector-valued function
    *F* that evaluates
    `[y_0', \ldots, y_n'] = F(x, [y_0, \ldots, y_n])`.
    The initial point `x_0` is specified by the scalar argument *x0*,
    and the initial value `y(x_0) = [y_0(x_0), \ldots, y_n(x_0)]` is
    specified by the vector argument *y0*.

    For convenience, if the system is one-dimensional, you may optionally
    provide just a scalar value for *y0*. In this case, *F* should accept
    a scalar *y* argument and return a scalar. The solution function
    *y* will return scalar values instead of length-1 vectors.

    Evaluation of the solution function `y(x)` is permitted
    for any `x \ge x_0`.

    A high-order ODE can be solved by transforming it into first-order
    vector form. This transformation is described in standard texts
    on ODEs. Examples will also be given below.

    **Options, speed and accuracy**

    By default, :func:`~mpmath.odefun` uses a high-order Taylor series
    method. For reasonably well-behaved problems, the solution will
    be fully accurate to within the working precision. Note that
    *F* must be possible to evaluate to very high precision
    for the generation of Taylor series to work.

    To get a faster but less accurate solution, you can set a large
    value for *tol* (which defaults roughly to *eps*). If you just
    want to plot the solution or perform a basic simulation,
    *tol = 0.01* is likely sufficient.

    The *degree* argument controls the degree of the solver (with
    *method='taylor'*, this is the degree of the Taylor series
    expansion). A higher degree means that a longer step can be taken
    before a new local solution must be generated from *F*,
    meaning that fewer steps are required to get from `x_0` to a given
    `x_1`. On the other hand, a higher degree also means that each
    local solution becomes more expensive (i.e., more evaluations of
    *F* are required per step, and at higher precision).

    The optimal setting therefore involves a tradeoff. Generally,
    decreasing the *degree* for Taylor series is likely to give faster
    solution at low precision, while increasing is likely to be better
    at higher precision.

    The function
    object returned by :func:`~mpmath.odefun` caches the solutions at all step
    points and uses polynomial interpolation between step points.
    Therefore, once `y(x_1)` has been evaluated for some `x_1`,
    `y(x)` can be evaluated very quickly for any `x_0 \le x \le x_1`.
    and continuing the evaluation up to `x_2 > x_1` is also fast.

    **Examples of first-order ODEs**

    We will solve the standard test problem `y'(x) = y(x), y(0) = 1`
    which has explicit solution `y(x) = \exp(x)`::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> f = odefun(lambda x, y: y, 0, 1)
        >>> for x in [0, 1, 2.5]:
        ...     print((f(x), exp(x)))
        ...
        (1.0, 1.0)
        (2.71828182845905, 2.71828182845905)
        (12.1824939607035, 12.1824939607035)

    The solution with high precision::

        >>> mp.dps = 50
        >>> f = odefun(lambda x, y: y, 0, 1)
        >>> f(1)
        2.7182818284590452353602874713526624977572470937
        >>> exp(1)
        2.7182818284590452353602874713526624977572470937

    Using the more general vectorized form, the test problem
    can be input as (note that *f* returns a 1-element vector)::

        >>> mp.dps = 15
        >>> f = odefun(lambda x, y: [y[0]], 0, [1])
        >>> f(1)
        [2.71828182845905]

    :func:`~mpmath.odefun` can solve nonlinear ODEs, which are generally
    impossible (and at best difficult) to solve analytically. As
    an example of a nonlinear ODE, we will solve `y'(x) = x \sin(y(x))`
    for `y(0) = \pi/2`. An exact solution happens to be known
    for this problem, and is given by
    `y(x) = 2 \tan^{-1}\left(\exp\left(x^2/2\right)\right)`::

        >>> f = odefun(lambda x, y: x*sin(y), 0, pi/2)
        >>> for x in [2, 5, 10]:
        ...     print((f(x), 2*atan(exp(mpf(x)**2/2))))
        ...
        (2.87255666284091, 2.87255666284091)
        (3.14158520028345, 3.14158520028345)
        (3.14159265358979, 3.14159265358979)

    If `F` is independent of `y`, an ODE can be solved using direct
    integration. We can therefore obtain a reference solution with
    :func:`~mpmath.quad`::

        >>> f = lambda x: (1+x**2)/(1+x**3)
        >>> g = odefun(lambda x, y: f(x), pi, 0)
        >>> g(2*pi)
        0.72128263801696
        >>> quad(f, [pi, 2*pi])
        0.72128263801696

    **Examples of second-order ODEs**

    We will solve the harmonic oscillator equation `y''(x) + y(x) = 0`.
    To do this, we introduce the helper functions `y_0 = y, y_1 = y_0'`
    whereby the original equation can be written as `y_1' + y_0' = 0`. Put
    together, we get the first-order, two-dimensional vector ODE

    .. math ::

        \begin{cases}
        y_0' = y_1 \\
        y_1' = -y_0
        \end{cases}

    To get a well-defined IVP, we need two initial values. With
    `y(0) = y_0(0) = 1` and `-y'(0) = y_1(0) = 0`, the problem will of
    course be solved by `y(x) = y_0(x) = \cos(x)` and
    `-y'(x) = y_1(x) = \sin(x)`. We check this::

        >>> f = odefun(lambda x, y: [-y[1], y[0]], 0, [1, 0])
        >>> for x in [0, 1, 2.5, 10]:
        ...     nprint(f(x), 15)
        ...     nprint([cos(x), sin(x)], 15)
        ...     print("---")
        ...
        [1.0, 0.0]
        [1.0, 0.0]
        ---
        [0.54030230586814, 0.841470984807897]
        [0.54030230586814, 0.841470984807897]
        ---
        [-0.801143615546934, 0.598472144103957]
        [-0.801143615546934, 0.598472144103957]
        ---
        [-0.839071529076452, -0.54402111088937]
        [-0.839071529076452, -0.54402111088937]
        ---

    Note that we get both the sine and the cosine solutions
    simultaneously.

    **TODO**

    * Better automatic choice of degree and step size
    * Make determination of Taylor series convergence radius
      more robust
    * Allow solution for `x < x_0`
    * Allow solution for complex `x`
    * Test for difficult (ill-conditioned) problems
    * Implement Runge-Kutta and other algorithms

    """
    # translate the tolerance (or the context precision) into the
    # binary precision used by ode_taylor; note tol=0 falls back to
    # the precision-based default because 0 is falsy
    if tol:
        tol_prec = int(-ctx.log(tol, 2))+10
    else:
        tol_prec = ctx.prec+10
    degree = degree or (3 + int(3*ctx.dps/2.))
    workprec = ctx.prec + 40
    # scalar y0 means a one-dimensional problem: wrap F and y0 in
    # length-1 vectors and unwrap on output
    try:
        len(y0)
        return_vector = True
    except TypeError:
        F_ = F
        F = lambda x, y: [F_(x, y[0])]
        y0 = [y0]
        return_vector = False
    # first local Taylor expansion, valid on [x0, xb]
    ser, xb = ode_taylor(ctx, F, x0, y0, tol_prec, degree)
    series_boundaries = [x0, xb]
    series_data = [(ser, x0, xb)]
    # We will be working with vectors of Taylor series
    def mpolyval(ser, a):
        # evaluate each component's series at offset a from its expansion point
        return [ctx.polyval(s[::-1], a) for s in ser]
    # Find nearest expansion point; compute if necessary
    def get_series(x):
        # solutions to the left of x0 are not supported (see TODO above)
        if x < x0:
            raise ValueError
        n = bisect(series_boundaries, x)
        if n < len(series_boundaries):
            # cache hit: x lies within an already-computed interval
            return series_data[n-1]
        # extend the cached chain of expansions rightward until x is covered
        while 1:
            ser, xa, xb = series_data[-1]
            if verbose:
                print("Computing Taylor series for [%f, %f]" % (xa, xb))
            # step to the right endpoint of the last series and expand there
            y = mpolyval(ser, xb-xa)
            xa = xb
            ser, xb = ode_taylor(ctx, F, xb, y, tol_prec, degree)
            series_boundaries.append(xb)
            series_data.append((ser, xa, xb))
            if x <= xb:
                return series_data[-1]
    # Evaluation function
    def interpolant(x):
        x = ctx.convert(x)
        orig = ctx.prec
        try:
            # evaluate at boosted precision, then round on return
            ctx.prec = workprec
            ser, xa, xb = get_series(x)
            y = mpolyval(ser, x-xa)
        finally:
            ctx.prec = orig
        if return_vector:
            # unary + rounds each component to the caller's precision
            return [+yk for yk in y]
        else:
            return +y[0]
    return interpolant
|
| 283 |
+
|
| 284 |
+
# Install the solver as a method of the ODEMethods mixin class, so contexts
# inheriting from it expose ``ctx.odefun(...)``.
ODEMethods.odefun = odefun

if __name__ == "__main__":
    # Run the docstring examples in this module as tests.
    import doctest
    doctest.testmod()
|
vllm/lib/python3.10/site-packages/mpmath/calculus/optimization.py
ADDED
|
@@ -0,0 +1,1102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import print_function
|
| 2 |
+
|
| 3 |
+
from copy import copy
|
| 4 |
+
|
| 5 |
+
from ..libmp.backend import xrange
|
| 6 |
+
|
| 7 |
+
class OptimizationMethods(object):
    # Mixin namespace: root-finding entry points (e.g. ``findroot``) are
    # attached to this class at import time so context classes can inherit them.
    def __init__(ctx):
        # NOTE: mpmath context mixins use ``ctx`` in place of ``self``.
        pass
|
| 10 |
+
|
| 11 |
+
##############
|
| 12 |
+
# 1D-SOLVERS #
|
| 13 |
+
##############
|
| 14 |
+
|
| 15 |
+
class Newton:
    """
    1d-solver generating pairs of approximative root and error.

    Needs starting points x0 close to the root.

    Pro:

    * converges fast
    * sometimes more robust than secant with bad second starting point

    Contra:

    * converges slowly for multiple roots
    * needs first derivative
    * 2 function evaluations per iteration
    """
    maxsteps = 20

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        if len(x0) != 1:
            raise ValueError('expected 1 starting point, got %i' % len(x0))
        self.x0 = x0[0]
        self.f = f
        if 'df' in kwargs:
            df = kwargs['df']
        else:
            # no derivative supplied: fall back to numerical differentiation
            def df(x):
                return self.ctx.diff(f, x)
        self.df = df

    def __iter__(self):
        fun, deriv = self.f, self.df
        current = self.x0
        while True:
            # classic Newton step: x_{k+1} = x_k - f(x_k)/f'(x_k)
            updated = current - fun(current) / deriv(current)
            err = abs(updated - current)
            current = updated
            yield (updated, err)
|
| 57 |
+
|
| 58 |
+
class Secant:
    """
    1d-solver generating pairs of approximative root and error.

    Needs starting points x0 and x1 close to the root.
    x1 defaults to x0 + 0.25.

    Pro:

    * converges fast

    Contra:

    * converges slowly for multiple roots
    """
    maxsteps = 30

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        npoints = len(x0)
        if npoints == 1:
            self.x0 = x0[0]
            self.x1 = self.x0 + 0.25
        elif npoints == 2:
            self.x0, self.x1 = x0
        else:
            raise ValueError('expected 1 or 2 starting points, got %i' % npoints)
        self.f = f

    def __iter__(self):
        fun = self.f
        a, b = self.x0, self.x1
        fa = fun(a)
        while True:
            fb = fun(b)
            step = b - a
            if not step:
                break  # both points coincide: cannot form a secant
            slope = (fb - fa) / step
            if not slope:
                break  # flat secant line: no intersection with the x-axis
            a, b = b, b - fb / slope
            fa = fb
            yield b, abs(step)
|
| 103 |
+
|
| 104 |
+
class MNewton:
    """
    1d-solver generating pairs of approximative root and error.

    Needs starting point x0 close to the root.
    Uses modified Newton's method that converges fast regardless of the
    multiplicity of the root.

    Pro:

    * converges fast for multiple roots

    Contra:

    * needs first and second derivative of f
    * 3 function evaluations per iteration
    """
    maxsteps = 20

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        if not len(x0) == 1:
            raise ValueError('expected 1 starting point, got %i' % len(x0))
        self.x0 = x0[0]
        self.f = f
        if not 'df' in kwargs:
            def df(x):
                # numerical first derivative
                return self.ctx.diff(f, x)
        else:
            df = kwargs['df']
        self.df = df
        if not 'd2f' in kwargs:
            def d2f(x):
                # numerical second derivative
                return self.ctx.diff(df, x)
        else:
            # BUGFIX: previously read kwargs['df'] here, silently ignoring a
            # user-supplied second derivative.
            d2f = kwargs['d2f']
        self.d2f = d2f

    def __iter__(self):
        x = self.x0
        f = self.f
        df = self.df
        d2f = self.d2f
        while True:
            prevx = x
            fx = f(x)
            if fx == 0:
                break
            dfx = df(x)
            d2fx = d2f(x)
            # x = x - F(x)/F'(x) with F(x) = f(x)/f'(x)
            x -= fx / (dfx - fx * d2fx / dfx)
            error = abs(x - prevx)
            yield x, error
|
| 158 |
+
|
| 159 |
+
class Halley:
    """
    1d-solver generating pairs of approximative root and error.

    Needs a starting point x0 close to the root.
    Uses Halley's method with cubic convergence rate.

    Pro:

    * converges even faster the Newton's method
    * useful when computing with *many* digits

    Contra:

    * needs first and second derivative of f
    * 3 function evaluations per iteration
    * converges slowly for multiple roots
    """

    maxsteps = 20

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        if not len(x0) == 1:
            raise ValueError('expected 1 starting point, got %i' % len(x0))
        self.x0 = x0[0]
        self.f = f
        if not 'df' in kwargs:
            def df(x):
                # numerical first derivative
                return self.ctx.diff(f, x)
        else:
            df = kwargs['df']
        self.df = df
        if not 'd2f' in kwargs:
            def d2f(x):
                # numerical second derivative
                return self.ctx.diff(df, x)
        else:
            # BUGFIX: previously read kwargs['df'] here, silently ignoring a
            # user-supplied second derivative.
            d2f = kwargs['d2f']
        self.d2f = d2f

    def __iter__(self):
        x = self.x0
        f = self.f
        df = self.df
        d2f = self.d2f
        while True:
            prevx = x
            fx = f(x)
            dfx = df(x)
            d2fx = d2f(x)
            # Halley step: x -= 2*f*f' / (2*f'^2 - f*f'')
            x -= 2*fx*dfx / (2*dfx**2 - fx*d2fx)
            error = abs(x - prevx)
            yield x, error
|
| 212 |
+
|
| 213 |
+
class Muller:
    """
    1d-solver generating pairs of approximative root and error.

    Needs starting points x0, x1 and x2 close to the root.
    x1 defaults to x0 + 0.25; x2 to x1 + 0.25.
    Uses Muller's method that converges towards complex roots.

    Pro:

    * converges fast (somewhat faster than secant)
    * can find complex roots

    Contra:

    * converges slowly for multiple roots
    * may have complex values for real starting points and real roots

    http://en.wikipedia.org/wiki/Muller's_method
    """
    maxsteps = 30

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        npoints = len(x0)
        if npoints == 1:
            self.x0 = x0[0]
            self.x1 = self.x0 + 0.25
            self.x2 = self.x1 + 0.25
        elif npoints == 2:
            self.x0, self.x1 = x0
            self.x2 = self.x1 + 0.25
        elif npoints == 3:
            self.x0, self.x1, self.x2 = x0
        else:
            raise ValueError('expected 1, 2 or 3 starting points, got %i'
                             % npoints)
        self.f = f
        self.verbose = kwargs['verbose']

    def __iter__(self):
        fun = self.f
        x0, x1, x2 = self.x0, self.x1, self.x2
        fx0, fx1, fx2 = fun(x0), fun(x1), fun(x2)
        while True:
            # TODO: maybe refactoring with function for divided differences
            # calculate divided differences
            fx2x1 = (fx1 - fx2) / (x1 - x2)
            fx2x0 = (fx0 - fx2) / (x0 - x2)
            fx1x0 = (fx0 - fx1) / (x0 - x1)
            w = fx2x1 + fx2x0 - fx1x0
            fx2x1x0 = (fx1x0 - fx2x1) / (x0 - x2)
            if w == 0 and fx2x1x0 == 0:
                if self.verbose:
                    print('canceled with')
                    print('x0 =', x0, ', x1 =', x1, 'and x2 =', x2)
                break
            x0, fx0 = x1, fx1
            x1, fx1 = x2, fx2
            # denominator should be as large as possible => choose sign
            r = self.ctx.sqrt(w**2 - 4*fx2*fx2x1x0)
            if abs(w - r) > abs(w + r):
                r = -r
            x2 = x2 - 2*fx2 / (w + r)
            fx2 = fun(x2)
            yield x2, abs(x2 - x1)
|
| 288 |
+
|
| 289 |
+
# TODO: consider raising a ValueError when there's no sign change in a and b
|
| 290 |
+
class Bisection:
    """
    1d-solver generating pairs of approximative root and error.

    Uses bisection method to find a root of f in [a, b].
    Might fail for multiple roots (needs sign change).

    Pro:

    * robust and reliable

    Contra:

    * converges slowly
    * needs sign change
    """
    maxsteps = 100

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        if len(x0) != 2:
            raise ValueError('expected interval of 2 points, got %i' % len(x0))
        self.f = f
        self.a, self.b = x0

    def __iter__(self):
        fun = self.f
        lo, hi = self.a, self.b
        width = hi - lo
        fhi = fun(hi)
        while True:
            mid = self.ctx.ldexp(lo + hi, -1)  # exact halving of the interval
            fmid = fun(mid)
            prod = fmid * fhi
            if prod < 0:    # sign change between mid and hi
                lo = mid
            elif prod > 0:  # sign change between lo and mid
                hi = mid
                fhi = fmid
            else:           # landed exactly on a root
                yield mid, self.ctx.zero
            width /= 2
            yield (lo + hi)/2, abs(width)
|
| 335 |
+
|
| 336 |
+
def _getm(method):
|
| 337 |
+
"""
|
| 338 |
+
Return a function to calculate m for Illinois-like methods.
|
| 339 |
+
"""
|
| 340 |
+
if method == 'illinois':
|
| 341 |
+
def getm(fz, fb):
|
| 342 |
+
return 0.5
|
| 343 |
+
elif method == 'pegasus':
|
| 344 |
+
def getm(fz, fb):
|
| 345 |
+
return fb/(fb + fz)
|
| 346 |
+
elif method == 'anderson':
|
| 347 |
+
def getm(fz, fb):
|
| 348 |
+
m = 1 - fz/fb
|
| 349 |
+
if m > 0:
|
| 350 |
+
return m
|
| 351 |
+
else:
|
| 352 |
+
return 0.5
|
| 353 |
+
else:
|
| 354 |
+
raise ValueError("method '%s' not recognized" % method)
|
| 355 |
+
return getm
|
| 356 |
+
|
| 357 |
+
class Illinois:
    """
    1d-solver generating pairs of approximative root and error.

    Uses Illinois method or similar to find a root of f in [a, b].
    Might fail for multiple roots (needs sign change).
    Combines bisect with secant (improved regula falsi).

    The only difference between the methods is the scaling factor m, which is
    used to ensure convergence (you can choose one using the 'method' keyword):

    Illinois method ('illinois'):
        m = 0.5

    Pegasus method ('pegasus'):
        m = fb/(fb + fz)

    Anderson-Bjoerk method ('anderson'):
        m = 1 - fz/fb if positive else 0.5

    Pro:

    * converges very fast

    Contra:

    * has problems with multiple roots
    * needs sign change
    """
    maxsteps = 30

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        if len(x0) != 2:
            raise ValueError('expected interval of 2 points, got %i' % len(x0))
        self.a, self.b = x0
        self.f = f
        self.tol = kwargs['tol']
        self.verbose = kwargs['verbose']
        method = kwargs.get('method', 'illinois')
        self.method = method
        # selection of the scaling-factor function (logic mirrors _getm)
        if method == 'illinois':
            def getm(fz, fb):
                return 0.5
        elif method == 'pegasus':
            def getm(fz, fb):
                return fb/(fb + fz)
        elif method == 'anderson':
            def getm(fz, fb):
                m = 1 - fz/fb
                return m if m > 0 else 0.5
        else:
            raise ValueError("method '%s' not recognized" % method)
        self.getm = getm
        if self.verbose:
            print('using %s method' % self.method)

    def __iter__(self):
        method = self.method
        f = self.f
        a, b = self.a, self.b
        fa, fb = f(a), f(b)
        m = None
        while True:
            l = b - a
            if l == 0:
                break
            s = (fb - fa) / l
            z = a - fa/s       # regula-falsi (secant) point
            fz = f(z)
            if abs(fz) < self.tol:
                # TODO: better condition (when f is very flat)
                if self.verbose:
                    print('canceled with z =', z)
                yield z, l
                break
            if fz * fb < 0:  # root in [z, b]
                a, fa = b, fb
                b, fb = z, fz
            else:  # root in [a, z]
                m = self.getm(fz, fb)
                b, fb = z, fz
                fa = m*fa  # scale down to ensure convergence
            if self.verbose and m and not method == 'illinois':
                print('m:', m)
            yield (a + b)/2, abs(l)
|
| 436 |
+
|
| 437 |
+
def Pegasus(*args, **kwargs):
    """
    1d-solver generating pairs of approximative root and error.

    Uses Pegasus method to find a root of f in [a, b].
    Wrapper for illinois to use method='pegasus'.
    """
    kwargs['method'] = 'pegasus'
    return Illinois(*args, **kwargs)
|
| 446 |
+
|
| 447 |
+
def Anderson(*args, **kwargs):
    """
    1d-solver generating pairs of approximative root and error.

    Uses Anderson-Bjoerk method to find a root of f in [a, b].
    Wrapper for illinois to use method='anderson'.
    """
    kwargs['method'] = 'anderson'
    return Illinois(*args, **kwargs)
|
| 456 |
+
|
| 457 |
+
# TODO: check whether it's possible to combine it with Illinois stuff
|
| 458 |
+
class Ridder:
    """
    1d-solver generating pairs of approximative root and error.

    Ridders' method to find a root of f in [a, b].
    Is told to perform as well as Brent's method while being simpler.

    Pro:

    * very fast
    * simpler than Brent's method

    Contra:

    * two function evaluations per step
    * has problems with multiple roots
    * needs sign change

    http://en.wikipedia.org/wiki/Ridders'_method
    """
    maxsteps = 30

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        self.f = f
        if len(x0) != 2:
            raise ValueError('expected interval of 2 points, got %i' % len(x0))
        self.x1, self.x2 = x0
        self.verbose = kwargs['verbose']
        self.tol = kwargs['tol']

    def __iter__(self):
        ctx = self.ctx
        fun = self.f
        lo, hi = self.x1, self.x2
        flo, fhi = fun(lo), fun(hi)
        while True:
            mid = 0.5*(lo + hi)
            fmid = fun(mid)
            # exponential-fit update point (Ridders' formula)
            new = mid + (mid - lo) * ctx.sign(flo - fhi) * fmid / ctx.sqrt(fmid**2 - flo*fhi)
            fnew = fun(new)
            if abs(fnew) < self.tol:
                # TODO: better condition (when f is very flat)
                if self.verbose:
                    print('canceled with f(x4) =', fnew)
                yield new, abs(lo - hi)
                break
            if fnew * fhi < 0:  # root in [x4, x2]
                lo, flo = new, fnew
            else:  # root in [x1, x4]
                hi, fhi = new, fnew
            yield (lo + hi)/2, abs(lo - hi)
|
| 516 |
+
|
| 517 |
+
class ANewton:
    """
    EXPERIMENTAL 1d-solver generating pairs of approximative root and error.

    Uses Newton's method modified to use Steffensens method when convergence is
    slow. (I.e. for multiple roots.)
    """
    maxsteps = 20

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        if len(x0) != 1:
            raise ValueError('expected 1 starting point, got %i' % len(x0))
        self.x0 = x0[0]
        self.f = f
        if 'df' in kwargs:
            df = kwargs['df']
        else:
            # no derivative supplied: numerical differentiation
            def df(x):
                return self.ctx.diff(f, x)
        self.df = df
        def phi(x):
            # one plain Newton step
            return x - f(x) / df(x)
        self.phi = phi
        self.verbose = kwargs['verbose']

    def __iter__(self):
        x0 = self.x0
        f = self.f
        df = self.df
        phi = self.phi
        error = 0
        counter = 0
        while True:
            prevx = x0
            try:
                x0 = phi(x0)
            except ZeroDivisionError:
                if self.verbose:
                    print('ZeroDivisionError: canceled with x =', x0)
                break
            preverror = error
            error = abs(prevx - x0)
            # TODO: decide not to use convergence acceleration
            if error and abs(error - preverror) / error < 1:
                if self.verbose:
                    print('converging slowly')
                counter += 1
            if counter >= 3:
                # accelerate convergence
                phi = steffensen(phi)
                counter = 0
                if self.verbose:
                    print('accelerating convergence')
            yield x0, error
|
| 572 |
+
|
| 573 |
+
# TODO: add Brent
|
| 574 |
+
|
| 575 |
+
############################
|
| 576 |
+
# MULTIDIMENSIONAL SOLVERS #
|
| 577 |
+
############################
|
| 578 |
+
|
| 579 |
+
def jacobian(ctx, f, x):
    """
    Calculate the Jacobian matrix of a function at the point x0.

    This is the first derivative of a vectorial function:

        f : R^m -> R^n with m >= n
    """
    point = ctx.matrix(x)
    step = ctx.sqrt(ctx.eps)  # balances truncation and rounding error
    base = ctx.matrix(f(*point))
    rows = len(base)
    cols = len(point)
    jac = ctx.matrix(rows, cols)
    for col in xrange(cols):
        shifted = point.copy()
        shifted[col] += step
        # forward-difference approximation of column `col`
        column = (ctx.matrix(f(*shifted)) - base) / step
        for row in xrange(rows):
            jac[row, col] = column[row]
    return jac
|
| 600 |
+
|
| 601 |
+
# TODO: test with user-specified jacobian matrix
|
| 602 |
+
class MDNewton:
    """
    Find the root of a vector function numerically using Newton's method.

    f is a vector function representing a nonlinear equation system.

    x0 is the starting point close to the root.

    J is a function returning the Jacobian matrix for a point.

    Supports overdetermined systems.

    Use the 'norm' keyword to specify which norm to use. Defaults to max-norm.
    The function to calculate the Jacobian matrix can be given using the
    keyword 'J'. Otherwise it will be calculated numerically.

    Please note that this method converges only locally. Especially for high-
    dimensional systems it is not trivial to find a good starting point being
    close enough to the root.

    It is recommended to use a faster, low-precision solver from SciPy [1] or
    OpenOpt [2] to get an initial guess. Afterwards you can use this method for
    root-polishing to any precision.

    [1] http://scipy.org

    [2] http://openopt.org/Welcome
    """
    maxsteps = 10

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        self.f = f
        # accept plain sequences by converting them to a column vector
        if isinstance(x0, (tuple, list)):
            x0 = ctx.matrix(x0)
        assert x0.cols == 1, 'need a vector'
        self.x0 = x0
        if 'J' in kwargs:
            self.J = kwargs['J']
        else:
            # no Jacobian supplied: approximate it numerically at each point
            def J(*x):
                return ctx.jacobian(f, x)
            self.J = J
        self.norm = kwargs['norm']
        self.verbose = kwargs['verbose']

    def __iter__(self):
        f = self.f
        x0 = self.x0
        norm = self.norm
        J = self.J
        fx = self.ctx.matrix(f(*x0))
        fxnorm = norm(fx)
        cancel = False
        while not cancel:
            # get direction of descent
            fxn = -fx
            Jx = J(*x0)
            # solve J(x0) * s = -f(x0) for the Newton step s
            s = self.ctx.lu_solve(Jx, fxn)
            if self.verbose:
                print('Jx:')
                print(Jx)
                print('s:', s)
            # damping step size TODO: better strategy (hard task)
            l = self.ctx.one
            x1 = x0 + s
            while True:
                # step has underflowed to zero: no further progress possible
                if x1 == x0:
                    if self.verbose:
                        print("canceled, won't get more excact")
                    cancel = True
                    break
                fx = self.ctx.matrix(f(*x1))
                newnorm = norm(fx)
                if newnorm < fxnorm:
                    # new x accepted
                    fxnorm = newnorm
                    x0 = x1
                    break
                # residual grew: halve the damping factor and retry
                l /= 2
                x1 = x0 + l*s
            yield (x0, fxnorm)
|
| 684 |
+
|
| 685 |
+
#############
|
| 686 |
+
# UTILITIES #
|
| 687 |
+
#############
|
| 688 |
+
|
| 689 |
+
# Maps the string aliases accepted by findroot's ``solver`` argument to the
# solver classes defined above.
str2solver = {'newton':Newton, 'secant':Secant, 'mnewton':MNewton,
              'halley':Halley, 'muller':Muller, 'bisect':Bisection,
              'illinois':Illinois, 'pegasus':Pegasus, 'anderson':Anderson,
              'ridder':Ridder, 'anewton':ANewton, 'mdnewton':MDNewton}
|
| 693 |
+
|
| 694 |
+
def findroot(ctx, f, x0, solver='secant', tol=None, verbose=False, verify=True, **kwargs):
|
| 695 |
+
r"""
|
| 696 |
+
Find an approximate solution to `f(x) = 0`, using *x0* as starting point or
|
| 697 |
+
interval for *x*.
|
| 698 |
+
|
| 699 |
+
Multidimensional overdetermined systems are supported.
|
| 700 |
+
You can specify them using a function or a list of functions.
|
| 701 |
+
|
| 702 |
+
Mathematically speaking, this function returns `x` such that
|
| 703 |
+
`|f(x)|^2 \leq \mathrm{tol}` is true within the current working precision.
|
| 704 |
+
If the computed value does not meet this criterion, an exception is raised.
|
| 705 |
+
This exception can be disabled with *verify=False*.
|
| 706 |
+
|
| 707 |
+
For interval arithmetic (``iv.findroot()``), please note that
|
| 708 |
+
the returned interval ``x`` is not guaranteed to contain `f(x)=0`!
|
| 709 |
+
It is only some `x` for which `|f(x)|^2 \leq \mathrm{tol}` certainly holds
|
| 710 |
+
regardless of numerical error. This may be improved in the future.
|
| 711 |
+
|
| 712 |
+
**Arguments**
|
| 713 |
+
|
| 714 |
+
*f*
|
| 715 |
+
one dimensional function
|
| 716 |
+
*x0*
|
| 717 |
+
starting point, several starting points or interval (depends on solver)
|
| 718 |
+
*tol*
|
| 719 |
+
the returned solution has an error smaller than this
|
| 720 |
+
*verbose*
|
| 721 |
+
print additional information for each iteration if true
|
| 722 |
+
*verify*
|
| 723 |
+
verify the solution and raise a ValueError if `|f(x)|^2 > \mathrm{tol}`
|
| 724 |
+
*solver*
|
| 725 |
+
a generator for *f* and *x0* returning approximative solution and error
|
| 726 |
+
*maxsteps*
|
| 727 |
+
after how many steps the solver will cancel
|
| 728 |
+
*df*
|
| 729 |
+
first derivative of *f* (used by some solvers)
|
| 730 |
+
*d2f*
|
| 731 |
+
second derivative of *f* (used by some solvers)
|
| 732 |
+
*multidimensional*
|
| 733 |
+
force multidimensional solving
|
| 734 |
+
*J*
|
| 735 |
+
Jacobian matrix of *f* (used by multidimensional solvers)
|
| 736 |
+
*norm*
|
| 737 |
+
used vector norm (used by multidimensional solvers)
|
| 738 |
+
|
| 739 |
+
solver has to be callable with ``(f, x0, **kwargs)`` and return an generator
|
| 740 |
+
yielding pairs of approximative solution and estimated error (which is
|
| 741 |
+
expected to be positive).
|
| 742 |
+
You can use the following string aliases:
|
| 743 |
+
'secant', 'mnewton', 'halley', 'muller', 'illinois', 'pegasus', 'anderson',
|
| 744 |
+
'ridder', 'anewton', 'bisect'
|
| 745 |
+
|
| 746 |
+
See mpmath.calculus.optimization for their documentation.
|
| 747 |
+
|
| 748 |
+
**Examples**
|
| 749 |
+
|
| 750 |
+
The function :func:`~mpmath.findroot` locates a root of a given function using the
|
| 751 |
+
secant method by default. A simple example use of the secant method is to
|
| 752 |
+
compute `\pi` as the root of `\sin x` closest to `x_0 = 3`::
|
| 753 |
+
|
| 754 |
+
>>> from mpmath import *
|
| 755 |
+
>>> mp.dps = 30; mp.pretty = True
|
| 756 |
+
>>> findroot(sin, 3)
|
| 757 |
+
3.14159265358979323846264338328
|
| 758 |
+
|
| 759 |
+
The secant method can be used to find complex roots of analytic functions,
|
| 760 |
+
although it must in that case generally be given a nonreal starting value
|
| 761 |
+
(or else it will never leave the real line)::
|
| 762 |
+
|
| 763 |
+
>>> mp.dps = 15
|
| 764 |
+
>>> findroot(lambda x: x**3 + 2*x + 1, j)
|
| 765 |
+
(0.226698825758202 + 1.46771150871022j)
|
| 766 |
+
|
| 767 |
+
A nice application is to compute nontrivial roots of the Riemann zeta
|
| 768 |
+
function with many digits (good initial values are needed for convergence)::
|
| 769 |
+
|
| 770 |
+
>>> mp.dps = 30
|
| 771 |
+
>>> findroot(zeta, 0.5+14j)
|
| 772 |
+
(0.5 + 14.1347251417346937904572519836j)
|
| 773 |
+
|
| 774 |
+
The secant method can also be used as an optimization algorithm, by passing
|
| 775 |
+
it a derivative of a function. The following example locates the positive
|
| 776 |
+
minimum of the gamma function::
|
| 777 |
+
|
| 778 |
+
>>> mp.dps = 20
|
| 779 |
+
>>> findroot(lambda x: diff(gamma, x), 1)
|
| 780 |
+
1.4616321449683623413
|
| 781 |
+
|
| 782 |
+
Finally, a useful application is to compute inverse functions, such as the
|
| 783 |
+
Lambert W function which is the inverse of `w e^w`, given the first
|
| 784 |
+
term of the solution's asymptotic expansion as the initial value. In basic
|
| 785 |
+
cases, this gives identical results to mpmath's built-in ``lambertw``
|
| 786 |
+
function::
|
| 787 |
+
|
| 788 |
+
>>> def lambert(x):
|
| 789 |
+
... return findroot(lambda w: w*exp(w) - x, log(1+x))
|
| 790 |
+
...
|
| 791 |
+
>>> mp.dps = 15
|
| 792 |
+
>>> lambert(1); lambertw(1)
|
| 793 |
+
0.567143290409784
|
| 794 |
+
0.567143290409784
|
| 795 |
+
>>> lambert(1000); lambertw(1000)
|
| 796 |
+
5.2496028524016
|
| 797 |
+
5.2496028524016
|
| 798 |
+
|
| 799 |
+
Multidimensional functions are also supported::
|
| 800 |
+
|
| 801 |
+
>>> f = [lambda x1, x2: x1**2 + x2,
|
| 802 |
+
... lambda x1, x2: 5*x1**2 - 3*x1 + 2*x2 - 3]
|
| 803 |
+
>>> findroot(f, (0, 0))
|
| 804 |
+
[-0.618033988749895]
|
| 805 |
+
[-0.381966011250105]
|
| 806 |
+
>>> findroot(f, (10, 10))
|
| 807 |
+
[ 1.61803398874989]
|
| 808 |
+
[-2.61803398874989]
|
| 809 |
+
|
| 810 |
+
You can verify this by solving the system manually.
|
| 811 |
+
|
| 812 |
+
Please note that the following (more general) syntax also works::
|
| 813 |
+
|
| 814 |
+
>>> def f(x1, x2):
|
| 815 |
+
... return x1**2 + x2, 5*x1**2 - 3*x1 + 2*x2 - 3
|
| 816 |
+
...
|
| 817 |
+
>>> findroot(f, (0, 0))
|
| 818 |
+
[-0.618033988749895]
|
| 819 |
+
[-0.381966011250105]
|
| 820 |
+
|
| 821 |
+
|
| 822 |
+
**Multiple roots**
|
| 823 |
+
|
| 824 |
+
For multiple roots all methods of the Newtonian family (including secant)
|
| 825 |
+
converge slowly. Consider this example::
|
| 826 |
+
|
| 827 |
+
>>> f = lambda x: (x - 1)**99
|
| 828 |
+
>>> findroot(f, 0.9, verify=False)
|
| 829 |
+
0.918073542444929
|
| 830 |
+
|
| 831 |
+
Even for a very close starting point the secant method converges very
|
| 832 |
+
slowly. Use ``verbose=True`` to illustrate this.
|
| 833 |
+
|
| 834 |
+
It is possible to modify Newton's method to make it converge regardless of
|
| 835 |
+
the root's multiplicity::
|
| 836 |
+
|
| 837 |
+
>>> findroot(f, -10, solver='mnewton')
|
| 838 |
+
1.0
|
| 839 |
+
|
| 840 |
+
This variant uses the first and second derivative of the function, which is
|
| 841 |
+
not very efficient.
|
| 842 |
+
|
| 843 |
+
Alternatively you can use an experimental Newtonian solver that keeps track
|
| 844 |
+
of the speed of convergence and accelerates it using Steffensen's method if
|
| 845 |
+
necessary::
|
| 846 |
+
|
| 847 |
+
>>> findroot(f, -10, solver='anewton', verbose=True)
|
| 848 |
+
x: -9.88888888888888888889
|
| 849 |
+
error: 0.111111111111111111111
|
| 850 |
+
converging slowly
|
| 851 |
+
x: -9.77890011223344556678
|
| 852 |
+
error: 0.10998877665544332211
|
| 853 |
+
converging slowly
|
| 854 |
+
x: -9.67002233332199662166
|
| 855 |
+
error: 0.108877778911448945119
|
| 856 |
+
converging slowly
|
| 857 |
+
accelerating convergence
|
| 858 |
+
x: -9.5622443299551077669
|
| 859 |
+
error: 0.107778003366888854764
|
| 860 |
+
converging slowly
|
| 861 |
+
x: 0.99999999999999999214
|
| 862 |
+
error: 10.562244329955107759
|
| 863 |
+
x: 1.0
|
| 864 |
+
error: 7.8598304758094664213e-18
|
| 865 |
+
ZeroDivisionError: canceled with x = 1.0
|
| 866 |
+
1.0
|
| 867 |
+
|
| 868 |
+
**Complex roots**
|
| 869 |
+
|
| 870 |
+
For complex roots it's recommended to use Muller's method as it converges
|
| 871 |
+
even for real starting points very fast::
|
| 872 |
+
|
| 873 |
+
>>> findroot(lambda x: x**4 + x + 1, (0, 1, 2), solver='muller')
|
| 874 |
+
(0.727136084491197 + 0.934099289460529j)
|
| 875 |
+
|
| 876 |
+
|
| 877 |
+
**Intersection methods**
|
| 878 |
+
|
| 879 |
+
When you need to find a root in a known interval, it's highly recommended to
|
| 880 |
+
use an intersection-based solver like ``'anderson'`` or ``'ridder'``.
|
| 881 |
+
Usually they converge faster and more reliably. They have however problems
|
| 882 |
+
with multiple roots and usually need a sign change to find a root::
|
| 883 |
+
|
| 884 |
+
>>> findroot(lambda x: x**3, (-1, 1), solver='anderson')
|
| 885 |
+
0.0
|
| 886 |
+
|
| 887 |
+
Be careful with symmetric functions::
|
| 888 |
+
|
| 889 |
+
>>> findroot(lambda x: x**2, (-1, 1), solver='anderson') #doctest:+ELLIPSIS
|
| 890 |
+
Traceback (most recent call last):
|
| 891 |
+
...
|
| 892 |
+
ZeroDivisionError
|
| 893 |
+
|
| 894 |
+
It fails even for better starting points, because there is no sign change::
|
| 895 |
+
|
| 896 |
+
>>> findroot(lambda x: x**2, (-1, .5), solver='anderson')
|
| 897 |
+
Traceback (most recent call last):
|
| 898 |
+
...
|
| 899 |
+
ValueError: Could not find root within given tolerance. (1.0 > 2.16840434497100886801e-19)
|
| 900 |
+
Try another starting point or tweak arguments.
|
| 901 |
+
|
| 902 |
+
"""
|
| 903 |
+
prec = ctx.prec
|
| 904 |
+
try:
|
| 905 |
+
ctx.prec += 20
|
| 906 |
+
|
| 907 |
+
# initialize arguments
|
| 908 |
+
if tol is None:
|
| 909 |
+
tol = ctx.eps * 2**10
|
| 910 |
+
|
| 911 |
+
kwargs['verbose'] = kwargs.get('verbose', verbose)
|
| 912 |
+
|
| 913 |
+
if 'd1f' in kwargs:
|
| 914 |
+
kwargs['df'] = kwargs['d1f']
|
| 915 |
+
|
| 916 |
+
kwargs['tol'] = tol
|
| 917 |
+
if isinstance(x0, (list, tuple)):
|
| 918 |
+
x0 = [ctx.convert(x) for x in x0]
|
| 919 |
+
else:
|
| 920 |
+
x0 = [ctx.convert(x0)]
|
| 921 |
+
|
| 922 |
+
if isinstance(solver, str):
|
| 923 |
+
try:
|
| 924 |
+
solver = str2solver[solver]
|
| 925 |
+
except KeyError:
|
| 926 |
+
raise ValueError('could not recognize solver')
|
| 927 |
+
|
| 928 |
+
# accept list of functions
|
| 929 |
+
if isinstance(f, (list, tuple)):
|
| 930 |
+
f2 = copy(f)
|
| 931 |
+
def tmp(*args):
|
| 932 |
+
return [fn(*args) for fn in f2]
|
| 933 |
+
f = tmp
|
| 934 |
+
|
| 935 |
+
# detect multidimensional functions
|
| 936 |
+
try:
|
| 937 |
+
fx = f(*x0)
|
| 938 |
+
multidimensional = isinstance(fx, (list, tuple, ctx.matrix))
|
| 939 |
+
except TypeError:
|
| 940 |
+
fx = f(x0[0])
|
| 941 |
+
multidimensional = False
|
| 942 |
+
if 'multidimensional' in kwargs:
|
| 943 |
+
multidimensional = kwargs['multidimensional']
|
| 944 |
+
if multidimensional:
|
| 945 |
+
# only one multidimensional solver available at the moment
|
| 946 |
+
solver = MDNewton
|
| 947 |
+
if not 'norm' in kwargs:
|
| 948 |
+
norm = lambda x: ctx.norm(x, 'inf')
|
| 949 |
+
kwargs['norm'] = norm
|
| 950 |
+
else:
|
| 951 |
+
norm = kwargs['norm']
|
| 952 |
+
else:
|
| 953 |
+
norm = abs
|
| 954 |
+
|
| 955 |
+
# happily return starting point if it's a root
|
| 956 |
+
if norm(fx) == 0:
|
| 957 |
+
if multidimensional:
|
| 958 |
+
return ctx.matrix(x0)
|
| 959 |
+
else:
|
| 960 |
+
return x0[0]
|
| 961 |
+
|
| 962 |
+
# use solver
|
| 963 |
+
iterations = solver(ctx, f, x0, **kwargs)
|
| 964 |
+
if 'maxsteps' in kwargs:
|
| 965 |
+
maxsteps = kwargs['maxsteps']
|
| 966 |
+
else:
|
| 967 |
+
maxsteps = iterations.maxsteps
|
| 968 |
+
i = 0
|
| 969 |
+
for x, error in iterations:
|
| 970 |
+
if verbose:
|
| 971 |
+
print('x: ', x)
|
| 972 |
+
print('error:', error)
|
| 973 |
+
i += 1
|
| 974 |
+
if error < tol * max(1, norm(x)) or i >= maxsteps:
|
| 975 |
+
break
|
| 976 |
+
else:
|
| 977 |
+
if not i:
|
| 978 |
+
raise ValueError('Could not find root using the given solver.\n'
|
| 979 |
+
'Try another starting point or tweak arguments.')
|
| 980 |
+
if not isinstance(x, (list, tuple, ctx.matrix)):
|
| 981 |
+
xl = [x]
|
| 982 |
+
else:
|
| 983 |
+
xl = x
|
| 984 |
+
if verify and norm(f(*xl))**2 > tol: # TODO: better condition?
|
| 985 |
+
raise ValueError('Could not find root within given tolerance. '
|
| 986 |
+
'(%s > %s)\n'
|
| 987 |
+
'Try another starting point or tweak arguments.'
|
| 988 |
+
% (norm(f(*xl))**2, tol))
|
| 989 |
+
return x
|
| 990 |
+
finally:
|
| 991 |
+
ctx.prec = prec
|
| 992 |
+
|
| 993 |
+
|
| 994 |
+
def multiplicity(ctx, f, root, tol=None, maxsteps=10, **kwargs):
    """
    Return the multiplicity of a given root of f.

    Internally, numerical derivatives are used. This might be inefficient for
    higher order derivatives. Due to this, ``multiplicity`` cancels after
    evaluating 10 derivatives by default. You can specify the n-th derivative
    using the dnf keyword.

    >>> from mpmath import *
    >>> multiplicity(lambda x: sin(x) - 1, pi/2)
    2

    """
    if tol is None:
        tol = ctx.eps ** 0.8
    # The 0th "derivative" is f itself; this lets the loop below treat all
    # orders uniformly through the 'd<n>f' keyword lookup.
    kwargs['d0f'] = f
    for i in xrange(maxsteps):
        dfstr = 'd' + str(i) + 'f'
        if dfstr in kwargs:
            # A user-supplied exact derivative takes precedence over the
            # (potentially expensive) numerical one.
            df = kwargs[dfstr]
        else:
            # Fall back to the numerical derivative of order i.
            df = lambda x: ctx.diff(f, x, i)
        if not abs(df(root)) < tol:
            # The order of the first nonvanishing derivative at the root is
            # the multiplicity.
            break
    return i
|
| 1020 |
+
|
| 1021 |
+
def steffensen(f):
    """
    Turn a linearly convergent fixed-point function into a quadratically
    convergent one.

    Given *f*, return the function

        F(x) = (x*f(f(x)) - f(x)**2) / (f(f(x)) - 2*f(x) + x)

    which has the same fixed points as *f*. By Steffensen's method the
    iteration of F converges quadratically near a fixed point whenever the
    iteration of *f* converges (at most) linearly; it may even work for
    divergent sequences and can reach repelling fixed points. Do not use it
    for sequences that already converge at a higher-than-linear rate.

    See mpmath.calculus.optimization for usage within the solvers (e.g. the
    'anewton' solver accelerates slow convergence with this transform).
    """
    def accelerated(x):
        # One step of Aitken's delta-squared extrapolation applied to the
        # pair (f(x), f(f(x))). Raises ZeroDivisionError at an exact fixed
        # point, where the denominator vanishes.
        first = f(x)
        second = f(first)
        return (x*second - first**2) / (second - 2*first + x)
    return accelerated
|
| 1095 |
+
|
| 1096 |
+
# Attach the solver helpers to the OptimizationMethods mixin so they become
# available as context methods (e.g. mp.findroot, mp.jacobian).
OptimizationMethods.jacobian = jacobian
OptimizationMethods.findroot = findroot
OptimizationMethods.multiplicity = multiplicity

if __name__ == '__main__':
    import doctest
    doctest.testmod()
|
vllm/lib/python3.10/site-packages/mpmath/calculus/polynomials.py
ADDED
|
@@ -0,0 +1,213 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ..libmp.backend import xrange
|
| 2 |
+
from .calculus import defun
|
| 3 |
+
|
| 4 |
+
#----------------------------------------------------------------------------#
|
| 5 |
+
# Polynomials #
|
| 6 |
+
#----------------------------------------------------------------------------#
|
| 7 |
+
|
| 8 |
+
# XXX: extra precision
|
| 9 |
+
@defun
|
| 10 |
+
def polyval(ctx, coeffs, x, derivative=False):
    r"""
    Given coefficients `[c_n, \ldots, c_2, c_1, c_0]` and a number `x`,
    :func:`~mpmath.polyval` evaluates the polynomial

    .. math ::

        P(x) = c_n x^n + \ldots + c_2 x^2 + c_1 x + c_0.

    If *derivative=True* is set, :func:`~mpmath.polyval` simultaneously
    evaluates `P(x)` with the derivative, `P'(x)`, and returns the
    tuple `(P(x), P'(x))`.

    >>> from mpmath import *
    >>> mp.pretty = True
    >>> polyval([3, 0, 2], 0.5)
    2.75
    >>> polyval([3, 0, 2], 0.5, derivative=True)
    (2.75, 3.0)

    The coefficients and the evaluation point may be any combination
    of real or complex numbers.
    """
    if not coeffs:
        # Empty coefficient list: the zero polynomial. Return a pair when
        # the derivative was requested so the return type stays consistent.
        if derivative:
            return ctx.zero, ctx.zero
        return ctx.zero
    # Horner's scheme; q accumulates P'(x) alongside p = P(x).
    p = ctx.convert(coeffs[0])
    q = ctx.zero
    for c in coeffs[1:]:
        if derivative:
            q = p + x*q
        p = c + x*p
    if derivative:
        return p, q
    else:
        return p
|
| 45 |
+
|
| 46 |
+
@defun
|
| 47 |
+
def polyroots(ctx, coeffs, maxsteps=50, cleanup=True, extraprec=10,
        error=False, roots_init=None):
    """
    Computes all roots (real or complex) of a given polynomial.

    The roots are returned as a sorted list, where real roots appear first
    followed by complex conjugate roots as adjacent elements. The polynomial
    should be given as a list of coefficients, in the format used by
    :func:`~mpmath.polyval`. The leading coefficient must be nonzero.

    With *error=True*, :func:`~mpmath.polyroots` returns a tuple *(roots, err)*
    where *err* is an estimate of the maximum error among the computed roots.

    **Examples**

    Finding the three real roots of `x^3 - x^2 - 14x + 24`::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> nprint(polyroots([1,-1,-14,24]), 4)
        [-4.0, 2.0, 3.0]

    Finding the two complex conjugate roots of `4x^2 + 3x + 2`, with an
    error estimate::

        >>> roots, err = polyroots([4,3,2], error=True)
        >>> for r in roots:
        ...     print(r)
        ...
        (-0.375 + 0.59947894041409j)
        (-0.375 - 0.59947894041409j)
        >>>
        >>> err
        2.22044604925031e-16
        >>>
        >>> polyval([4,3,2], roots[0])
        (2.22044604925031e-16 + 0.0j)
        >>> polyval([4,3,2], roots[1])
        (2.22044604925031e-16 + 0.0j)

    The following example computes all the 5th roots of unity; that is,
    the roots of `x^5 - 1`::

        >>> mp.dps = 20
        >>> for r in polyroots([1, 0, 0, 0, 0, -1]):
        ...     print(r)
        ...
        1.0
        (-0.8090169943749474241 + 0.58778525229247312917j)
        (-0.8090169943749474241 - 0.58778525229247312917j)
        (0.3090169943749474241 + 0.95105651629515357212j)
        (0.3090169943749474241 - 0.95105651629515357212j)

    **Precision and conditioning**

    The roots are computed to the current working precision accuracy. If this
    accuracy cannot be achieved in ``maxsteps`` steps, then a
    ``NoConvergence`` exception is raised. The algorithm internally is using
    the current working precision extended by ``extraprec``. If
    ``NoConvergence`` was raised, that is caused either by not having enough
    extra precision to achieve convergence (in which case increasing
    ``extraprec`` should fix the problem) or too low ``maxsteps`` (in which
    case increasing ``maxsteps`` should fix the problem), or a combination of
    both.

    The user should always do a convergence study with regards to
    ``extraprec`` to ensure accurate results. It is possible to get
    convergence to a wrong answer with too low ``extraprec``.

    Provided there are no repeated roots, :func:`~mpmath.polyroots` can
    typically compute all roots of an arbitrary polynomial to high precision::

        >>> mp.dps = 60
        >>> for r in polyroots([1, 0, -10, 0, 1]):
        ...     print(r)
        ...
        -3.14626436994197234232913506571557044551247712918732870123249
        -0.317837245195782244725757617296174288373133378433432554879127
        0.317837245195782244725757617296174288373133378433432554879127
        3.14626436994197234232913506571557044551247712918732870123249
        >>>
        >>> sqrt(3) + sqrt(2)
        3.14626436994197234232913506571557044551247712918732870123249
        >>> sqrt(3) - sqrt(2)
        0.317837245195782244725757617296174288373133378433432554879127

    **Algorithm**

    :func:`~mpmath.polyroots` implements the Durand-Kerner method [1], which
    uses complex arithmetic to locate all roots simultaneously.
    The Durand-Kerner method can be viewed as approximately performing
    simultaneous Newton iteration for all the roots. In particular,
    the convergence to simple roots is quadratic, just like Newton's
    method.

    Although all roots are internally calculated using complex arithmetic, any
    root found to have an imaginary part smaller than the estimated numerical
    error is truncated to a real number (small real parts are also chopped).
    Real roots are placed first in the returned list, sorted by value. The
    remaining complex roots are sorted by their real parts so that conjugate
    roots end up next to each other.

    **References**

    1. http://en.wikipedia.org/wiki/Durand-Kerner_method

    """
    if len(coeffs) <= 1:
        if not coeffs or not coeffs[0]:
            raise ValueError("Input to polyroots must not be the zero polynomial")
        # Constant polynomial with no roots
        return []

    orig = ctx.prec
    tol = +ctx.eps
    with ctx.extraprec(extraprec):
        deg = len(coeffs) - 1
        # Must be monic
        lead = ctx.convert(coeffs[0])
        if lead == 1:
            coeffs = [ctx.convert(c) for c in coeffs]
        else:
            coeffs = [c/lead for c in coeffs]
        f = lambda x: ctx.polyval(coeffs, x)
        if roots_init is None:
            # Standard Durand-Kerner starting values: powers of a complex
            # number that is neither real nor a root of unity.
            roots = [ctx.mpc((0.4+0.9j)**n) for n in xrange(deg)]
        else:
            # Use the user-supplied guesses, padding with the standard
            # starting values if too few were given.
            roots = [None]*deg
            deg_init = min(deg, len(roots_init))
            roots[:deg_init] = list(roots_init[:deg_init])
            roots[deg_init:] = [ctx.mpc((0.4+0.9j)**n) for n
                                in xrange(deg_init,deg)]
        err = [ctx.one for n in xrange(deg)]
        # Durand-Kerner iteration until convergence
        for step in xrange(maxsteps):
            if abs(max(err)) < tol:
                break
            for i in xrange(deg):
                p = roots[i]
                x = f(p)
                # Divide by the product of differences to the other current
                # root estimates (the Weierstrass correction).
                for j in xrange(deg):
                    if i != j:
                        try:
                            x /= (p-roots[j])
                        except ZeroDivisionError:
                            # Two estimates coincided; skip this factor.
                            continue
                roots[i] = p - x
                err[i] = abs(x)
        if abs(max(err)) >= tol:
            raise ctx.NoConvergence("Didn't converge in maxsteps=%d steps." \
                    % maxsteps)
        # Remove small real or imaginary parts
        if cleanup:
            for i in xrange(deg):
                if abs(roots[i]) < tol:
                    roots[i] = ctx.zero
                elif abs(ctx._im(roots[i])) < tol:
                    roots[i] = roots[i].real
                elif abs(ctx._re(roots[i])) < tol:
                    roots[i] = roots[i].imag * 1j
        # Real roots first (sorted by value), then complex roots grouped so
        # that conjugates end up adjacent.
        roots.sort(key=lambda x: (abs(ctx._im(x)), ctx._re(x)))
    if error:
        err = max(err)
        # Never report an error smaller than one ulp at the original
        # working precision.
        err = max(err, ctx.ldexp(1, -orig+1))
        return [+r for r in roots], +err
    else:
        return [+r for r in roots]
|
vllm/lib/python3.10/site-packages/mpmath/calculus/quadrature.py
ADDED
|
@@ -0,0 +1,1115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
|
| 3 |
+
from ..libmp.backend import xrange
|
| 4 |
+
|
| 5 |
+
class QuadratureRule(object):
|
| 6 |
+
"""
|
| 7 |
+
Quadrature rules are implemented using this class, in order to
|
| 8 |
+
simplify the code and provide a common infrastructure
|
| 9 |
+
for tasks such as error estimation and node caching.
|
| 10 |
+
|
| 11 |
+
You can implement a custom quadrature rule by subclassing
|
| 12 |
+
:class:`QuadratureRule` and implementing the appropriate
|
| 13 |
+
methods. The subclass can then be used by :func:`~mpmath.quad` by
|
| 14 |
+
passing it as the *method* argument.
|
| 15 |
+
|
| 16 |
+
:class:`QuadratureRule` instances are supposed to be singletons.
|
| 17 |
+
:class:`QuadratureRule` therefore implements instance caching
|
| 18 |
+
in :func:`~mpmath.__new__`.
|
| 19 |
+
"""
|
| 20 |
+
|
| 21 |
+
    def __init__(self, ctx):
        # Context (e.g. mp, fp) providing precision and number types.
        self.ctx = ctx
        # (degree, prec) -> nodes for the standard interval [-1, 1].
        self.standard_cache = {}
        # (a, b, degree, prec) -> nodes transformed to the interval [a, b].
        self.transformed_cache = {}
        # Keys get_nodes has seen once; transformed nodes are only cached
        # on a repeat request (see get_nodes).
        self.interval_count = {}
|
| 26 |
+
|
| 27 |
+
def clear(self):
|
| 28 |
+
"""
|
| 29 |
+
Delete cached node data.
|
| 30 |
+
"""
|
| 31 |
+
self.standard_cache = {}
|
| 32 |
+
self.transformed_cache = {}
|
| 33 |
+
self.interval_count = {}
|
| 34 |
+
|
| 35 |
+
    def calc_nodes(self, degree, prec, verbose=False):
        r"""
        Compute nodes for the standard interval `[-1, 1]`. Subclasses
        should probably implement only this method, and use
        :func:`~mpmath.get_nodes` method to retrieve the nodes.
        """
        # Abstract method: concrete quadrature rules must override this
        # with the actual node/weight computation.
        raise NotImplementedError
|
| 42 |
+
|
| 43 |
+
    def get_nodes(self, a, b, degree, prec, verbose=False):
        """
        Return nodes for given interval, degree and precision. The
        nodes are retrieved from a cache if already computed;
        otherwise they are computed by calling :func:`~mpmath.calc_nodes`
        and are then cached.

        Subclasses should probably not implement this method,
        but just implement :func:`~mpmath.calc_nodes` for the actual
        node computation.
        """
        key = (a, b, degree, prec)
        if key in self.transformed_cache:
            return self.transformed_cache[key]
        orig = self.ctx.prec
        try:
            # Compute with guard digits; restored in the finally clause.
            self.ctx.prec = prec+20
            # Get nodes on standard interval
            if (degree, prec) in self.standard_cache:
                nodes = self.standard_cache[degree, prec]
            else:
                nodes = self.calc_nodes(degree, prec, verbose)
                self.standard_cache[degree, prec] = nodes
            # Transform to general interval
            nodes = self.transform_nodes(nodes, a, b, verbose)
            # Cache the transformed nodes only on the *second* request for
            # this key; the first request just records the key, so one-off
            # intervals do not bloat the cache.
            if key in self.interval_count:
                self.transformed_cache[key] = nodes
            else:
                self.interval_count[key] = True
        finally:
            self.ctx.prec = orig
        return nodes
|
| 75 |
+
|
| 76 |
+
def transform_nodes(self, nodes, a, b, verbose=False):
    r"""
    Rescale standardized nodes (for `[-1, 1]`) to a general
    interval `[a, b]`.  For a finite interval, a simple linear
    change of variables is used.  Otherwise, the following
    transformations are used:

    .. math ::

        \lbrack a, \infty \rbrack : t = \frac{1}{x} + (a-1)

        \lbrack -\infty, b \rbrack : t = (b+1) - \frac{1}{x}

        \lbrack -\infty, \infty \rbrack : t = \frac{x}{\sqrt{1-x^2}}

    """
    ctx = self.ctx
    a = ctx.convert(a)
    b = ctx.convert(b)
    one = ctx.one
    if (a, b) == (-one, one):
        # Already the standard interval -- nothing to do.
        return nodes
    half = ctx.mpf(0.5)
    if not (ctx.isinf(a) or ctx.isinf(b)):
        # Finite interval: linear map t = D + C*x, dt = C dx.
        C = (b-a)/2
        D = (b+a)/2
        return [(D+C*x, C*w) for (x, w) in nodes]
    out = []
    if (a, b) == (ctx.ninf, ctx.inf):
        # Doubly infinite interval: t = x/sqrt(1-x^2).
        p05 = -half
        for x, w in nodes:
            x2 = x*x
            px1 = one-x2
            spx1 = px1**p05
            out.append((x*spx1, w*(spx1/px1)))
    elif a == ctx.ninf:
        # (-inf, b]: t = (b+1) - 1/x, with u = 2/(x+1).
        b1 = b+1
        for x, w in nodes:
            u = 2/(x+one)
            out.append((b1-u, w*(half*u**2)))
    elif b == ctx.inf:
        # [a, inf): t = 1/x + (a-1), with u = 2/(x+1).
        a1 = a-1
        for x, w in nodes:
            u = 2/(x+one)
            out.append((a1+u, w*(half*u**2)))
    elif a == ctx.inf or b == ctx.ninf:
        # Reversed orientation: flip the interval and negate weights.
        return [(x, -w) for (x, w) in self.transform_nodes(nodes, b, a, verbose)]
    else:
        raise NotImplementedError
    return out
|
| 135 |
+
|
| 136 |
+
def guess_degree(self, prec):
    """
    Estimate the quadrature degree `m` needed to reach full accuracy
    at `p` bits of precision for typical integrals.  By default,
    :func:`~mpmath.quad` performs up to `m` iterations.  The estimate
    is a deliberate slight overestimate, so that "slightly bad"
    integrals converge with a few extra iterations, yet small enough
    that :func:`~mpmath.quad` gives up on "unsolvable" integrals in a
    reasonable amount of time.

    The default formula is tuned for both :class:`TanhSinh` and
    :class:`GaussLegendre`.  The output is roughly as follows:

        +---------+---------+
        | `p`     | `m`     |
        +=========+=========+
        | 50      | 6       |
        +---------+---------+
        | 100     | 7       |
        +---------+---------+
        | 500     | 10      |
        +---------+---------+
        | 3000    | 12      |
        +---------+---------+

    This formula is based purely on a limited amount of
    experimentation and will sometimes be wrong.
    """
    # Expected degree scales with log2 of the precision.
    # XXX: use mag
    expected = int(4 + max(0, self.ctx.log(prec/30.0, 2)))
    # Add two levels of margin as a reasonable "worst case".
    return expected + 2
|
| 173 |
+
|
| 174 |
+
def estimate_error(self, results, prec, epsilon):
    r"""
    Given results `[I_1, I_2, \ldots, I_k]` from quadrature at degrees
    `1, 2, \ldots, k`, estimate the error of `I_k`.

    For `k = 2`, `|I_{\infty}-I_2|` is estimated as `|I_2-I_1|`.

    For `k > 2`, `|I_{\infty}-I_k| \approx |I_{k+1}-I_k|` is
    extrapolated from `|I_k-I_{k-1}|` and `|I_k-I_{k-2}|`, assuming
    each degree increment roughly doubles the accuracy of the rule
    (true for both :class:`TanhSinh` and :class:`GaussLegendre`).
    The extrapolation formula is the one given by Borwein, Bailey &
    Girgensohn; although not very conservative, it seems very robust
    in practice.
    """
    if len(results) == 2:
        return abs(results[0]-results[1])
    last, prev, prev2 = results[-1], results[-2], results[-3]
    try:
        if last == prev == prev2:
            # Three identical values in a row: treat as fully converged.
            return self.ctx.zero
        d1 = self.ctx.log(abs(last-prev), 10)
        d2 = self.ctx.log(abs(last-prev2), 10)
    except ValueError:
        # log of a zero difference; fall back to the target epsilon.
        return epsilon
    d3 = -prec
    # Error exponent, clipped to [d3, 0].
    d4 = min(0, max(d1**2/d2, 2*d1, d3))
    return self.ctx.mpf(10) ** int(d4)
|
| 202 |
+
|
| 203 |
+
def summation(self, f, points, prec, epsilon, max_degree, verbose=False):
    """
    Main integration function. Computes the 1D integral over
    the interval specified by *points*. For each subinterval,
    performs quadrature of degree from 1 up to *max_degree*
    until :func:`~mpmath.estimate_error` signals convergence.

    :func:`~mpmath.summation` transforms each subintegration to
    the standard interval and then calls :func:`~mpmath.sum_next`.

    Returns a pair ``(I, total_err)``: the accumulated integral and
    the sum of the per-subinterval error estimates.
    """
    ctx = self.ctx
    I = total_err = ctx.zero
    # Each consecutive pair of points defines one subintegration.
    for i in xrange(len(points)-1):
        a, b = points[i], points[i+1]
        if a == b:
            # Zero-width subinterval contributes nothing.
            continue
        # XXX: we could use a single variable transformation,
        # but this is not good in practice. We get better accuracy
        # by having 0 as an endpoint.
        if (a, b) == (ctx.ninf, ctx.inf):
            # Fold the doubly infinite interval onto [0, inf) by
            # integrating f(-x) + f(x) instead.
            # NOTE(review): this rebinds *f*, so any subintervals that
            # come after a (-inf, inf) one would integrate the folded
            # function -- confirm callers never mix these in one call.
            _f = f
            f = lambda x: _f(-x) + _f(x)
            a, b = (ctx.zero, ctx.inf)
        results = []
        err = ctx.zero
        # Raise the degree until the error estimate drops below epsilon
        # (error estimation needs at least two results, hence degree > 1).
        for degree in xrange(1, max_degree+1):
            nodes = self.get_nodes(a, b, degree, prec, verbose)
            if verbose:
                print("Integrating from %s to %s (degree %s of %s)" % \
                    (ctx.nstr(a), ctx.nstr(b), degree, max_degree))
            result = self.sum_next(f, nodes, degree, prec, results, verbose)
            results.append(result)
            if degree > 1:
                err = self.estimate_error(results, prec, epsilon)
                if verbose:
                    print("Estimated error:", ctx.nstr(err), " epsilon:", ctx.nstr(epsilon), " result: ", ctx.nstr(result))
                if err <= epsilon:
                    break
        # Accumulate the best available result for this subinterval.
        I += results[-1]
        total_err += err
    if total_err > epsilon:
        if verbose:
            print("Failed to reach full accuracy. Estimated error:", ctx.nstr(total_err))
    return I, total_err
|
| 247 |
+
|
| 248 |
+
def sum_next(self, f, nodes, degree, prec, previous, verbose=False):
    r"""
    Evaluate the weighted step sum `\sum w_k f(x_k)` over the supplied
    *nodes*, given as `(x_k, w_k)` pairs.

    The *previous* list holds the values returned by this method at
    lower degrees (supplied by :func:`~mpmath.summation`).  The
    generic rule ignores it, but subclasses may reuse those results.
    """
    terms = ((w, f(x)) for (x, w) in nodes)
    return self.ctx.fdot(terms)
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
class TanhSinh(QuadratureRule):
    r"""
    This class implements "tanh-sinh" or "doubly exponential"
    quadrature. This quadrature rule is based on the Euler-Maclaurin
    integral formula. By performing a change of variables involving
    nested exponentials / hyperbolic functions (hence the name), the
    derivatives at the endpoints vanish rapidly. Since the error term
    in the Euler-Maclaurin formula depends on the derivatives at the
    endpoints, a simple step sum becomes extremely accurate. In
    practice, this means that doubling the number of evaluation
    points roughly doubles the number of accurate digits.

    Comparison to Gauss-Legendre:
      * Initial computation of nodes is usually faster
      * Handles endpoint singularities better
      * Handles infinite integration intervals better
      * Is slower for smooth integrands once nodes have been computed

    The implementation of the tanh-sinh algorithm is based on the
    description given in Borwein, Bailey & Girgensohn, "Experimentation
    in Mathematics - Computational Paths to Discovery", A K Peters,
    2003, pages 312-313. In the present implementation, a few
    improvements have been made:

      * A more efficient scheme is used to compute nodes (exploiting
        recurrence for the exponential function)
      * The nodes are computed successively instead of all at once

    **References**

    * [Bailey]_
    * http://users.cs.dal.ca/~jborwein/tanh-sinh.pdf

    """

    def sum_next(self, f, nodes, degree, prec, previous, verbose=False):
        """
        Step sum for tanh-sinh quadrature of degree `m`. We exploit the
        fact that half of the abscissas at degree `m` are precisely the
        abscissas from degree `m-1`. Thus reusing the result from
        the previous level allows a 2x speedup.
        """
        # Step length h = 2^(-m); it halves at each degree increment.
        h = self.ctx.mpf(2)**(-degree)
        # Abscissas overlap, so reusing saves half of the time
        if previous:
            # previous[-1] is h_prev * S_prev with h_prev = 2*h, so
            # dividing by 2h recovers the partial node sum from m-1.
            S = previous[-1]/(h*2)
        else:
            S = self.ctx.zero
        S += self.ctx.fdot((w,f(x)) for (x,w) in nodes)
        return h*S

    def calc_nodes(self, degree, prec, verbose=False):
        r"""
        The abscissas and weights for tanh-sinh quadrature of degree
        `m` are given by

        .. math::

            x_k = \tanh(\pi/2 \sinh(t_k))

            w_k = \pi/2 \cosh(t_k) / \cosh(\pi/2 \sinh(t_k))^2

        where `t_k = t_0 + hk` for a step length `h \sim 2^{-m}`. The
        list of nodes is actually infinite, but the weights die off so
        rapidly that only a few are needed.
        """
        ctx = self.ctx
        nodes = []

        # Guard digits for the node computation itself.
        extra = 20
        ctx.prec += extra
        tol = ctx.ldexp(1, -prec-10)
        pi4 = ctx.pi/4

        # For simplicity, we work in steps h = 1/2^n, with the first point
        # offset so that we can reuse the sum from the previous degree

        # We define degree 1 to include the "degree 0" steps, including
        # the point x = 0. (It doesn't work well otherwise; not sure why.)
        t0 = ctx.ldexp(1, -degree)
        if degree == 1:
            #nodes.append((mpf(0), pi4))
            #nodes.append((-mpf(0), pi4))
            nodes.append((ctx.zero, ctx.pi/2))
            h = t0
        else:
            h = t0*2

        # Since h is fixed, we can compute the next exponential
        # by simply multiplying by exp(h)
        expt0 = ctx.exp(t0)
        a = pi4 * expt0
        b = pi4 / expt0
        udelta = ctx.exp(h)
        urdelta = 1/udelta

        for k in xrange(0, 20*2**degree+1):
            # Reference implementation:
            # t = t0 + k*h
            # x = tanh(pi/2 * sinh(t))
            # w = pi/2 * cosh(t) / cosh(pi/2 * sinh(t))**2

            # Fast implementation. Note that c = exp(pi/2 * sinh(t))
            c = ctx.exp(a-b)
            d = 1/c
            co = (c+d)/2
            si = (c-d)/2
            x = si / co
            w = (a+b) / co**2
            diff = abs(x-1)
            if diff <= tol:
                # Node has effectively reached the endpoint 1; the
                # remaining weights are below the tolerance.
                break

            nodes.append((x, w))
            nodes.append((-x, w))

            # Advance a = pi/4*exp(t), b = pi/4*exp(-t) by one step h.
            a *= udelta
            b *= urdelta

            if verbose and k % 300 == 150:
                # Note: the number displayed is rather arbitrary. Should
                # figure out how to print something that looks more like a
                # percentage
                print("Calculating nodes:", ctx.nstr(-ctx.log(diff, 10) / prec))

        ctx.prec -= extra
        return nodes
|
| 387 |
+
|
| 388 |
+
|
| 389 |
+
class GaussLegendre(QuadratureRule):
    r"""
    This class implements Gauss-Legendre quadrature, which is
    exceptionally efficient for polynomials and polynomial-like (i.e.
    very smooth) integrands.

    The abscissas and weights are given by roots and values of
    Legendre polynomials, which are the orthogonal polynomials
    on `[-1, 1]` with respect to the unit weight
    (see :func:`~mpmath.legendre`).

    In this implementation, we take the "degree" `m` of the quadrature
    to denote a Gauss-Legendre rule of degree `3 \cdot 2^m` (following
    Borwein, Bailey & Girgensohn). This way we get quadratic, rather
    than linear, convergence as the degree is incremented.

    Comparison to tanh-sinh quadrature:
      * Is faster for smooth integrands once nodes have been computed
      * Initial computation of nodes is usually slower
      * Handles endpoint singularities worse
      * Handles infinite integration intervals worse

    """

    def calc_nodes(self, degree, prec, verbose=False):
        r"""
        Calculates the abscissas and weights for Gauss-Legendre
        quadrature of degree of given degree (actually `3 \cdot 2^m`).
        """
        ctx = self.ctx
        # It is important that the epsilon is set lower than the
        # "real" epsilon
        epsilon = ctx.ldexp(1, -prec-8)
        # Fairly high precision might be required for accurate
        # evaluation of the roots
        orig = ctx.prec
        ctx.prec = int(prec*1.5)
        if degree == 1:
            # Degree 1 is the classical 3-point rule with exact
            # closed-form abscissas and weights.
            x = ctx.sqrt(ctx.mpf(3)/5)
            w = ctx.mpf(5)/9
            nodes = [(-x,w),(ctx.zero,ctx.mpf(8)/9),(x,w)]
            ctx.prec = orig
            return nodes
        nodes = []
        n = 3*2**(degree-1)
        # Roots come in +/- pairs; only compute the positive half.
        upto = n//2 + 1
        for j in xrange(1, upto):
            # Asymptotic formula for the roots
            r = ctx.mpf(math.cos(math.pi*(j-0.25)/(n+0.5)))
            # Newton iteration
            while 1:
                t1, t2 = 1, 0
                # Evaluates the Legendre polynomial using its defining
                # recurrence relation
                for j1 in xrange(1,n+1):
                    t3, t2, t1 = t2, t1, ((2*j1-1)*r*t1 - (j1-1)*t2)/j1
                # t4 is the derivative P_n'(r) (standard identity).
                t4 = n*(r*t1-t2)/(r**2-1)
                a = t1/t4
                r = r - a
                if abs(a) < epsilon:
                    break
            x = r
            w = 2/((1-r**2)*t4**2)
            if verbose and j % 30 == 15:
                print("Computing nodes (%i of %i)" % (j, upto))
            nodes.append((x, w))
            nodes.append((-x, w))
        ctx.prec = orig
        return nodes
|
| 458 |
+
|
| 459 |
+
class QuadratureMethods(object):
|
| 460 |
+
|
| 461 |
+
def __init__(ctx, *args, **kwargs):
    # Attach one cached instance of each quadrature rule to the
    # context, so node caches persist across quad/quadts/quadgl calls.
    ctx._gauss_legendre = GaussLegendre(ctx)
    ctx._tanh_sinh = TanhSinh(ctx)
|
| 464 |
+
|
| 465 |
+
def quad(ctx, f, *points, **kwargs):
    r"""
    Computes a single, double or triple integral over a given
    1D interval, 2D rectangle, or 3D cuboid. A basic example::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> quad(sin, [0, pi])
        2.0

    A basic 2D integral::

        >>> f = lambda x, y: cos(x+y/2)
        >>> quad(f, [-pi/2, pi/2], [0, pi])
        4.0

    **Interval format**

    The integration range for each dimension may be specified
    using a list or tuple. Arguments are interpreted as follows:

    ``quad(f, [x1, x2])`` -- calculates
    `\int_{x_1}^{x_2} f(x) \, dx`

    ``quad(f, [x1, x2], [y1, y2])`` -- calculates
    `\int_{x_1}^{x_2} \int_{y_1}^{y_2} f(x,y) \, dy \, dx`

    ``quad(f, [x1, x2], [y1, y2], [z1, z2])`` -- calculates
    `\int_{x_1}^{x_2} \int_{y_1}^{y_2} \int_{z_1}^{z_2} f(x,y,z)
    \, dz \, dy \, dx`

    Endpoints may be finite or infinite. An interval descriptor
    may also contain more than two points. In this
    case, the integration is split into subintervals, between
    each pair of consecutive points. This is useful for
    dealing with mid-interval discontinuities, or integrating
    over large intervals where the function is irregular or
    oscillates.

    **Options**

    :func:`~mpmath.quad` recognizes the following keyword arguments:

    *method*
        Chooses integration algorithm (described below).
    *error*
        If set to true, :func:`~mpmath.quad` returns `(v, e)` where `v` is the
        integral and `e` is the estimated error.
    *maxdegree*
        Maximum degree of the quadrature rule to try before
        quitting.
    *verbose*
        Print details about progress.

    **Algorithms**

    Mpmath presently implements two integration algorithms: tanh-sinh
    quadrature and Gauss-Legendre quadrature. These can be selected
    using *method='tanh-sinh'* or *method='gauss-legendre'* or by
    passing the classes *method=TanhSinh*, *method=GaussLegendre*.
    The functions :func:`~mpmath.quadts` and :func:`~mpmath.quadgl` are also available
    as shortcuts.

    Both algorithms have the property that doubling the number of
    evaluation points roughly doubles the accuracy, so both are ideal
    for high precision quadrature (hundreds or thousands of digits).

    At high precision, computing the nodes and weights for the
    integration can be expensive (more expensive than computing the
    function values). To make repeated integrations fast, nodes
    are automatically cached.

    The advantages of the tanh-sinh algorithm are that it tends to
    handle endpoint singularities well, and that the nodes are cheap
    to compute on the first run. For these reasons, it is used by
    :func:`~mpmath.quad` as the default algorithm.

    Gauss-Legendre quadrature often requires fewer function
    evaluations, and is therefore often faster for repeated use, but
    the algorithm does not handle endpoint singularities as well and
    the nodes are more expensive to compute. Gauss-Legendre quadrature
    can be a better choice if the integrand is smooth and repeated
    integrations are required (e.g. for multiple integrals).

    See the documentation for :class:`TanhSinh` and
    :class:`GaussLegendre` for additional details.

    **Examples of 1D integrals**

    Intervals may be infinite or half-infinite. The following two
    examples evaluate the limits of the inverse tangent function
    (`\int 1/(1+x^2) = \tan^{-1} x`), and the Gaussian integral
    `\int_{\infty}^{\infty} \exp(-x^2)\,dx = \sqrt{\pi}`::

        >>> mp.dps = 15
        >>> quad(lambda x: 2/(x**2+1), [0, inf])
        3.14159265358979
        >>> quad(lambda x: exp(-x**2), [-inf, inf])**2
        3.14159265358979

    Integrals can typically be resolved to high precision.
    The following computes 50 digits of `\pi` by integrating the
    area of the half-circle defined by `x^2 + y^2 \le 1`,
    `-1 \le x \le 1`, `y \ge 0`::

        >>> mp.dps = 50
        >>> 2*quad(lambda x: sqrt(1-x**2), [-1, 1])
        3.1415926535897932384626433832795028841971693993751

    One can just as well compute 1000 digits (output truncated)::

        >>> mp.dps = 1000
        >>> 2*quad(lambda x: sqrt(1-x**2), [-1, 1]) #doctest:+ELLIPSIS
        3.141592653589793238462643383279502884...216420199

    Complex integrals are supported. The following computes
    a residue at `z = 0` by integrating counterclockwise along the
    diamond-shaped path from `1` to `+i` to `-1` to `-i` to `1`::

        >>> mp.dps = 15
        >>> chop(quad(lambda z: 1/z, [1,j,-1,-j,1]))
        (0.0 + 6.28318530717959j)

    **Examples of 2D and 3D integrals**

    Here are several nice examples of analytically solvable
    2D integrals (taken from MathWorld [1]) that can be evaluated
    to high precision fairly rapidly by :func:`~mpmath.quad`::

        >>> mp.dps = 30
        >>> f = lambda x, y: (x-1)/((1-x*y)*log(x*y))
        >>> quad(f, [0, 1], [0, 1])
        0.577215664901532860606512090082
        >>> +euler
        0.577215664901532860606512090082

        >>> f = lambda x, y: 1/sqrt(1+x**2+y**2)
        >>> quad(f, [-1, 1], [-1, 1])
        3.17343648530607134219175646705
        >>> 4*log(2+sqrt(3))-2*pi/3
        3.17343648530607134219175646705

        >>> f = lambda x, y: 1/(1-x**2 * y**2)
        >>> quad(f, [0, 1], [0, 1])
        1.23370055013616982735431137498
        >>> pi**2 / 8
        1.23370055013616982735431137498

        >>> quad(lambda x, y: 1/(1-x*y), [0, 1], [0, 1])
        1.64493406684822643647241516665
        >>> pi**2 / 6
        1.64493406684822643647241516665

    Multiple integrals may be done over infinite ranges::

        >>> mp.dps = 15
        >>> print(quad(lambda x,y: exp(-x-y), [0, inf], [1, inf]))
        0.367879441171442
        >>> print(1/e)
        0.367879441171442

    For nonrectangular areas, one can call :func:`~mpmath.quad` recursively.
    For example, we can replicate the earlier example of calculating
    `\pi` by integrating over the unit-circle, and use double
    quadrature to actually measure the area of the circle::

        >>> f = lambda x: quad(lambda y: 1, [-sqrt(1-x**2), sqrt(1-x**2)])
        >>> quad(f, [-1, 1])
        3.14159265358979

    Here is a simple triple integral::

        >>> mp.dps = 15
        >>> f = lambda x,y,z: x*y/(1+z)
        >>> quad(f, [0,1], [0,1], [1,2], method='gauss-legendre')
        0.101366277027041
        >>> (log(3)-log(2))/4
        0.101366277027041

    **Singularities**

    Both tanh-sinh and Gauss-Legendre quadrature are designed to
    integrate smooth (infinitely differentiable) functions. Neither
    algorithm copes well with mid-interval singularities (such as
    mid-interval discontinuities in `f(x)` or `f'(x)`).
    The best solution is to split the integral into parts::

        >>> mp.dps = 15
        >>> quad(lambda x: abs(sin(x)), [0, 2*pi]) # Bad
        3.99900894176779
        >>> quad(lambda x: abs(sin(x)), [0, pi, 2*pi]) # Good
        4.0

    The tanh-sinh rule often works well for integrands having a
    singularity at one or both endpoints::

        >>> mp.dps = 15
        >>> quad(log, [0, 1], method='tanh-sinh') # Good
        -1.0
        >>> quad(log, [0, 1], method='gauss-legendre') # Bad
        -0.999932197413801

    However, the result may still be inaccurate for some functions::

        >>> quad(lambda x: 1/sqrt(x), [0, 1], method='tanh-sinh')
        1.99999999946942

    This problem is not due to the quadrature rule per se, but to
    numerical amplification of errors in the nodes. The problem can be
    circumvented by temporarily increasing the precision::

        >>> mp.dps = 30
        >>> a = quad(lambda x: 1/sqrt(x), [0, 1], method='tanh-sinh')
        >>> mp.dps = 15
        >>> +a
        2.0

    **Highly variable functions**

    For functions that are smooth (in the sense of being infinitely
    differentiable) but contain sharp mid-interval peaks or many
    "bumps", :func:`~mpmath.quad` may fail to provide full accuracy. For
    example, with default settings, :func:`~mpmath.quad` is able to integrate
    `\sin(x)` accurately over an interval of length 100 but not over
    length 1000::

        >>> quad(sin, [0, 100]); 1-cos(100) # Good
        0.137681127712316
        0.137681127712316
        >>> quad(sin, [0, 1000]); 1-cos(1000) # Bad
        -37.8587612408485
        0.437620923709297

    One solution is to break the integration into 10 intervals of
    length 100::

        >>> quad(sin, linspace(0, 1000, 10)) # Good
        0.437620923709297

    Another is to increase the degree of the quadrature::

        >>> quad(sin, [0, 1000], maxdegree=10) # Also good
        0.437620923709297

    Whether splitting the interval or increasing the degree is
    more efficient differs from case to case. Another example is the
    function `1/(1+x^2)`, which has a sharp peak centered around
    `x = 0`::

        >>> f = lambda x: 1/(1+x**2)
        >>> quad(f, [-100, 100]) # Bad
        3.64804647105268
        >>> quad(f, [-100, 100], maxdegree=10) # Good
        3.12159332021646
        >>> quad(f, [-100, 0, 100]) # Also good
        3.12159332021646

    **References**

    1. http://mathworld.wolfram.com/DoubleIntegral.html

    """
    rule = kwargs.get('method', 'tanh-sinh')
    # Resolve the rule: a name selects one of the cached rule
    # instances; anything else is assumed to be a rule class and is
    # instantiated with the context.
    # (isinstance rather than `type(...) is str` so str subclasses work.)
    if isinstance(rule, str):
        if rule == 'tanh-sinh':
            rule = ctx._tanh_sinh
        elif rule == 'gauss-legendre':
            rule = ctx._gauss_legendre
        else:
            raise ValueError("unknown quadrature rule: %s" % rule)
    else:
        rule = rule(ctx)
    verbose = kwargs.get('verbose')
    dim = len(points)
    orig = prec = ctx.prec
    epsilon = ctx.eps/8
    # Maximum quadrature degree; estimated from the precision unless
    # given explicitly.
    m = kwargs.get('maxdegree') or rule.guess_degree(prec)
    points = [ctx._as_points(p) for p in points]
    try:
        # Guard digits while summing; restored in the finally block.
        ctx.prec += 20
        if dim == 1:
            v, err = rule.summation(f, points[0], prec, epsilon, m, verbose)
        elif dim == 2:
            # Iterated integration: the inner summation is re-run for
            # each outer node; only the outer error estimate is kept.
            v, err = rule.summation(lambda x: \
                    rule.summation(lambda y: f(x,y), \
                    points[1], prec, epsilon, m)[0],
                points[0], prec, epsilon, m, verbose)
        elif dim == 3:
            v, err = rule.summation(lambda x: \
                    rule.summation(lambda y: \
                        rule.summation(lambda z: f(x,y,z), \
                        points[2], prec, epsilon, m)[0],
                    points[1], prec, epsilon, m)[0],
                points[0], prec, epsilon, m, verbose)
        else:
            raise NotImplementedError("quadrature must have dim 1, 2 or 3")
    finally:
        ctx.prec = orig
    if kwargs.get("error"):
        return +v, err
    # Unary plus rounds the result to the working precision.
    return +v
|
| 766 |
+
|
| 767 |
+
def quadts(ctx, *args, **kwargs):
    """
    Performs tanh-sinh quadrature.  ``quadts(func, *points, ...)`` is
    a convenience wrapper equivalent to::

        quad(func, *points, ..., method=TanhSinh)

    For example, a single integral and a double integral:

        quadts(lambda x: exp(cos(x)), [0, 1])
        quadts(lambda x, y: exp(cos(x+y)), [0, 1], [0, 1])

    See the documentation for quad for information about how points
    arguments and keyword arguments are parsed, and the documentation
    for TanhSinh for algorithmic information about tanh-sinh
    quadrature.
    """
    forwarded = dict(kwargs, method='tanh-sinh')
    return ctx.quad(*args, **forwarded)
|
| 790 |
+
|
| 791 |
+
def quadgl(ctx, *args, **kwargs):
    """
    Performs Gauss-Legendre quadrature. The call

        quadgl(func, *points, ...)

    is simply a shortcut for:

        quad(func, *points, ..., method=GaussLegendre)

    For example, a single integral and a double integral:

        quadgl(lambda x: exp(cos(x)), [0, 1])
        quadgl(lambda x, y: exp(cos(x+y)), [0, 1], [0, 1])

    See the documentation for quad for information about how points
    arguments and keyword arguments are parsed.

    See documentation for GaussLegendre for algorithmic information
    about Gauss-Legendre quadrature.
    """
    # Force the method and delegate everything else to quad().
    kwargs['method'] = 'gauss-legendre'
    return ctx.quad(*args, **kwargs)
|
| 814 |
+
|
| 815 |
+
def quadosc(ctx, f, interval, omega=None, period=None, zeros=None):
|
| 816 |
+
r"""
|
| 817 |
+
Calculates
|
| 818 |
+
|
| 819 |
+
.. math ::
|
| 820 |
+
|
| 821 |
+
I = \int_a^b f(x) dx
|
| 822 |
+
|
| 823 |
+
where at least one of `a` and `b` is infinite and where
|
| 824 |
+
`f(x) = g(x) \cos(\omega x + \phi)` for some slowly
|
| 825 |
+
decreasing function `g(x)`. With proper input, :func:`~mpmath.quadosc`
|
| 826 |
+
can also handle oscillatory integrals where the oscillation
|
| 827 |
+
rate is different from a pure sine or cosine wave.
|
| 828 |
+
|
| 829 |
+
In the standard case when `|a| < \infty, b = \infty`,
|
| 830 |
+
:func:`~mpmath.quadosc` works by evaluating the infinite series
|
| 831 |
+
|
| 832 |
+
.. math ::
|
| 833 |
+
|
| 834 |
+
I = \int_a^{x_1} f(x) dx +
|
| 835 |
+
\sum_{k=1}^{\infty} \int_{x_k}^{x_{k+1}} f(x) dx
|
| 836 |
+
|
| 837 |
+
where `x_k` are consecutive zeros (alternatively
|
| 838 |
+
some other periodic reference point) of `f(x)`.
|
| 839 |
+
Accordingly, :func:`~mpmath.quadosc` requires information about the
|
| 840 |
+
zeros of `f(x)`. For a periodic function, you can specify
|
| 841 |
+
the zeros by either providing the angular frequency `\omega`
|
| 842 |
+
(*omega*) or the *period* `2 \pi/\omega`. In general, you can
|
| 843 |
+
specify the `n`-th zero by providing the *zeros* arguments.
|
| 844 |
+
Below is an example of each::
|
| 845 |
+
|
| 846 |
+
>>> from mpmath import *
|
| 847 |
+
>>> mp.dps = 15; mp.pretty = True
|
| 848 |
+
>>> f = lambda x: sin(3*x)/(x**2+1)
|
| 849 |
+
>>> quadosc(f, [0,inf], omega=3)
|
| 850 |
+
0.37833007080198
|
| 851 |
+
>>> quadosc(f, [0,inf], period=2*pi/3)
|
| 852 |
+
0.37833007080198
|
| 853 |
+
>>> quadosc(f, [0,inf], zeros=lambda n: pi*n/3)
|
| 854 |
+
0.37833007080198
|
| 855 |
+
>>> (ei(3)*exp(-3)-exp(3)*ei(-3))/2 # Computed by Mathematica
|
| 856 |
+
0.37833007080198
|
| 857 |
+
|
| 858 |
+
Note that *zeros* was specified to multiply `n` by the
|
| 859 |
+
*half-period*, not the full period. In theory, it does not matter
|
| 860 |
+
whether each partial integral is done over a half period or a full
|
| 861 |
+
period. However, if done over half-periods, the infinite series
|
| 862 |
+
passed to :func:`~mpmath.nsum` becomes an *alternating series* and this
|
| 863 |
+
typically makes the extrapolation much more efficient.
|
| 864 |
+
|
| 865 |
+
Here is an example of an integration over the entire real line,
|
| 866 |
+
and a half-infinite integration starting at `-\infty`::
|
| 867 |
+
|
| 868 |
+
>>> quadosc(lambda x: cos(x)/(1+x**2), [-inf, inf], omega=1)
|
| 869 |
+
1.15572734979092
|
| 870 |
+
>>> pi/e
|
| 871 |
+
1.15572734979092
|
| 872 |
+
>>> quadosc(lambda x: cos(x)/x**2, [-inf, -1], period=2*pi)
|
| 873 |
+
-0.0844109505595739
|
| 874 |
+
>>> cos(1)+si(1)-pi/2
|
| 875 |
+
-0.0844109505595738
|
| 876 |
+
|
| 877 |
+
Of course, the integrand may contain a complex exponential just as
|
| 878 |
+
well as a real sine or cosine::
|
| 879 |
+
|
| 880 |
+
>>> quadosc(lambda x: exp(3*j*x)/(1+x**2), [-inf,inf], omega=3)
|
| 881 |
+
(0.156410688228254 + 0.0j)
|
| 882 |
+
>>> pi/e**3
|
| 883 |
+
0.156410688228254
|
| 884 |
+
>>> quadosc(lambda x: exp(3*j*x)/(2+x+x**2), [-inf,inf], omega=3)
|
| 885 |
+
(0.00317486988463794 - 0.0447701735209082j)
|
| 886 |
+
>>> 2*pi/sqrt(7)/exp(3*(j+sqrt(7))/2)
|
| 887 |
+
(0.00317486988463794 - 0.0447701735209082j)
|
| 888 |
+
|
| 889 |
+
**Non-periodic functions**
|
| 890 |
+
|
| 891 |
+
If `f(x) = g(x) h(x)` for some function `h(x)` that is not
|
| 892 |
+
strictly periodic, *omega* or *period* might not work, and it might
|
| 893 |
+
be necessary to use *zeros*.
|
| 894 |
+
|
| 895 |
+
A notable exception can be made for Bessel functions which, though not
|
| 896 |
+
periodic, are "asymptotically periodic" in a sufficiently strong sense
|
| 897 |
+
that the sum extrapolation will work out::
|
| 898 |
+
|
| 899 |
+
>>> quadosc(j0, [0, inf], period=2*pi)
|
| 900 |
+
1.0
|
| 901 |
+
>>> quadosc(j1, [0, inf], period=2*pi)
|
| 902 |
+
1.0
|
| 903 |
+
|
| 904 |
+
More properly, one should provide the exact Bessel function zeros::
|
| 905 |
+
|
| 906 |
+
>>> j0zero = lambda n: findroot(j0, pi*(n-0.25))
|
| 907 |
+
>>> quadosc(j0, [0, inf], zeros=j0zero)
|
| 908 |
+
1.0
|
| 909 |
+
|
| 910 |
+
For an example where *zeros* becomes necessary, consider the
|
| 911 |
+
complete Fresnel integrals
|
| 912 |
+
|
| 913 |
+
.. math ::
|
| 914 |
+
|
| 915 |
+
\int_0^{\infty} \cos x^2\,dx = \int_0^{\infty} \sin x^2\,dx
|
| 916 |
+
= \sqrt{\frac{\pi}{8}}.
|
| 917 |
+
|
| 918 |
+
Although the integrands do not decrease in magnitude as
|
| 919 |
+
`x \to \infty`, the integrals are convergent since the oscillation
|
| 920 |
+
rate increases (causing consecutive periods to asymptotically
|
| 921 |
+
cancel out). These integrals are virtually impossible to calculate
|
| 922 |
+
to any kind of accuracy using standard quadrature rules. However,
|
| 923 |
+
if one provides the correct asymptotic distribution of zeros
|
| 924 |
+
(`x_n \sim \sqrt{n}`), :func:`~mpmath.quadosc` works::
|
| 925 |
+
|
| 926 |
+
>>> mp.dps = 30
|
| 927 |
+
>>> f = lambda x: cos(x**2)
|
| 928 |
+
>>> quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n))
|
| 929 |
+
0.626657068657750125603941321203
|
| 930 |
+
>>> f = lambda x: sin(x**2)
|
| 931 |
+
>>> quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n))
|
| 932 |
+
0.626657068657750125603941321203
|
| 933 |
+
>>> sqrt(pi/8)
|
| 934 |
+
0.626657068657750125603941321203
|
| 935 |
+
|
| 936 |
+
(Interestingly, these integrals can still be evaluated if one
|
| 937 |
+
places some other constant than `\pi` in the square root sign.)
|
| 938 |
+
|
| 939 |
+
In general, if `f(x) \sim g(x) \cos(h(x))`, the zeros follow
|
| 940 |
+
the inverse-function distribution `h^{-1}(x)`::
|
| 941 |
+
|
| 942 |
+
>>> mp.dps = 15
|
| 943 |
+
>>> f = lambda x: sin(exp(x))
|
| 944 |
+
>>> quadosc(f, [1,inf], zeros=lambda n: log(n))
|
| 945 |
+
-0.25024394235267
|
| 946 |
+
>>> pi/2-si(e)
|
| 947 |
+
-0.250243942352671
|
| 948 |
+
|
| 949 |
+
**Non-alternating functions**
|
| 950 |
+
|
| 951 |
+
If the integrand oscillates around a positive value, without
|
| 952 |
+
alternating signs, the extrapolation might fail. A simple trick
|
| 953 |
+
that sometimes works is to multiply or divide the frequency by 2::
|
| 954 |
+
|
| 955 |
+
>>> f = lambda x: 1/x**2+sin(x)/x**4
|
| 956 |
+
>>> quadosc(f, [1,inf], omega=1) # Bad
|
| 957 |
+
1.28642190869861
|
| 958 |
+
>>> quadosc(f, [1,inf], omega=0.5) # Perfect
|
| 959 |
+
1.28652953559617
|
| 960 |
+
>>> 1+(cos(1)+ci(1)+sin(1))/6
|
| 961 |
+
1.28652953559617
|
| 962 |
+
|
| 963 |
+
**Fast decay**
|
| 964 |
+
|
| 965 |
+
:func:`~mpmath.quadosc` is primarily useful for slowly decaying
|
| 966 |
+
integrands. If the integrand decreases exponentially or faster,
|
| 967 |
+
:func:`~mpmath.quad` will likely handle it without trouble (and generally be
|
| 968 |
+
much faster than :func:`~mpmath.quadosc`)::
|
| 969 |
+
|
| 970 |
+
>>> quadosc(lambda x: cos(x)/exp(x), [0, inf], omega=1)
|
| 971 |
+
0.5
|
| 972 |
+
>>> quad(lambda x: cos(x)/exp(x), [0, inf])
|
| 973 |
+
0.5
|
| 974 |
+
|
| 975 |
+
"""
|
| 976 |
+
a, b = ctx._as_points(interval)
|
| 977 |
+
a = ctx.convert(a)
|
| 978 |
+
b = ctx.convert(b)
|
| 979 |
+
if [omega, period, zeros].count(None) != 2:
|
| 980 |
+
raise ValueError( \
|
| 981 |
+
"must specify exactly one of omega, period, zeros")
|
| 982 |
+
if a == ctx.ninf and b == ctx.inf:
|
| 983 |
+
s1 = ctx.quadosc(f, [a, 0], omega=omega, zeros=zeros, period=period)
|
| 984 |
+
s2 = ctx.quadosc(f, [0, b], omega=omega, zeros=zeros, period=period)
|
| 985 |
+
return s1 + s2
|
| 986 |
+
if a == ctx.ninf:
|
| 987 |
+
if zeros:
|
| 988 |
+
return ctx.quadosc(lambda x:f(-x), [-b,-a], lambda n: zeros(-n))
|
| 989 |
+
else:
|
| 990 |
+
return ctx.quadosc(lambda x:f(-x), [-b,-a], omega=omega, period=period)
|
| 991 |
+
if b != ctx.inf:
|
| 992 |
+
raise ValueError("quadosc requires an infinite integration interval")
|
| 993 |
+
if not zeros:
|
| 994 |
+
if omega:
|
| 995 |
+
period = 2*ctx.pi/omega
|
| 996 |
+
zeros = lambda n: n*period/2
|
| 997 |
+
#for n in range(1,10):
|
| 998 |
+
# p = zeros(n)
|
| 999 |
+
# if p > a:
|
| 1000 |
+
# break
|
| 1001 |
+
#if n >= 9:
|
| 1002 |
+
# raise ValueError("zeros do not appear to be correctly indexed")
|
| 1003 |
+
n = 1
|
| 1004 |
+
s = ctx.quadgl(f, [a, zeros(n)])
|
| 1005 |
+
def term(k):
|
| 1006 |
+
return ctx.quadgl(f, [zeros(k), zeros(k+1)])
|
| 1007 |
+
s += ctx.nsum(term, [n, ctx.inf])
|
| 1008 |
+
return s
|
| 1009 |
+
|
| 1010 |
+
def quadsubdiv(ctx, f, interval, tol=None, maxintervals=None, **kwargs):
|
| 1011 |
+
"""
|
| 1012 |
+
Computes the integral of *f* over the interval or path specified
|
| 1013 |
+
by *interval*, using :func:`~mpmath.quad` together with adaptive
|
| 1014 |
+
subdivision of the interval.
|
| 1015 |
+
|
| 1016 |
+
This function gives an accurate answer for some integrals where
|
| 1017 |
+
:func:`~mpmath.quad` fails::
|
| 1018 |
+
|
| 1019 |
+
>>> from mpmath import *
|
| 1020 |
+
>>> mp.dps = 15; mp.pretty = True
|
| 1021 |
+
>>> quad(lambda x: abs(sin(x)), [0, 2*pi])
|
| 1022 |
+
3.99900894176779
|
| 1023 |
+
>>> quadsubdiv(lambda x: abs(sin(x)), [0, 2*pi])
|
| 1024 |
+
4.0
|
| 1025 |
+
>>> quadsubdiv(sin, [0, 1000])
|
| 1026 |
+
0.437620923709297
|
| 1027 |
+
>>> quadsubdiv(lambda x: 1/(1+x**2), [-100, 100])
|
| 1028 |
+
3.12159332021646
|
| 1029 |
+
>>> quadsubdiv(lambda x: ceil(x), [0, 100])
|
| 1030 |
+
5050.0
|
| 1031 |
+
>>> quadsubdiv(lambda x: sin(x+exp(x)), [0,8])
|
| 1032 |
+
0.347400172657248
|
| 1033 |
+
|
| 1034 |
+
The argument *maxintervals* can be set to limit the permissible
|
| 1035 |
+
subdivision::
|
| 1036 |
+
|
| 1037 |
+
>>> quadsubdiv(lambda x: sin(x**2), [0,100], maxintervals=5, error=True)
|
| 1038 |
+
(-5.40487904307774, 5.011)
|
| 1039 |
+
>>> quadsubdiv(lambda x: sin(x**2), [0,100], maxintervals=100, error=True)
|
| 1040 |
+
(0.631417921866934, 1.10101120134116e-17)
|
| 1041 |
+
|
| 1042 |
+
Subdivision does not guarantee a correct answer since, the error
|
| 1043 |
+
estimate on subintervals may be inaccurate::
|
| 1044 |
+
|
| 1045 |
+
>>> quadsubdiv(lambda x: sech(10*x-2)**2 + sech(100*x-40)**4 + sech(1000*x-600)**6, [0,1], error=True)
|
| 1046 |
+
(0.210802735500549, 1.0001111101e-17)
|
| 1047 |
+
>>> mp.dps = 20
|
| 1048 |
+
>>> quadsubdiv(lambda x: sech(10*x-2)**2 + sech(100*x-40)**4 + sech(1000*x-600)**6, [0,1], error=True)
|
| 1049 |
+
(0.21080273550054927738, 2.200000001e-24)
|
| 1050 |
+
|
| 1051 |
+
The second answer is correct. We can get an accurate result at lower
|
| 1052 |
+
precision by forcing a finer initial subdivision::
|
| 1053 |
+
|
| 1054 |
+
>>> mp.dps = 15
|
| 1055 |
+
>>> quadsubdiv(lambda x: sech(10*x-2)**2 + sech(100*x-40)**4 + sech(1000*x-600)**6, linspace(0,1,5))
|
| 1056 |
+
0.210802735500549
|
| 1057 |
+
|
| 1058 |
+
The following integral is too oscillatory for convergence, but we can get a
|
| 1059 |
+
reasonable estimate::
|
| 1060 |
+
|
| 1061 |
+
>>> v, err = fp.quadsubdiv(lambda x: fp.sin(1/x), [0,1], error=True)
|
| 1062 |
+
>>> round(v, 6), round(err, 6)
|
| 1063 |
+
(0.504067, 1e-06)
|
| 1064 |
+
>>> sin(1) - ci(1)
|
| 1065 |
+
0.504067061906928
|
| 1066 |
+
|
| 1067 |
+
"""
|
| 1068 |
+
queue = []
|
| 1069 |
+
for i in range(len(interval)-1):
|
| 1070 |
+
queue.append((interval[i], interval[i+1]))
|
| 1071 |
+
total = ctx.zero
|
| 1072 |
+
total_error = ctx.zero
|
| 1073 |
+
if maxintervals is None:
|
| 1074 |
+
maxintervals = 10 * ctx.prec
|
| 1075 |
+
count = 0
|
| 1076 |
+
quad_args = kwargs.copy()
|
| 1077 |
+
quad_args["verbose"] = False
|
| 1078 |
+
quad_args["error"] = True
|
| 1079 |
+
if tol is None:
|
| 1080 |
+
tol = +ctx.eps
|
| 1081 |
+
orig = ctx.prec
|
| 1082 |
+
try:
|
| 1083 |
+
ctx.prec += 5
|
| 1084 |
+
while queue:
|
| 1085 |
+
a, b = queue.pop()
|
| 1086 |
+
s, err = ctx.quad(f, [a, b], **quad_args)
|
| 1087 |
+
if kwargs.get("verbose"):
|
| 1088 |
+
print("subinterval", count, a, b, err)
|
| 1089 |
+
if err < tol or count > maxintervals:
|
| 1090 |
+
total += s
|
| 1091 |
+
total_error += err
|
| 1092 |
+
else:
|
| 1093 |
+
count += 1
|
| 1094 |
+
if count == maxintervals and kwargs.get("verbose"):
|
| 1095 |
+
print("warning: number of intervals exceeded maxintervals")
|
| 1096 |
+
if a == -ctx.inf and b == ctx.inf:
|
| 1097 |
+
m = 0
|
| 1098 |
+
elif a == -ctx.inf:
|
| 1099 |
+
m = min(b-1, 2*b)
|
| 1100 |
+
elif b == ctx.inf:
|
| 1101 |
+
m = max(a+1, 2*a)
|
| 1102 |
+
else:
|
| 1103 |
+
m = a + (b - a) / 2
|
| 1104 |
+
queue.append((a, m))
|
| 1105 |
+
queue.append((m, b))
|
| 1106 |
+
finally:
|
| 1107 |
+
ctx.prec = orig
|
| 1108 |
+
if kwargs.get("error"):
|
| 1109 |
+
return +total, +total_error
|
| 1110 |
+
else:
|
| 1111 |
+
return +total
|
| 1112 |
+
|
| 1113 |
+
# When the module is executed directly, run the doctests embedded in the
# docstrings above (requires mpmath to be importable for the examples).
if __name__ == '__main__':
    import doctest
    doctest.testmod()
|
vllm/lib/python3.10/site-packages/mpmath/libmp/__init__.py
ADDED
|
@@ -0,0 +1,77 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .libmpf import (prec_to_dps, dps_to_prec, repr_dps,
|
| 2 |
+
round_down, round_up, round_floor, round_ceiling, round_nearest,
|
| 3 |
+
to_pickable, from_pickable, ComplexResult,
|
| 4 |
+
fzero, fnzero, fone, fnone, ftwo, ften, fhalf, fnan, finf, fninf,
|
| 5 |
+
math_float_inf, round_int, normalize, normalize1,
|
| 6 |
+
from_man_exp, from_int, to_man_exp, to_int, mpf_ceil, mpf_floor,
|
| 7 |
+
mpf_nint, mpf_frac,
|
| 8 |
+
from_float, from_npfloat, from_Decimal, to_float, from_rational, to_rational, to_fixed,
|
| 9 |
+
mpf_rand, mpf_eq, mpf_hash, mpf_cmp, mpf_lt, mpf_le, mpf_gt, mpf_ge,
|
| 10 |
+
mpf_pos, mpf_neg, mpf_abs, mpf_sign, mpf_add, mpf_sub, mpf_sum,
|
| 11 |
+
mpf_mul, mpf_mul_int, mpf_shift, mpf_frexp,
|
| 12 |
+
mpf_div, mpf_rdiv_int, mpf_mod, mpf_pow_int,
|
| 13 |
+
mpf_perturb,
|
| 14 |
+
to_digits_exp, to_str, str_to_man_exp, from_str, from_bstr, to_bstr,
|
| 15 |
+
mpf_sqrt, mpf_hypot)
|
| 16 |
+
|
| 17 |
+
from .libmpc import (mpc_one, mpc_zero, mpc_two, mpc_half,
|
| 18 |
+
mpc_is_inf, mpc_is_infnan, mpc_to_str, mpc_to_complex, mpc_hash,
|
| 19 |
+
mpc_conjugate, mpc_is_nonzero, mpc_add, mpc_add_mpf,
|
| 20 |
+
mpc_sub, mpc_sub_mpf, mpc_pos, mpc_neg, mpc_shift, mpc_abs,
|
| 21 |
+
mpc_arg, mpc_floor, mpc_ceil, mpc_nint, mpc_frac, mpc_mul, mpc_square,
|
| 22 |
+
mpc_mul_mpf, mpc_mul_imag_mpf, mpc_mul_int,
|
| 23 |
+
mpc_div, mpc_div_mpf, mpc_reciprocal, mpc_mpf_div,
|
| 24 |
+
complex_int_pow, mpc_pow, mpc_pow_mpf, mpc_pow_int,
|
| 25 |
+
mpc_sqrt, mpc_nthroot, mpc_cbrt, mpc_exp, mpc_log, mpc_cos, mpc_sin,
|
| 26 |
+
mpc_tan, mpc_cos_pi, mpc_sin_pi, mpc_cosh, mpc_sinh, mpc_tanh,
|
| 27 |
+
mpc_atan, mpc_acos, mpc_asin, mpc_asinh, mpc_acosh, mpc_atanh,
|
| 28 |
+
mpc_fibonacci, mpf_expj, mpf_expjpi, mpc_expj, mpc_expjpi,
|
| 29 |
+
mpc_cos_sin, mpc_cos_sin_pi)
|
| 30 |
+
|
| 31 |
+
from .libelefun import (ln2_fixed, mpf_ln2, ln10_fixed, mpf_ln10,
|
| 32 |
+
pi_fixed, mpf_pi, e_fixed, mpf_e, phi_fixed, mpf_phi,
|
| 33 |
+
degree_fixed, mpf_degree,
|
| 34 |
+
mpf_pow, mpf_nthroot, mpf_cbrt, log_int_fixed, agm_fixed,
|
| 35 |
+
mpf_log, mpf_log_hypot, mpf_exp, mpf_cos_sin, mpf_cos, mpf_sin, mpf_tan,
|
| 36 |
+
mpf_cos_sin_pi, mpf_cos_pi, mpf_sin_pi, mpf_cosh_sinh,
|
| 37 |
+
mpf_cosh, mpf_sinh, mpf_tanh, mpf_atan, mpf_atan2, mpf_asin,
|
| 38 |
+
mpf_acos, mpf_asinh, mpf_acosh, mpf_atanh, mpf_fibonacci)
|
| 39 |
+
|
| 40 |
+
from .libhyper import (NoConvergence, make_hyp_summator,
|
| 41 |
+
mpf_erf, mpf_erfc, mpf_ei, mpc_ei, mpf_e1, mpc_e1, mpf_expint,
|
| 42 |
+
mpf_ci_si, mpf_ci, mpf_si, mpc_ci, mpc_si, mpf_besseljn,
|
| 43 |
+
mpc_besseljn, mpf_agm, mpf_agm1, mpc_agm, mpc_agm1,
|
| 44 |
+
mpf_ellipk, mpc_ellipk, mpf_ellipe, mpc_ellipe)
|
| 45 |
+
|
| 46 |
+
from .gammazeta import (catalan_fixed, mpf_catalan,
|
| 47 |
+
khinchin_fixed, mpf_khinchin, glaisher_fixed, mpf_glaisher,
|
| 48 |
+
apery_fixed, mpf_apery, euler_fixed, mpf_euler, mertens_fixed,
|
| 49 |
+
mpf_mertens, twinprime_fixed, mpf_twinprime,
|
| 50 |
+
mpf_bernoulli, bernfrac, mpf_gamma_int,
|
| 51 |
+
mpf_factorial, mpc_factorial, mpf_gamma, mpc_gamma,
|
| 52 |
+
mpf_loggamma, mpc_loggamma, mpf_rgamma, mpc_rgamma,
|
| 53 |
+
mpf_harmonic, mpc_harmonic, mpf_psi0, mpc_psi0,
|
| 54 |
+
mpf_psi, mpc_psi, mpf_zeta_int, mpf_zeta, mpc_zeta,
|
| 55 |
+
mpf_altzeta, mpc_altzeta, mpf_zetasum, mpc_zetasum)
|
| 56 |
+
|
| 57 |
+
from .libmpi import (mpi_str,
|
| 58 |
+
mpi_from_str, mpi_to_str,
|
| 59 |
+
mpi_eq, mpi_ne,
|
| 60 |
+
mpi_lt, mpi_le, mpi_gt, mpi_ge,
|
| 61 |
+
mpi_add, mpi_sub, mpi_delta, mpi_mid,
|
| 62 |
+
mpi_pos, mpi_neg, mpi_abs, mpi_mul, mpi_div, mpi_exp,
|
| 63 |
+
mpi_log, mpi_sqrt, mpi_pow_int, mpi_pow, mpi_cos_sin,
|
| 64 |
+
mpi_cos, mpi_sin, mpi_tan, mpi_cot,
|
| 65 |
+
mpi_atan, mpi_atan2,
|
| 66 |
+
mpci_pos, mpci_neg, mpci_add, mpci_sub, mpci_mul, mpci_div, mpci_pow,
|
| 67 |
+
mpci_abs, mpci_pow, mpci_exp, mpci_log, mpci_cos, mpci_sin,
|
| 68 |
+
mpi_gamma, mpci_gamma, mpi_loggamma, mpci_loggamma,
|
| 69 |
+
mpi_rgamma, mpci_rgamma, mpi_factorial, mpci_factorial)
|
| 70 |
+
|
| 71 |
+
from .libintmath import (trailing, bitcount, numeral, bin_to_radix,
|
| 72 |
+
isqrt, isqrt_small, isqrt_fast, sqrt_fixed, sqrtrem, ifib, ifac,
|
| 73 |
+
list_primes, isprime, moebius, gcd, eulernum, stirling1, stirling2)
|
| 74 |
+
|
| 75 |
+
from .backend import (gmpy, sage, BACKEND, STRICT, MPZ, MPZ_TYPE,
|
| 76 |
+
MPZ_ZERO, MPZ_ONE, MPZ_TWO, MPZ_THREE, MPZ_FIVE, int_types,
|
| 77 |
+
HASH_MODULUS, HASH_BITS)
|
vllm/lib/python3.10/site-packages/mpmath/libmp/__pycache__/__init__.cpython-310.pyc
ADDED
|
Binary file (6.96 kB). View file
|
|
|