Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .venv/lib/python3.11/site-packages/mpmath/__pycache__/ctx_fp.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/__pycache__/ctx_iv.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/__pycache__/ctx_mp_python.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/__pycache__/identification.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/__pycache__/math2.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/__pycache__/rational.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/__pycache__/usertools.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/__pycache__/visualization.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/calculus/differentiation.py +647 -0
- .venv/lib/python3.11/site-packages/mpmath/calculus/odes.py +288 -0
- .venv/lib/python3.11/site-packages/mpmath/calculus/optimization.py +1102 -0
- .venv/lib/python3.11/site-packages/mpmath/calculus/quadrature.py +1115 -0
- .venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/__init__.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/bessel.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/elliptic.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/expintegrals.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/factorials.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/functions.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/hypergeometric.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/rszeta.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/zeta.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/zetazeros.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/libmp/libelefun.py +1428 -0
- .venv/lib/python3.11/site-packages/mpmath/tests/__init__.py +0 -0
- .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/__init__.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/extratest_gamma.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/extratest_zeta.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_basic_ops.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_calculus.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_convert.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_diff.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_division.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_eigen.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_eigen_symmetric.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_functions.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_gammazeta.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_identify.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_interval.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_levin.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_matrices.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_mpmath.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_ode.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_power.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_quad.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_rootfinding.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_str.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_summation.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_trig.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/torture.cpython-311.pyc +0 -0
- .venv/lib/python3.11/site-packages/mpmath/tests/extratest_gamma.py +215 -0
.venv/lib/python3.11/site-packages/mpmath/__pycache__/ctx_fp.cpython-311.pyc
ADDED
|
Binary file (13 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/__pycache__/ctx_iv.cpython-311.pyc
ADDED
|
Binary file (38.7 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/__pycache__/ctx_mp_python.cpython-311.pyc
ADDED
|
Binary file (61.1 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/__pycache__/identification.cpython-311.pyc
ADDED
|
Binary file (41.3 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/__pycache__/math2.cpython-311.pyc
ADDED
|
Binary file (28.5 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/__pycache__/rational.cpython-311.pyc
ADDED
|
Binary file (10.4 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/__pycache__/usertools.cpython-311.pyc
ADDED
|
Binary file (4.91 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/__pycache__/visualization.cpython-311.pyc
ADDED
|
Binary file (17.1 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/calculus/differentiation.py
ADDED
|
@@ -0,0 +1,647 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ..libmp.backend import xrange
from .calculus import defun

# Python 2/3 compatibility: prefer the lazy iterator when the dict type
# provides one (Python 2), otherwise fall back to dict.items (Python 3).
iteritems = getattr(dict, 'iteritems', dict.items)

#----------------------------------------------------------------------------#
#                              Differentiation                               #
#----------------------------------------------------------------------------#
|
| 12 |
+
|
| 13 |
+
@defun
def difference(ctx, s, n):
    r"""
    Return the `n`-th forward difference of a sequence `(s_k)`
    containing at least `n+1` items,

    .. math ::

        \Delta^n = \sum_{k=0}^{\infty} (-1)^{k+n} {n \choose k} s_k.

    """
    n = int(n)
    total = ctx.zero
    # Running signed binomial coefficient (-1)^(n-k) * C(n, k),
    # updated incrementally with exact integer arithmetic.
    coeff = (-1) ** (n & 1)
    for idx in xrange(n + 1):
        total += coeff * s[idx]
        coeff = (coeff * (idx - n)) // (idx + 1)
    return total
|
| 30 |
+
|
| 31 |
+
def hsteps(ctx, f, x, n, prec, **options):
    """
    Sample ``f`` at ``n+1`` equally spaced points near ``x`` at elevated
    working precision, as needed for an order-``n`` finite difference.

    Returns ``(values, norm, workprec)`` where ``values`` holds the
    function samples, ``norm`` is the step normalization for the
    difference quotient, and ``workprec`` is the precision used.

    Recognized options: ``h``, ``addprec``, ``relative``, ``direction``,
    ``singular`` (see :func:`diff`).
    """
    singular = options.get('singular')
    addprec = options.get('addprec', 10)
    direction = options.get('direction', 0)
    workprec = (prec + 2*addprec) * (n + 1)
    saved_prec = ctx.prec
    try:
        ctx.prec = workprec
        h = options.get('h')
        if h is None:
            # Automatic step size; optionally scaled by the magnitude of x
            # so that the perturbation is relative rather than absolute.
            hextramag = int(ctx.mag(x)) if options.get('relative') else 0
            h = ctx.ldexp(1, -prec - addprec - hextramag)
        else:
            h = ctx.convert(h)
        if direction:
            # Directed difference: sample at x, x+h, ..., x+n*h,
            # with the sign of h chosen from the direction.
            h *= ctx.sign(direction)
            steps = xrange(n + 1)
            norm = h
        else:
            # Central difference: sample at x-n*h, x-(n-2)*h, ..., x+n*h.
            steps = xrange(-n, n + 1, 2)
            norm = 2 * h
        if singular:
            # Perturb so that f is never evaluated exactly at x.
            x += 0.5 * h
        values = [f(x + k*h) for k in steps]
        return values, norm, workprec
    finally:
        ctx.prec = saved_prec
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
@defun
def diff(ctx, f, x, n=1, **options):
    r"""
    Numerically compute the derivative `f'(x)`, or generally, for an
    integer `n \ge 0`, the `n`-th derivative `f^{(n)}(x)`.

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> diff(lambda x: x**2 + x, 1.0)
        3.0
        >>> diff(lambda x: x**2 + x, 1.0, 2)
        2.0

    If ``x`` is a tuple of arguments `(x_1, \ldots, x_k)` and ``n`` a
    matching tuple of orders `(n_1, \ldots, n_k)`, the mixed partial
    derivative `f^{(n_1,\ldots,n_k)}(x_1,\ldots,x_k)` is computed::

        >>> diff(lambda x,y: 3*x*y + 2*y - x, (0.25, 0.5), (0,1))
        2.75

    **Options**

    ``method``
        ``'step'`` (default) uses a finite difference with a small step
        size `h`; ``'quad'`` uses numerical contour integration.
    ``direction``
        Direction of the finite difference: -1 (left), 0 (central,
        default), +1 (right), or any complex number.
    ``addprec``
        Extra precision for `h`, accounting for the function's
        sensitivity to perturbations (default = 10).
    ``relative``
        Choose `h` relative to the magnitude of `x` (default = False).
    ``h``
        Manually select the step size `h`.
    ``singular``
        If True, avoid evaluating exactly at `x`; useful for functions
        with removable singularities (default = False).
    ``radius``
        Radius of the integration contour for ``method='quad'``
        (default = 0.25); `f` must be free of singularities within it.

    A finite difference needs `n+1` evaluations at `(n+1)` times the
    target precision, so `f` must support fast high-precision
    evaluation. Quadrature needs more evaluations but little extra
    precision, which can be faster for high orders.
    """
    # Detect the partial-derivative form: both n and x must be iterable.
    partial = False
    try:
        orders = list(n)
        x = list(x)
        partial = True
    except TypeError:
        pass
    if partial:
        x = [ctx.convert(arg) for arg in x]
        return _partial_diff(ctx, f, x, orders, options)
    method = options.get('method', 'step')
    # Order zero needs no differencing unless quad/singular is requested.
    if n == 0 and method != 'quad' and not options.get('singular'):
        return f(ctx.convert(x))
    saved_prec = ctx.prec
    try:
        if method == 'step':
            values, norm, workprec = hsteps(ctx, f, x, n, saved_prec, **options)
            ctx.prec = workprec
            result = ctx.difference(values, n) / norm**n
        elif method == 'quad':
            # Cauchy integral formula on a circle of the given radius.
            ctx.prec += 10
            radius = ctx.convert(options.get('radius', 0.25))
            def integrand(t):
                offset = radius * ctx.expj(t)
                return f(x + offset) / offset**n
            integral = ctx.quadts(integrand, [0, 2*ctx.pi])
            result = integral * ctx.factorial(n) / (2*ctx.pi)
        else:
            raise ValueError("unknown method: %r" % method)
    finally:
        ctx.prec = saved_prec
    # Unary plus rounds the result to the restored working precision.
    return +result
|
| 205 |
+
|
| 206 |
+
def _partial_diff(ctx, f, xs, orders, options):
    # Recursively peel off one nonzero differentiation order at a time:
    # differentiate in the first variable with a nonzero order, then
    # recurse with that order zeroed out.
    if not orders:
        return f()
    if not sum(orders):
        return f(*xs)
    i = 0
    for i in range(len(orders)):
        if orders[i]:
            break
    order = orders[i]
    def fdiff_inner(*f_args):
        # Freeze all arguments except the i-th, and differentiate in it.
        def single_var(t):
            return f(*(f_args[:i] + (t,) + f_args[i+1:]))
        return ctx.diff(single_var, f_args[i], order, **options)
    orders[i] = 0
    return _partial_diff(ctx, fdiff_inner, xs, orders, options)
|
| 222 |
+
|
| 223 |
+
@defun
def diffs(ctx, f, x, n=None, **options):
    r"""
    Return a generator yielding the sequence of derivatives

    .. math ::

        f(x), f'(x), f''(x), \ldots, f^{(k)}(x), \ldots

    With ``method='step'``, only `O(k)` function evaluations are needed
    for the first `k` derivatives, versus roughly `O(k^2)` for `k`
    separate :func:`~mpmath.diff` calls. If ``n`` is given, the
    generator stops after the `n`-th derivative, which is slightly more
    efficient when the needed count is known in advance.

    Options are the same as for :func:`~mpmath.diff`.

        >>> from mpmath import *
        >>> mp.dps = 15
        >>> nprint(list(diffs(cos, 1, 5)))
        [0.540302, -0.841471, -0.540302, 0.841471, 0.540302, -0.841471]

    """
    n = ctx.inf if n is None else int(n)
    if options.get('method', 'step') != 'step':
        # Non-step methods get no batching benefit; just call diff repeatedly.
        k = 0
        while k < n + 1:
            yield ctx.diff(f, x, k, **options)
            k += 1
        return
    # Zeroth derivative: plain evaluation, unless the point must be avoided.
    if options.get('singular'):
        yield ctx.diff(f, x, 0, singular=True)
    else:
        yield f(ctx.convert(x))
    if n < 1:
        return
    # Generate derivatives in geometrically growing batches [lo, hi),
    # reusing one set of samples per batch.
    if n == ctx.inf:
        lo, hi = 1, 2
    else:
        lo, hi = 1, n + 1
    while True:
        callprec = ctx.prec
        samples, norm, workprec = hsteps(ctx, f, x, hi, callprec, **options)
        for k in xrange(lo, hi):
            try:
                ctx.prec = workprec
                d = ctx.difference(samples, k) / norm**k
            finally:
                ctx.prec = callprec
            yield +d
            if k >= n:
                return
        lo, hi = hi, int(lo * 1.4 + 1)
        hi = min(hi, n)
|
| 296 |
+
|
| 297 |
+
def iterable_to_function(gen):
    """Wrap an iterable so its k-th item is available as ``f(k)``.

    Items are pulled from the iterator lazily and cached, so repeated
    and out-of-order access is cheap (the iterator is consumed at most
    once up to the highest index requested).
    """
    gen = iter(gen)
    cache = []
    def f(k):
        while len(cache) <= k:
            cache.append(next(gen))
        return cache[k]
    return f
|
| 305 |
+
|
| 306 |
+
@defun
def diffs_prod(ctx, factors):
    r"""
    Given a list of `N` iterables or generators yielding
    `f_k(x), f'_k(x), f''_k(x), \ldots` for `k = 1, \ldots, N`,
    generate `g(x), g'(x), g''(x), \ldots` where
    `g(x) = f_1(x) f_2(x) \cdots f_N(x)`, using the Leibniz rule.

    At high precision and for large orders, this is typically more
    efficient than numerical differentiation if the derivatives of each
    `f_k(x)` admit direct computation.

    Note: This function does not increase the working precision
    internally, so guard digits may have to be added externally for
    full accuracy.
    """
    count = len(factors)
    if count == 1:
        for derivative in factors[0]:
            yield derivative
    else:
        # Divide and conquer: combine the two halves with the Leibniz rule
        #   (u*v)^(n) = sum_k C(n,k) u^(n-k) v^(k).
        left = iterable_to_function(ctx.diffs_prod(factors[:count//2]))
        right = iterable_to_function(ctx.diffs_prod(factors[count//2:]))
        n = 0
        while True:
            total = left(n) * right(0)
            binom = 1
            for k in xrange(1, n + 1):
                # Incrementally update the binomial coefficient C(n, k).
                binom = binom * (n - k + 1) // k
                total += binom * left(n - k) * right(k)
            yield total
            n += 1
|
| 359 |
+
|
| 360 |
+
def dpoly(n, _cache={}):
    """
    Return the nth differentiation polynomial for exp (Faa di Bruno's
    formula), as a dict mapping tuples of exponents to integer
    coefficients.

    The mutable default ``_cache`` is an intentional memoization table
    shared across calls.

    TODO: most exponents are zero, so maybe a sparse representation
    would be better.
    """
    if n in _cache:
        return _cache[n]
    if not _cache:
        _cache[0] = {(0,): 1}
    # Build level n from level n-1, first widening each exponent tuple.
    prev = dict((powers + (0,), coeff)
                for (powers, coeff) in iteritems(dpoly(n - 1)))
    result = {}
    # Term from multiplying by f'(x): bump the first exponent.
    for powers, coeff in iteritems(prev):
        bumped = (powers[0] + 1,) + powers[1:]
        result[bumped] = result.get(bumped, 0) + coeff
    # Terms from differentiating each existing factor: move one unit of
    # exponent from position k to position k+1, scaled by the exponent.
    for powers, coeff in iteritems(prev):
        if not sum(powers):
            continue
        for k, p in enumerate(powers):
            if p:
                shifted = powers[:k] + (p - 1, powers[k+1] + 1) + powers[k+2:]
                result[shifted] = result.get(shifted, 0) + p * coeff
    _cache[n] = result
    return _cache[n]
|
| 392 |
+
|
| 393 |
+
@defun
def diffs_exp(ctx, fdiffs):
    r"""
    Given an iterable or generator yielding
    `f(x), f'(x), f''(x), \ldots`, generate
    `g(x), g'(x), g''(x), \ldots` where `g(x) = \exp(f(x))`,
    via Faa di Bruno's formula.

    At high precision and for large orders, this is typically more
    efficient than numerical differentiation if the derivatives of
    `f(x)` admit direct computation.

    Note: This function does not increase the working precision
    internally, so guard digits may have to be added externally for
    full accuracy.
    """
    fn = iterable_to_function(fdiffs)
    g0 = ctx.exp(fn(0))
    yield g0
    order = 1
    while True:
        # g^(order) = exp(f) * P_order(f', f'', ...), where P_order is the
        # differentiation polynomial produced by dpoly.
        acc = ctx.mpf(0)
        for powers, coeff in iteritems(dpoly(order)):
            acc += coeff * ctx.fprod(
                fn(k + 1)**p for (k, p) in enumerate(powers) if p)
        yield acc * g0
        order += 1
|
| 447 |
+
|
| 448 |
+
@defun
def differint(ctx, f, x, n=1, x0=0):
    r"""
    Calculate the Riemann-Liouville differintegral (fractional
    derivative), defined by

    .. math ::

        \,_{x_0}{\mathbb{D}}^n_xf(x) = \frac{1}{\Gamma(m-n)} \frac{d^m}{dx^m}
        \int_{x_0}^{x}(x-t)^{m-n-1}f(t)dt

    where `f` is a given (presumably well-behaved) function, `x` is the
    evaluation point, `n` is the order, and `x_0` is the reference point
    of integration (`m` is selected automatically).

    With `n = 1` this is the ordinary derivative `f'(x)`; with `n = -1`
    it gives `\int_{x_0}^x f(t) dt`; and since `n` may be any number,
    the operator interpolates continuously between iterated
    differentiation and iterated integration.

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> x = mpf(3); p = 2; n = 0.5
        >>> differint(lambda t: t**p, x, n)
        7.81764019044672
        >>> gamma(p+1)/gamma(p-n+1) * x**(p-n)
        7.81764019044672

    """
    # Smallest integer m with m > Re(n), but at least 1, so that the
    # integral below converges and an ordinary m-th derivative remains.
    m = max(int(ctx.ceil(ctx.re(n))) + 1, 1)
    r = m - n - 1
    def iterated(s):
        return ctx.quad(lambda t: (s - t)**r * f(t), [x0, s])
    return ctx.diff(iterated, x, m) / ctx.gamma(m - n)
|
| 520 |
+
|
| 521 |
+
@defun
def diffun(ctx, f, n=1, **options):
    r"""
    Given a function `f`, return a function `g(x)` that evaluates the
    nth derivative `f^{(n)}(x)`::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> cos2 = diffun(sin)
        >>> cos(1.3), cos2(1.3)
        (0.267498828624587, 0.267498828624587)

    The function `f` must support arbitrary precision evaluation.
    See :func:`~mpmath.diff` for additional details and supported
    keyword options.
    """
    if n == 0:
        return f
    return lambda x: ctx.diff(f, x, n, **options)
|
| 545 |
+
|
| 546 |
+
@defun
def taylor(ctx, f, x, n, **options):
    r"""
    Produce a degree-`n` Taylor polynomial around the point `x` of the
    given function `f`, returned as a list of coefficients
    `[c_0, c_1, \ldots, c_n]` where `c_k = f^{(k)}(x)/k!`.

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> nprint(chop(taylor(sin, 0, 5)))
        [0.0, 1.0, 0.0, -0.166667, 0.0, 0.00833333]

    The coefficients are computed using high-order numerical
    differentiation; `f` must be evaluable to arbitrary precision.
    See :func:`~mpmath.diff` for supported keyword options.

    To evaluate the polynomial as an approximation of `f`, e.g. with
    :func:`~mpmath.polyval`, reverse the coefficients and subtract the
    expansion point from the argument:

        >>> p = taylor(exp, 2.0, 10)
        >>> polyval(p[::-1], 2.5 - 2.0)
        12.1824939606092

    """
    apply_chop = options.get("chop", True)
    coefficients = []
    for k, derivative in enumerate(ctx.diffs(f, x, n, **options)):
        if apply_chop:
            derivative = ctx.chop(derivative)
        coefficients.append(derivative / ctx.factorial(k))
    return coefficients
|
| 579 |
+
|
| 580 |
+
@defun
def pade(ctx, a, L, M):
    r"""
    Compute a Pade approximation of degree `(L, M)` to a function.
    Given at least `L+M+1` Taylor coefficients `a` approximating a
    function `A(x)`, return coefficient lists of polynomials `P, Q`
    satisfying

    .. math ::

        P = \sum_{k=0}^L p_k x^k

        Q = \sum_{k=0}^M q_k x^k

        Q_0 = 1

        A(x) Q(x) = P(x) + O(x^{L+M+1})

    `P(x)/Q(x)` can provide a good approximation to an analytic function
    beyond the radius of convergence of its Taylor series (example from
    G.A. Baker 'Essentials of Pade Approximants' Academic Press, Ch.1A)::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> one = mpf(1)
        >>> def f(x):
        ...     return sqrt((one + 2*x)/(one + x))
        ...
        >>> a = taylor(f, 0, 6)
        >>> p, q = pade(a, 3, 3)
        >>> x = 10
        >>> polyval(p[::-1], x)/polyval(q[::-1], x)
        1.38169105566806

    """
    # Determining the L+1 coefficients of P and M coefficients of Q
    # requires L+M+1 coefficients of A.
    if len(a) < L + M + 1:
        raise ValueError("L+M+1 Coefficients should be provided")

    if M == 0:
        # Degenerate case: Q = 1, so P is just the truncated series.
        if L == 0:
            return [ctx.one], [ctx.one]
        return a[:L+1], [ctx.one]

    # Solve the linear system for q[1..M]:
    #   a[L]*q[1]     + ... + a[L-M+1]*q[M] = -a[L+1]
    #   ...
    #   a[L+M-1]*q[1] + ... + a[L]*q[M]     = -a[L+M]
    sys_matrix = ctx.matrix(M)
    for row in range(M):
        for col in range(min(M, L + row + 1)):
            sys_matrix[row, col] = a[L + row - col]
    rhs = -ctx.matrix(a[(L+1):(L+M+1)])
    solution = ctx.lu_solve(sys_matrix, rhs)
    q = [ctx.one] + list(solution)

    # Back-substitute to get p: p[i] = sum_{j=0}^{min(M,i)} q[j]*a[i-j].
    p = [0] * (L + 1)
    for i in range(L + 1):
        acc = a[i]
        for j in range(1, min(M, i) + 1):
            acc += q[j] * a[i - j]
        p[i] = acc
    return p, q
|
.venv/lib/python3.11/site-packages/mpmath/calculus/odes.py
ADDED
|
@@ -0,0 +1,288 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from bisect import bisect
|
| 2 |
+
from ..libmp.backend import xrange
|
| 3 |
+
|
| 4 |
+
class ODEMethods(object):
    # Mixin namespace for ODE solvers; ``odefun`` is attached to it as a
    # method at the bottom of this module, so mpmath contexts that inherit
    # from this class gain ``ctx.odefun``.
    pass
|
| 6 |
+
|
| 7 |
+
def ode_taylor(ctx, derivs, x0, y0, tol_prec, n):
    """
    Compute a degree-n local Taylor expansion of the ODE solution around x0.

    ``derivs(x, y)`` evaluates the right-hand side F of y' = F(x, y);
    ``y0`` is the (vector) initial value at ``x0``; ``tol_prec`` is the
    tolerance expressed as a binary exponent; ``n`` is the expansion degree.

    Returns ``(ser, xb)`` where ``ser[d]`` is the list of Taylor
    coefficients of component d (about x0) and ``xb`` is an estimated
    right endpoint up to which the expansion should give full accuracy.
    """
    # Step size for the sampling grid doubles as the tolerance value.
    h = tol = ctx.ldexp(1, -tol_prec)
    dim = len(y0)
    xs = [x0]
    ys = [y0]
    x = x0
    y = y0
    orig = ctx.prec
    try:
        # Extra working precision: forward differences of order j lose
        # roughly j*prec bits of cancellation, so scale precision with n.
        ctx.prec = orig*(1+n)
        # Use n steps with Euler's method to get
        # evaluation points for derivatives
        for i in range(n):
            fxy = derivs(x, y)
            y = [y[i]+h*fxy[i] for i in xrange(len(y))]
            x += h
            xs.append(x)
            ys.append(y)
        # Compute derivatives
        # j-th Taylor coefficient via the forward-difference formula:
        # f^(j)(x0)/j! ~= h^(-j)/j! * sum_i (-1)^(j-i) C(j,i) ys[i].
        ser = [[] for d in range(dim)]
        for j in range(n+1):
            s = [0]*dim
            # b walks through the alternating binomial coefficients
            # (-1)^j, updated incrementally below instead of recomputed.
            b = (-1) ** (j & 1)
            k = 1
            for i in range(j+1):
                for d in range(dim):
                    s[d] += b * ys[i][d]
                b = (b * (j-k+1)) // (-k)
                k += 1
            scale = h**(-j) / ctx.fac(j)
            for d in range(dim):
                s[d] = s[d] * scale
                ser[d].append(s[d])
    finally:
        # Always restore the caller's precision, even if derivs raised.
        ctx.prec = orig
    # Estimate radius for which we can get full accuracy.
    # XXX: do this right for zeros
    # Heuristic: require the last (degree-n) term to stay below tol,
    # i.e. radius^n * |c_n| <= tol for every component.
    radius = ctx.one
    for ts in ser:
        if ts[-1]:
            radius = min(radius, ctx.nthroot(tol/abs(ts[-1]), n))
    radius /= 2  # XXX
    return ser, x0+radius
|
| 50 |
+
|
| 51 |
+
def odefun(ctx, F, x0, y0, tol=None, degree=None, method='taylor', verbose=False):
    r"""
    Returns a function `y(x) = [y_0(x), y_1(x), \ldots, y_n(x)]`
    that is a numerical solution of the `n+1`-dimensional first-order
    ordinary differential equation (ODE) system

    .. math ::

        y_0'(x) = F_0(x, [y_0(x), y_1(x), \ldots, y_n(x)])

        y_1'(x) = F_1(x, [y_0(x), y_1(x), \ldots, y_n(x)])

        \vdots

        y_n'(x) = F_n(x, [y_0(x), y_1(x), \ldots, y_n(x)])

    The derivatives are specified by the vector-valued function
    *F* that evaluates
    `[y_0', \ldots, y_n'] = F(x, [y_0, \ldots, y_n])`.
    The initial point `x_0` is specified by the scalar argument *x0*,
    and the initial value `y(x_0) = [y_0(x_0), \ldots, y_n(x_0)]` is
    specified by the vector argument *y0*.

    For convenience, if the system is one-dimensional, you may optionally
    provide just a scalar value for *y0*. In this case, *F* should accept
    a scalar *y* argument and return a scalar. The solution function
    *y* will return scalar values instead of length-1 vectors.

    Evaluation of the solution function `y(x)` is permitted
    for any `x \ge x_0`.

    A high-order ODE can be solved by transforming it into first-order
    vector form. This transformation is described in standard texts
    on ODEs. Examples will also be given below.

    **Options, speed and accuracy**

    By default, :func:`~mpmath.odefun` uses a high-order Taylor series
    method. For reasonably well-behaved problems, the solution will
    be fully accurate to within the working precision. Note that
    *F* must be possible to evaluate to very high precision
    for the generation of Taylor series to work.

    To get a faster but less accurate solution, you can set a large
    value for *tol* (which defaults roughly to *eps*). If you just
    want to plot the solution or perform a basic simulation,
    *tol = 0.01* is likely sufficient.

    The *degree* argument controls the degree of the solver (with
    *method='taylor'*, this is the degree of the Taylor series
    expansion). A higher degree means that a longer step can be taken
    before a new local solution must be generated from *F*,
    meaning that fewer steps are required to get from `x_0` to a given
    `x_1`. On the other hand, a higher degree also means that each
    local solution becomes more expensive (i.e., more evaluations of
    *F* are required per step, and at higher precision).

    The optimal setting therefore involves a tradeoff. Generally,
    decreasing the *degree* for Taylor series is likely to give faster
    solution at low precision, while increasing is likely to be better
    at higher precision.

    The function
    object returned by :func:`~mpmath.odefun` caches the solutions at all step
    points and uses polynomial interpolation between step points.
    Therefore, once `y(x_1)` has been evaluated for some `x_1`,
    `y(x)` can be evaluated very quickly for any `x_0 \le x \le x_1`.
    and continuing the evaluation up to `x_2 > x_1` is also fast.

    **Examples of first-order ODEs**

    We will solve the standard test problem `y'(x) = y(x), y(0) = 1`
    which has explicit solution `y(x) = \exp(x)`::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> f = odefun(lambda x, y: y, 0, 1)
        >>> for x in [0, 1, 2.5]:
        ...     print((f(x), exp(x)))
        ...
        (1.0, 1.0)
        (2.71828182845905, 2.71828182845905)
        (12.1824939607035, 12.1824939607035)

    The solution with high precision::

        >>> mp.dps = 50
        >>> f = odefun(lambda x, y: y, 0, 1)
        >>> f(1)
        2.7182818284590452353602874713526624977572470937
        >>> exp(1)
        2.7182818284590452353602874713526624977572470937

    Using the more general vectorized form, the test problem
    can be input as (note that *f* returns a 1-element vector)::

        >>> mp.dps = 15
        >>> f = odefun(lambda x, y: [y[0]], 0, [1])
        >>> f(1)
        [2.71828182845905]

    :func:`~mpmath.odefun` can solve nonlinear ODEs, which are generally
    impossible (and at best difficult) to solve analytically. As
    an example of a nonlinear ODE, we will solve `y'(x) = x \sin(y(x))`
    for `y(0) = \pi/2`. An exact solution happens to be known
    for this problem, and is given by
    `y(x) = 2 \tan^{-1}\left(\exp\left(x^2/2\right)\right)`::

        >>> f = odefun(lambda x, y: x*sin(y), 0, pi/2)
        >>> for x in [2, 5, 10]:
        ...     print((f(x), 2*atan(exp(mpf(x)**2/2))))
        ...
        (2.87255666284091, 2.87255666284091)
        (3.14158520028345, 3.14158520028345)
        (3.14159265358979, 3.14159265358979)

    If `F` is independent of `y`, an ODE can be solved using direct
    integration. We can therefore obtain a reference solution with
    :func:`~mpmath.quad`::

        >>> f = lambda x: (1+x**2)/(1+x**3)
        >>> g = odefun(lambda x, y: f(x), pi, 0)
        >>> g(2*pi)
        0.72128263801696
        >>> quad(f, [pi, 2*pi])
        0.72128263801696

    **Examples of second-order ODEs**

    We will solve the harmonic oscillator equation `y''(x) + y(x) = 0`.
    To do this, we introduce the helper functions `y_0 = y, y_1 = y_0'`
    whereby the original equation can be written as `y_1' + y_0' = 0`. Put
    together, we get the first-order, two-dimensional vector ODE

    .. math ::

        \begin{cases}
        y_0' = y_1 \\
        y_1' = -y_0
        \end{cases}

    To get a well-defined IVP, we need two initial values. With
    `y(0) = y_0(0) = 1` and `-y'(0) = y_1(0) = 0`, the problem will of
    course be solved by `y(x) = y_0(x) = \cos(x)` and
    `-y'(x) = y_1(x) = \sin(x)`. We check this::

        >>> f = odefun(lambda x, y: [-y[1], y[0]], 0, [1, 0])
        >>> for x in [0, 1, 2.5, 10]:
        ...     nprint(f(x), 15)
        ...     nprint([cos(x), sin(x)], 15)
        ...     print("---")
        ...
        [1.0, 0.0]
        [1.0, 0.0]
        ---
        [0.54030230586814, 0.841470984807897]
        [0.54030230586814, 0.841470984807897]
        ---
        [-0.801143615546934, 0.598472144103957]
        [-0.801143615546934, 0.598472144103957]
        ---
        [-0.839071529076452, -0.54402111088937]
        [-0.839071529076452, -0.54402111088937]
        ---

    Note that we get both the sine and the cosine solutions
    simultaneously.

    **TODO**

    * Better automatic choice of degree and step size
    * Make determination of Taylor series convergence radius
      more robust
    * Allow solution for `x < x_0`
    * Allow solution for complex `x`
    * Test for difficult (ill-conditioned) problems
    * Implement Runge-Kutta and other algorithms

    """
    # Convert the tolerance to a binary exponent; default to slightly
    # beyond the current working precision.
    if tol:
        tol_prec = int(-ctx.log(tol, 2))+10
    else:
        tol_prec = ctx.prec+10
    degree = degree or (3 + int(3*ctx.dps/2.))
    workprec = ctx.prec + 40
    # Scalar-mode convenience: if y0 is not a sequence, wrap F and y0 so
    # the core solver always sees vectors, and unwrap on output.
    try:
        len(y0)
        return_vector = True
    except TypeError:
        F_ = F
        F = lambda x, y: [F_(x, y[0])]
        y0 = [y0]
        return_vector = False
    ser, xb = ode_taylor(ctx, F, x0, y0, tol_prec, degree)
    # series_boundaries[i] is the left endpoint of the i-th local
    # expansion; series_data holds (coefficients, left, right) triples.
    series_boundaries = [x0, xb]
    series_data = [(ser, x0, xb)]
    # We will be working with vectors of Taylor series
    def mpolyval(ser, a):
        return [ctx.polyval(s[::-1], a) for s in ser]
    # Find nearest expansion point; compute if necessary
    def get_series(x):
        if x < x0:
            raise ValueError
        n = bisect(series_boundaries, x)
        if n < len(series_boundaries):
            # x is covered by an already-computed local expansion.
            return series_data[n-1]
        # Otherwise extend the cached solution step by step until the
        # requested x falls inside the newest expansion interval.
        while 1:
            ser, xa, xb = series_data[-1]
            if verbose:
                print("Computing Taylor series for [%f, %f]" % (xa, xb))
            y = mpolyval(ser, xb-xa)
            xa = xb
            ser, xb = ode_taylor(ctx, F, xb, y, tol_prec, degree)
            series_boundaries.append(xb)
            series_data.append((ser, xa, xb))
            if x <= xb:
                return series_data[-1]
    # Evaluation function
    def interpolant(x):
        x = ctx.convert(x)
        orig = ctx.prec
        try:
            # Evaluate at elevated precision, then round with unary +.
            ctx.prec = workprec
            ser, xa, xb = get_series(x)
            y = mpolyval(ser, x-xa)
        finally:
            ctx.prec = orig
        if return_vector:
            return [+yk for yk in y]
        else:
            return +y[0]
    return interpolant
|
| 283 |
+
|
| 284 |
+
# Attach the solver to the mixin so contexts expose it as ctx.odefun.
ODEMethods.odefun = odefun

# Run the docstring examples when this module is executed directly.
if __name__ == "__main__":
    import doctest
    doctest.testmod()
|
.venv/lib/python3.11/site-packages/mpmath/calculus/optimization.py
ADDED
|
@@ -0,0 +1,1102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import print_function
|
| 2 |
+
|
| 3 |
+
from copy import copy
|
| 4 |
+
|
| 5 |
+
from ..libmp.backend import xrange
|
| 6 |
+
|
| 7 |
+
class OptimizationMethods(object):
    # Mixin namespace for root-finding methods; mpmath contexts inherit
    # from it. NOTE: following mpmath convention for context mixins, the
    # instance parameter is named ``ctx`` rather than ``self``.
    def __init__(ctx):
        pass
|
| 10 |
+
|
| 11 |
+
##############
|
| 12 |
+
# 1D-SOLVERS #
|
| 13 |
+
##############
|
| 14 |
+
|
| 15 |
+
class Newton:
    """
    1d-solver generating pairs of approximative root and error.

    Needs a single starting point x0 close to the root.

    Pro:

    * converges fast
    * sometimes more robust than secant with bad second starting point

    Contra:

    * converges slowly for multiple roots
    * needs first derivative
    * 2 function evaluations per iteration
    """
    maxsteps = 20

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        # Exactly one starting point is accepted.
        if len(x0) != 1:
            raise ValueError('expected 1 starting point, got %i' % len(x0))
        self.x0 = x0[0]
        self.f = f
        if 'df' in kwargs:
            self.df = kwargs['df']
        else:
            # No analytic derivative supplied: differentiate numerically.
            self.df = lambda x: self.ctx.diff(f, x)

    def __iter__(self):
        func = self.f
        deriv = self.df
        current = self.x0
        while True:
            # Classic Newton step: x_{k+1} = x_k - f(x_k)/f'(x_k).
            nxt = current - func(current) / deriv(current)
            step = abs(nxt - current)
            current = nxt
            yield current, step
|
| 57 |
+
|
| 58 |
+
class Secant:
    """
    1d-solver generating pairs of approximative root and error.

    Needs starting points x0 and x1 close to the root.
    x1 defaults to x0 + 0.25.

    Pro:

    * converges fast

    Contra:

    * converges slowly for multiple roots
    """
    maxsteps = 30

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        npoints = len(x0)
        if npoints == 1:
            # Second point is synthesized by a small offset.
            self.x0 = x0[0]
            self.x1 = self.x0 + 0.25
        elif npoints == 2:
            self.x0, self.x1 = x0[0], x0[1]
        else:
            raise ValueError('expected 1 or 2 starting points, got %i' % npoints)
        self.f = f

    def __iter__(self):
        func = self.f
        prev, cur = self.x0, self.x1
        f_prev = func(prev)
        while True:
            f_cur = func(cur)
            gap = cur - prev
            if not gap:
                # Points coincide: no secant line can be formed.
                break
            slope = (f_cur - f_prev) / gap
            if not slope:
                # Horizontal secant: the step would divide by zero.
                break
            prev, cur = cur, cur - f_cur / slope
            f_prev = f_cur
            yield cur, abs(gap)
|
| 103 |
+
|
| 104 |
+
class MNewton:
    """
    1d-solver generating pairs of approximative root and error.

    Needs starting point x0 close to the root.
    Uses modified Newton's method that converges fast regardless of the
    multiplicity of the root.

    Pro:

    * converges fast for multiple roots

    Contra:

    * needs first and second derivative of f
    * 3 function evaluations per iteration
    """
    maxsteps = 20

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        if not len(x0) == 1:
            raise ValueError('expected 1 starting point, got %i' % len(x0))
        self.x0 = x0[0]
        self.f = f
        if not 'df' in kwargs:
            # No analytic first derivative supplied: differentiate numerically.
            def df(x):
                return self.ctx.diff(f, x)
        else:
            df = kwargs['df']
        self.df = df
        if not 'd2f' in kwargs:
            # No analytic second derivative supplied: differentiate df.
            def d2f(x):
                return self.ctx.diff(df, x)
        else:
            # BUGFIX: previously read kwargs['df'] here, silently discarding
            # a user-supplied second derivative.
            d2f = kwargs['d2f']
        self.d2f = d2f

    def __iter__(self):
        x = self.x0
        f = self.f
        df = self.df
        d2f = self.d2f
        while True:
            prevx = x
            fx = f(x)
            if fx == 0:
                # Exact root found; stop iterating.
                break
            dfx = df(x)
            d2fx = d2f(x)
            # x = x - F(x)/F'(x) with F(x) = f(x)/f'(x); applying Newton to
            # F removes the multiplicity of the root of f.
            x -= fx / (dfx - fx * d2fx / dfx)
            error = abs(x - prevx)
            yield x, error
|
| 158 |
+
|
| 159 |
+
class Halley:
    """
    1d-solver generating pairs of approximative root and error.

    Needs a starting point x0 close to the root.
    Uses Halley's method with cubic convergence rate.

    Pro:

    * converges even faster the Newton's method
    * useful when computing with *many* digits

    Contra:

    * needs first and second derivative of f
    * 3 function evaluations per iteration
    * converges slowly for multiple roots
    """

    maxsteps = 20

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        if not len(x0) == 1:
            raise ValueError('expected 1 starting point, got %i' % len(x0))
        self.x0 = x0[0]
        self.f = f
        if not 'df' in kwargs:
            # No analytic first derivative supplied: differentiate numerically.
            def df(x):
                return self.ctx.diff(f, x)
        else:
            df = kwargs['df']
        self.df = df
        if not 'd2f' in kwargs:
            # No analytic second derivative supplied: differentiate df.
            def d2f(x):
                return self.ctx.diff(df, x)
        else:
            # BUGFIX: previously read kwargs['df'] here, silently discarding
            # a user-supplied second derivative.
            d2f = kwargs['d2f']
        self.d2f = d2f

    def __iter__(self):
        x = self.x0
        f = self.f
        df = self.df
        d2f = self.d2f
        while True:
            prevx = x
            fx = f(x)
            dfx = df(x)
            d2fx = d2f(x)
            # Halley's iteration: x -= 2 f f' / (2 f'^2 - f f'').
            x -= 2*fx*dfx / (2*dfx**2 - fx*d2fx)
            error = abs(x - prevx)
            yield x, error
|
| 212 |
+
|
| 213 |
+
class Muller:
    """
    1d-solver generating pairs of approximative root and error.

    Needs starting points x0, x1 and x2 close to the root.
    x1 defaults to x0 + 0.25; x2 to x1 + 0.25.
    Uses Muller's method that converges towards complex roots.

    Pro:

    * converges fast (somewhat faster than secant)
    * can find complex roots

    Contra:

    * converges slowly for multiple roots
    * may have complex values for real starting points and real roots

    http://en.wikipedia.org/wiki/Muller's_method
    """
    maxsteps = 30

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        if len(x0) == 1:
            self.x0 = x0[0]
            self.x1 = self.x0 + 0.25
            self.x2 = self.x1 + 0.25
        elif len(x0) == 2:
            self.x0 = x0[0]
            self.x1 = x0[1]
            self.x2 = self.x1 + 0.25
        elif len(x0) == 3:
            self.x0 = x0[0]
            self.x1 = x0[1]
            self.x2 = x0[2]
        else:
            raise ValueError('expected 1, 2 or 3 starting points, got %i'
                             % len(x0))
        self.f = f
        # ROBUSTNESS FIX: 'verbose' is an optional debug flag; previously
        # kwargs['verbose'] raised KeyError when it was not supplied.
        self.verbose = kwargs.get('verbose', False)

    def __iter__(self):
        f = self.f
        x0 = self.x0
        x1 = self.x1
        x2 = self.x2
        fx0 = f(x0)
        fx1 = f(x1)
        fx2 = f(x2)
        while True:
            # TODO: maybe refactoring with function for divided differences
            # calculate divided differences
            fx2x1 = (fx1 - fx2) / (x1 - x2)
            fx2x0 = (fx0 - fx2) / (x0 - x2)
            fx1x0 = (fx0 - fx1) / (x0 - x1)
            w = fx2x1 + fx2x0 - fx1x0
            fx2x1x0 = (fx1x0 - fx2x1) / (x0 - x2)
            if w == 0 and fx2x1x0 == 0:
                # Interpolating parabola is degenerate; cannot continue.
                if self.verbose:
                    print('canceled with')
                    print('x0 =', x0, ', x1 =', x1, 'and x2 =', x2)
                break
            x0 = x1
            fx0 = fx1
            x1 = x2
            fx1 = fx2
            # denominator should be as large as possible => choose sign
            r = self.ctx.sqrt(w**2 - 4*fx2*fx2x1x0)
            if abs(w - r) > abs(w + r):
                r = -r
            x2 -= 2*fx2 / (w + r)
            fx2 = f(x2)
            error = abs(x2 - x1)
            yield x2, error
|
| 288 |
+
|
| 289 |
+
# TODO: consider raising a ValueError when there's no sign change in a and b
class Bisection:
    """
    1d-solver generating pairs of approximative root and error.

    Uses bisection method to find a root of f in [a, b].
    Might fail for multiple roots (needs sign change).

    Pro:

    * robust and reliable

    Contra:

    * converges slowly
    * needs sign change
    """
    maxsteps = 100

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        if len(x0) != 2:
            raise ValueError('expected interval of 2 points, got %i' % len(x0))
        self.f = f
        self.a = x0[0]
        self.b = x0[1]

    def __iter__(self):
        func = self.f
        lo = self.a
        hi = self.b
        width = hi - lo
        f_hi = func(hi)
        while True:
            # Exact midpoint via a halving of lo + hi.
            mid = self.ctx.ldexp(lo + hi, -1)
            f_mid = func(mid)
            prod = f_mid * f_hi
            if prod > 0:
                # Same sign as the right endpoint: root lies in [lo, mid].
                hi = mid
                f_hi = f_mid
            elif prod < 0:
                # Opposite signs: root lies in [mid, hi].
                lo = mid
            else:
                # Landed exactly on a zero; report it with zero error.
                yield mid, self.ctx.zero
            width /= 2
            yield (lo + hi)/2, abs(width)
|
| 335 |
+
|
| 336 |
+
def _getm(method):
|
| 337 |
+
"""
|
| 338 |
+
Return a function to calculate m for Illinois-like methods.
|
| 339 |
+
"""
|
| 340 |
+
if method == 'illinois':
|
| 341 |
+
def getm(fz, fb):
|
| 342 |
+
return 0.5
|
| 343 |
+
elif method == 'pegasus':
|
| 344 |
+
def getm(fz, fb):
|
| 345 |
+
return fb/(fb + fz)
|
| 346 |
+
elif method == 'anderson':
|
| 347 |
+
def getm(fz, fb):
|
| 348 |
+
m = 1 - fz/fb
|
| 349 |
+
if m > 0:
|
| 350 |
+
return m
|
| 351 |
+
else:
|
| 352 |
+
return 0.5
|
| 353 |
+
else:
|
| 354 |
+
raise ValueError("method '%s' not recognized" % method)
|
| 355 |
+
return getm
|
| 356 |
+
|
| 357 |
+
class Illinois:
    """
    1d-solver generating pairs of approximative root and error.

    Uses Illinois method or similar to find a root of f in [a, b].
    Might fail for multiple roots (needs sign change).
    Combines bisect with secant (improved regula falsi).

    The only difference between the methods is the scaling factor m, which is
    used to ensure convergence (you can choose one using the 'method' keyword):

    Illinois method ('illinois'):
        m = 0.5

    Pegasus method ('pegasus'):
        m = fb/(fb + fz)

    Anderson-Bjoerk method ('anderson'):
        m = 1 - fz/fb if positive else 0.5

    Pro:

    * converges very fast

    Contra:

    * has problems with multiple roots
    * needs sign change
    """
    maxsteps = 30

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        if len(x0) != 2:
            raise ValueError('expected interval of 2 points, got %i' % len(x0))
        self.a = x0[0]
        self.b = x0[1]
        self.f = f
        self.tol = kwargs['tol']
        self.verbose = kwargs['verbose']
        self.method = kwargs.get('method', 'illinois')
        self.getm = _getm(self.method)
        if self.verbose:
            print('using %s method' % self.method)

    def __iter__(self):
        method = self.method
        f = self.f
        a, b = self.a, self.b
        fa, fb = f(a), f(b)
        m = None
        while True:
            width = b - a
            if width == 0:
                break
            slope = (fb - fa) / width
            z = a - fa/slope
            fz = f(z)
            if abs(fz) < self.tol:
                # TODO: better condition (when f is very flat)
                if self.verbose:
                    print('canceled with z =', z)
                yield z, width
                break
            if fz * fb < 0:  # root in [z, b]
                a, fa = b, fb
                b, fb = z, fz
            else:  # root in [a, z]
                m = self.getm(fz, fb)
                b, fb = z, fz
                fa = m*fa  # scale down to ensure convergence
            if self.verbose and m and method != 'illinois':
                print('m:', m)
            yield (a + b)/2, abs(width)
|
| 436 |
+
|
| 437 |
+
def Pegasus(*args, **kwargs):
    """
    1d-solver generating pairs of approximative root and error.

    Uses Pegasus method to find a root of f in [a, b].
    Wrapper for illinois to use method='pegasus'.
    """
    return Illinois(*args, **dict(kwargs, method='pegasus'))
|
| 446 |
+
|
| 447 |
+
def Anderson(*args, **kwargs):
    """
    1d-solver generating pairs of approximative root and error.

    Uses Anderson-Bjoerk method to find a root of f in [a, b].
    Wrapper for illinois to use method='anderson'.
    """
    # NOTE: the docstring previously said method='pegasus' (copy-paste error);
    # this wrapper selects the Anderson-Bjoerk variant.
    kwargs['method'] = 'anderson'
    return Illinois(*args, **kwargs)
|
| 456 |
+
|
| 457 |
+
# TODO: check whether it's possible to combine it with Illinois stuff
class Ridder:
    """
    1d-solver generating pairs of approximative root and error.

    Ridders' method to find a root of f in [a, b].
    Is told to perform as well as Brent's method while being simpler.

    Pro:

    * very fast
    * simpler than Brent's method

    Contra:

    * two function evaluations per step
    * has problems with multiple roots
    * needs sign change

    http://en.wikipedia.org/wiki/Ridders'_method
    """
    maxsteps = 30

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        self.f = f
        if len(x0) != 2:
            raise ValueError('expected interval of 2 points, got %i' % len(x0))
        self.x1 = x0[0]
        self.x2 = x0[1]
        self.verbose = kwargs['verbose']
        self.tol = kwargs['tol']

    def __iter__(self):
        ctx = self.ctx
        f = self.f
        x1, x2 = self.x1, self.x2
        fx1, fx2 = f(x1), f(x2)
        while True:
            mid = 0.5*(x1 + x2)
            fmid = f(mid)
            # exponential-fit update point
            x4 = mid + (mid - x1) * ctx.sign(fx1 - fx2) * fmid / ctx.sqrt(fmid**2 - fx1*fx2)
            fx4 = f(x4)
            if abs(fx4) < self.tol:
                # TODO: better condition (when f is very flat)
                if self.verbose:
                    print('canceled with f(x4) =', fx4)
                yield x4, abs(x1 - x2)
                break
            if fx4 * fx2 < 0:  # root in [x4, x2]
                x1, fx1 = x4, fx4
            else:  # root in [x1, x4]
                x2, fx2 = x4, fx4
            yield (x1 + x2)/2, abs(x1 - x2)
|
| 516 |
+
|
| 517 |
+
class ANewton:
    """
    EXPERIMENTAL 1d-solver generating pairs of approximative root and error.

    Uses Newton's method modified to use Steffensens method when convergence is
    slow. (I.e. for multiple roots.)
    """
    maxsteps = 20

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        if len(x0) != 1:
            raise ValueError('expected 1 starting point, got %i' % len(x0))
        self.x0 = x0[0]
        self.f = f
        if 'df' in kwargs:
            df = kwargs['df']
        else:
            # fall back to a numerical derivative
            def df(x):
                return self.ctx.diff(f, x)
        self.df = df
        def phi(x):
            return x - f(x) / df(x)
        self.phi = phi
        self.verbose = kwargs['verbose']

    def __iter__(self):
        x = self.x0
        phi = self.phi
        error = 0
        slow_steps = 0
        while True:
            previous = x
            try:
                x = phi(x)
            except ZeroDivisionError:
                if self.verbose:
                    print('ZeroDivisionError: canceled with x =', x)
                break
            prev_error = error
            error = abs(previous - x)
            # TODO: decide not to use convergence acceleration
            if error and abs(error - prev_error) / error < 1:
                if self.verbose:
                    print('converging slowly')
                slow_steps += 1
            if slow_steps >= 3:
                # accelerate convergence
                phi = steffensen(phi)
                slow_steps = 0
                if self.verbose:
                    print('accelerating convergence')
            yield x, error
|
| 572 |
+
|
| 573 |
+
# TODO: add Brent
|
| 574 |
+
|
| 575 |
+
############################
|
| 576 |
+
# MULTIDIMENSIONAL SOLVERS #
|
| 577 |
+
############################
|
| 578 |
+
|
| 579 |
+
def jacobian(ctx, f, x):
    """
    Calculate the Jacobian matrix of a function at the point x0.

    This is the first derivative of a vectorial function:

        f : R^m -> R^n with m >= n
    """
    x = ctx.matrix(x)
    # forward-difference step size
    h = ctx.sqrt(ctx.eps)
    fx = ctx.matrix(f(*x))
    rows = len(fx)
    cols = len(x)
    jac = ctx.matrix(rows, cols)
    for j in xrange(cols):
        shifted = x.copy()
        shifted[j] += h
        column = (ctx.matrix(f(*shifted)) - fx) / h
        for i in xrange(rows):
            jac[i,j] = column[i]
    return jac
|
| 600 |
+
|
| 601 |
+
# TODO: test with user-specified jacobian matrix
class MDNewton:
    """
    Find the root of a vector function numerically using Newton's method.

    f is a vector function representing a nonlinear equation system.

    x0 is the starting point close to the root.

    J is a function returning the Jacobian matrix for a point.

    Supports overdetermined systems.

    Use the 'norm' keyword to specify which norm to use. Defaults to max-norm.
    The function to calculate the Jacobian matrix can be given using the
    keyword 'J'. Otherwise it will be calculated numerically.

    Please note that this method converges only locally. Especially for high-
    dimensional systems it is not trivial to find a good starting point being
    close enough to the root.

    It is recommended to use a faster, low-precision solver from SciPy [1] or
    OpenOpt [2] to get an initial guess. Afterwards you can use this method for
    root-polishing to any precision.

    [1] http://scipy.org

    [2] http://openopt.org/Welcome
    """
    maxsteps = 10

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        self.f = f
        if isinstance(x0, (tuple, list)):
            x0 = ctx.matrix(x0)
        assert x0.cols == 1, 'need a vector'
        self.x0 = x0
        if 'J' in kwargs:
            self.J = kwargs['J']
        else:
            # fall back to a numerical Jacobian
            def J(*x):
                return ctx.jacobian(f, x)
            self.J = J
        self.norm = kwargs['norm']
        self.verbose = kwargs['verbose']

    def __iter__(self):
        f = self.f
        x0 = self.x0
        norm = self.norm
        J = self.J
        fx = self.ctx.matrix(f(*x0))
        fxnorm = norm(fx)
        cancel = False
        while not cancel:
            # get direction of descent
            fxn = -fx
            Jx = J(*x0)
            s = self.ctx.lu_solve(Jx, fxn)
            if self.verbose:
                print('Jx:')
                print(Jx)
                print('s:', s)
            # damping step size TODO: better strategy (hard task)
            l = self.ctx.one
            x1 = x0 + s
            while True:
                if x1 == x0:
                    # fix: message used to read "excact"
                    if self.verbose:
                        print("canceled, won't get more exact")
                    cancel = True
                    break
                fx = self.ctx.matrix(f(*x1))
                newnorm = norm(fx)
                if newnorm < fxnorm:
                    # new x accepted
                    fxnorm = newnorm
                    x0 = x1
                    break
                # halve the step and retry
                l /= 2
                x1 = x0 + l*s
            yield (x0, fxnorm)
|
| 684 |
+
|
| 685 |
+
#############
|
| 686 |
+
# UTILITIES #
|
| 687 |
+
#############
|
| 688 |
+
|
| 689 |
+
# Map user-facing solver names to their implementing classes/factories.
str2solver = {
    'newton': Newton,
    'secant': Secant,
    'mnewton': MNewton,
    'halley': Halley,
    'muller': Muller,
    'bisect': Bisection,
    'illinois': Illinois,
    'pegasus': Pegasus,
    'anderson': Anderson,
    'ridder': Ridder,
    'anewton': ANewton,
    'mdnewton': MDNewton,
}
|
| 693 |
+
|
| 694 |
+
def findroot(ctx, f, x0, solver='secant', tol=None, verbose=False, verify=True, **kwargs):
    r"""
    Find an approximate solution to `f(x) = 0`, using *x0* as starting point or
    interval for *x*.

    Multidimensional overdetermined systems are supported.
    You can specify them using a function or a list of functions.

    Mathematically speaking, this function returns `x` such that
    `|f(x)|^2 \leq \mathrm{tol}` is true within the current working precision.
    If the computed value does not meet this criterion, an exception is raised.
    This exception can be disabled with *verify=False*.

    For interval arithmetic (``iv.findroot()``), please note that
    the returned interval ``x`` is not guaranteed to contain `f(x)=0`!
    It is only some `x` for which `|f(x)|^2 \leq \mathrm{tol}` certainly holds
    regardless of numerical error. This may be improved in the future.

    **Arguments**

    *f*
        one dimensional function
    *x0*
        starting point, several starting points or interval (depends on solver)
    *tol*
        the returned solution has an error smaller than this
    *verbose*
        print additional information for each iteration if true
    *verify*
        verify the solution and raise a ValueError if `|f(x)|^2 > \mathrm{tol}`
    *solver*
        a generator for *f* and *x0* returning approximative solution and error
    *maxsteps*
        after how many steps the solver will cancel
    *df*
        first derivative of *f* (used by some solvers)
    *d2f*
        second derivative of *f* (used by some solvers)
    *multidimensional*
        force multidimensional solving
    *J*
        Jacobian matrix of *f* (used by multidimensional solvers)
    *norm*
        used vector norm (used by multidimensional solvers)

    solver has to be callable with ``(f, x0, **kwargs)`` and return an generator
    yielding pairs of approximative solution and estimated error (which is
    expected to be positive).
    You can use the following string aliases:
    'secant', 'mnewton', 'halley', 'muller', 'illinois', 'pegasus', 'anderson',
    'ridder', 'anewton', 'bisect'

    See mpmath.calculus.optimization for their documentation.

    **Examples**

    The function :func:`~mpmath.findroot` locates a root of a given function using the
    secant method by default. A simple example use of the secant method is to
    compute `\pi` as the root of `\sin x` closest to `x_0 = 3`::

        >>> from mpmath import *
        >>> mp.dps = 30; mp.pretty = True
        >>> findroot(sin, 3)
        3.14159265358979323846264338328

    The secant method can be used to find complex roots of analytic functions,
    although it must in that case generally be given a nonreal starting value
    (or else it will never leave the real line)::

        >>> mp.dps = 15
        >>> findroot(lambda x: x**3 + 2*x + 1, j)
        (0.226698825758202 + 1.46771150871022j)

    A nice application is to compute nontrivial roots of the Riemann zeta
    function with many digits (good initial values are needed for convergence)::

        >>> mp.dps = 30
        >>> findroot(zeta, 0.5+14j)
        (0.5 + 14.1347251417346937904572519836j)

    The secant method can also be used as an optimization algorithm, by passing
    it a derivative of a function. The following example locates the positive
    minimum of the gamma function::

        >>> mp.dps = 20
        >>> findroot(lambda x: diff(gamma, x), 1)
        1.4616321449683623413

    Finally, a useful application is to compute inverse functions, such as the
    Lambert W function which is the inverse of `w e^w`, given the first
    term of the solution's asymptotic expansion as the initial value. In basic
    cases, this gives identical results to mpmath's built-in ``lambertw``
    function::

        >>> def lambert(x):
        ...     return findroot(lambda w: w*exp(w) - x, log(1+x))
        ...
        >>> mp.dps = 15
        >>> lambert(1); lambertw(1)
        0.567143290409784
        0.567143290409784
        >>> lambert(1000); lambert(1000)
        5.2496028524016
        5.2496028524016

    Multidimensional functions are also supported::

        >>> f = [lambda x1, x2: x1**2 + x2,
        ...      lambda x1, x2: 5*x1**2 - 3*x1 + 2*x2 - 3]
        >>> findroot(f, (0, 0))
        [-0.618033988749895]
        [-0.381966011250105]
        >>> findroot(f, (10, 10))
        [ 1.61803398874989]
        [-2.61803398874989]

    You can verify this by solving the system manually.

    Please note that the following (more general) syntax also works::

        >>> def f(x1, x2):
        ...     return x1**2 + x2, 5*x1**2 - 3*x1 + 2*x2 - 3
        ...
        >>> findroot(f, (0, 0))
        [-0.618033988749895]
        [-0.381966011250105]


    **Multiple roots**

    For multiple roots all methods of the Newtonian family (including secant)
    converge slowly. Consider this example::

        >>> f = lambda x: (x - 1)**99
        >>> findroot(f, 0.9, verify=False)
        0.918073542444929

    Even for a very close starting point the secant method converges very
    slowly. Use ``verbose=True`` to illustrate this.

    It is possible to modify Newton's method to make it converge regardless of
    the root's multiplicity::

        >>> findroot(f, -10, solver='mnewton')
        1.0

    This variant uses the first and second derivative of the function, which is
    not very efficient.

    Alternatively you can use an experimental Newtonian solver that keeps track
    of the speed of convergence and accelerates it using Steffensen's method if
    necessary::

        >>> findroot(f, -10, solver='anewton', verbose=True)
        x: -9.88888888888888888889
        error: 0.111111111111111111111
        converging slowly
        x: -9.77890011223344556678
        error: 0.10998877665544332211
        converging slowly
        x: -9.67002233332199662166
        error: 0.108877778911448945119
        converging slowly
        accelerating convergence
        x: -9.5622443299551077669
        error: 0.107778003366888854764
        converging slowly
        x: 0.99999999999999999214
        error: 10.562244329955107759
        x: 1.0
        error: 7.8598304758094664213e-18
        ZeroDivisionError: canceled with x = 1.0
        1.0

    **Complex roots**

    For complex roots it's recommended to use Muller's method as it converges
    even for real starting points very fast::

        >>> findroot(lambda x: x**4 + x + 1, (0, 1, 2), solver='muller')
        (0.727136084491197 + 0.934099289460529j)


    **Intersection methods**

    When you need to find a root in a known interval, it's highly recommended to
    use an intersection-based solver like ``'anderson'`` or ``'ridder'``.
    Usually they converge faster and more reliable. They have however problems
    with multiple roots and usually need a sign change to find a root::

        >>> findroot(lambda x: x**3, (-1, 1), solver='anderson')
        0.0

    Be careful with symmetric functions::

        >>> findroot(lambda x: x**2, (-1, 1), solver='anderson') #doctest:+ELLIPSIS
        Traceback (most recent call last):
          ...
        ZeroDivisionError

    It fails even for better starting points, because there is no sign change::

        >>> findroot(lambda x: x**2, (-1, .5), solver='anderson')
        Traceback (most recent call last):
          ...
        ValueError: Could not find root within given tolerance. (1.0 > 2.16840434497100886801e-19)
        Try another starting point or tweak arguments.

    """
    prec = ctx.prec
    try:
        # work at elevated precision; restored in finally
        ctx.prec += 20

        # initialize arguments
        if tol is None:
            tol = ctx.eps * 2**10

        kwargs.setdefault('verbose', verbose)

        if 'd1f' in kwargs:
            kwargs['df'] = kwargs['d1f']

        kwargs['tol'] = tol
        if isinstance(x0, (list, tuple)):
            x0 = [ctx.convert(x) for x in x0]
        else:
            x0 = [ctx.convert(x0)]

        if isinstance(solver, str):
            try:
                solver = str2solver[solver]
            except KeyError:
                raise ValueError('could not recognize solver')

        # accept list of functions
        if isinstance(f, (list, tuple)):
            parts = copy(f)
            def _call_all(*args):
                return [fn(*args) for fn in parts]
            f = _call_all

        # detect multidimensional functions
        try:
            fx = f(*x0)
            multidimensional = isinstance(fx, (list, tuple, ctx.matrix))
        except TypeError:
            fx = f(x0[0])
            multidimensional = False
        if 'multidimensional' in kwargs:
            multidimensional = kwargs['multidimensional']
        if multidimensional:
            # only one multidimensional solver available at the moment
            solver = MDNewton
            if 'norm' not in kwargs:
                norm = lambda x: ctx.norm(x, 'inf')
                kwargs['norm'] = norm
            else:
                norm = kwargs['norm']
        else:
            norm = abs

        # happily return starting point if it's a root
        if norm(fx) == 0:
            if multidimensional:
                return ctx.matrix(x0)
            else:
                return x0[0]

        # use solver
        iterator = solver(ctx, f, x0, **kwargs)
        maxsteps = kwargs.get('maxsteps', iterator.maxsteps)
        steps = 0
        for x, error in iterator:
            if verbose:
                print('x: ', x)
                print('error:', error)
            steps += 1
            if error < tol * max(1, norm(x)) or steps >= maxsteps:
                break
        else:
            # generator exhausted without yielding anything useful
            if not steps:
                raise ValueError('Could not find root using the given solver.\n'
                                 'Try another starting point or tweak arguments.')
        if not isinstance(x, (list, tuple, ctx.matrix)):
            xl = [x]
        else:
            xl = x
        if verify and norm(f(*xl))**2 > tol: # TODO: better condition?
            raise ValueError('Could not find root within given tolerance. '
                             '(%s > %s)\n'
                             'Try another starting point or tweak arguments.'
                             % (norm(f(*xl))**2, tol))
        return x
    finally:
        ctx.prec = prec
|
| 992 |
+
|
| 993 |
+
|
| 994 |
+
def multiplicity(ctx, f, root, tol=None, maxsteps=10, **kwargs):
    """
    Return the multiplicity of a given root of f.

    Internally, numerical derivatives are used. This might be inefficient for
    higher order derivatives. Due to this, ``multiplicity`` cancels after
    evaluating 10 derivatives by default. You can specify the n-th derivative
    using the dnf keyword.

    >>> from mpmath import *
    >>> multiplicity(lambda x: sin(x) - 1, pi/2)
    2

    """
    if tol is None:
        tol = ctx.eps ** 0.8
    # the 0th "derivative" is f itself
    kwargs['d0f'] = f
    for i in xrange(maxsteps):
        dfstr = 'd' + str(i) + 'f'
        if dfstr in kwargs:
            # user supplied the i-th derivative explicitly
            df = kwargs[dfstr]
        else:
            df = lambda x: ctx.diff(f, x, i)
        # the first derivative that does not vanish gives the multiplicity
        if not abs(df(root)) < tol:
            break
    return i
|
| 1020 |
+
|
| 1021 |
+
def steffensen(f):
    """
    linear convergent function -> quadratic convergent function

    Steffensen's method for quadratic convergence of a linear converging
    sequence.
    Do not use it for higher rates of convergence.
    It may even work for divergent sequences.

    Definition:
    F(x) = (x*f(f(x)) - f(x)**2) / (f(f(x)) - 2*f(x) + x)

    Example
    .......

    You can use Steffensen's method to accelerate a fixpoint iteration of linear
    (or less) convergence.

    x* is a fixpoint of the iteration x_{k+1} = phi(x_k) if x* = phi(x*). For
    phi(x) = x**2 there are two fixpoints: 0 and 1.

    Let's try Steffensen's method:

    >>> f = lambda x: x**2
    >>> from mpmath.calculus.optimization import steffensen
    >>> F = steffensen(f)
    >>> for x in [0.5, 0.9, 2.0]:
    ...     fx = Fx = x
    ...     for i in xrange(9):
    ...         try:
    ...             fx = f(fx)
    ...         except OverflowError:
    ...             pass
    ...         try:
    ...             Fx = F(Fx)
    ...         except ZeroDivisionError:
    ...             pass
    ...         print('%20g  %20g' % (fx, Fx))
    0.25                  -0.5
    0.0625                 0.1
    0.00390625         -0.0011236
    1.52588e-05        1.41691e-09
    2.32831e-10       -2.84465e-27
    5.42101e-20        2.30189e-80
    2.93874e-39       -1.2197e-239
    8.63617e-78                  0
    7.45834e-155                 0
    0.81               1.02676
    0.6561             1.00134
    0.430467                 1
    0.185302                 1
    0.0343368                1
    0.00117902               1
    1.39008e-06              1
    1.93233e-12              1
    3.73392e-24              1
    4                      1.6
    16                  1.2962
    256                1.10194
    65536              1.01659
    4.29497e+09        1.00053
    1.84467e+19              1
    3.40282e+38              1
    1.15792e+77              1
    1.34078e+154             1

    Unmodified, the iteration converges only towards 0. Modified it converges
    not only much faster, it converges even to the repelling fixpoint 1.
    """
    def F(x):
        # Aitken delta-squared applied to the fixpoint map f
        fx = f(x)
        ffx = f(fx)
        return (x*ffx - fx**2) / (ffx - 2*fx + x)
    return F
|
| 1095 |
+
|
| 1096 |
+
# Expose the solver utilities as context methods.
OptimizationMethods.jacobian = jacobian
OptimizationMethods.findroot = findroot
OptimizationMethods.multiplicity = multiplicity

if __name__ == '__main__':
    # run the module doctests when executed directly
    import doctest
    doctest.testmod()
|
.venv/lib/python3.11/site-packages/mpmath/calculus/quadrature.py
ADDED
|
@@ -0,0 +1,1115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
|
| 3 |
+
from ..libmp.backend import xrange
|
| 4 |
+
|
| 5 |
+
class QuadratureRule(object):
    """
    Quadrature rules are implemented using this class, in order to
    simplify the code and provide a common infrastructure
    for tasks such as error estimation and node caching.

    You can implement a custom quadrature rule by subclassing
    :class:`QuadratureRule` and implementing the appropriate
    methods. The subclass can then be used by :func:`~mpmath.quad` by
    passing it as the *method* argument.

    :class:`QuadratureRule` instances are supposed to be singletons.
    :class:`QuadratureRule` therefore implements instance caching
    in :func:`~mpmath.__new__`.
    """

    def __init__(self, ctx):
        self.ctx = ctx
        # (degree, prec) -> nodes for the standard interval [-1, 1]
        self.standard_cache = {}
        # (a, b, degree, prec) -> nodes transformed to [a, b]
        self.transformed_cache = {}
        # Records intervals requested once; transformed nodes are only
        # cached on a repeat request (see get_nodes), so one-shot
        # intervals don't pollute the cache
        self.interval_count = {}

    def clear(self):
        """
        Delete cached node data.
        """
        self.standard_cache = {}
        self.transformed_cache = {}
        self.interval_count = {}

    def calc_nodes(self, degree, prec, verbose=False):
        r"""
        Compute nodes for the standard interval `[-1, 1]`. Subclasses
        should probably implement only this method, and use
        :func:`~mpmath.get_nodes` method to retrieve the nodes.
        """
        raise NotImplementedError

    def get_nodes(self, a, b, degree, prec, verbose=False):
        """
        Return nodes for given interval, degree and precision. The
        nodes are retrieved from a cache if already computed;
        otherwise they are computed by calling :func:`~mpmath.calc_nodes`
        and are then cached.

        Subclasses should probably not implement this method,
        but just implement :func:`~mpmath.calc_nodes` for the actual
        node computation.
        """
        key = (a, b, degree, prec)
        if key in self.transformed_cache:
            return self.transformed_cache[key]
        orig = self.ctx.prec
        try:
            # Guard digits for the node transformation
            self.ctx.prec = prec+20
            # Get nodes on standard interval
            if (degree, prec) in self.standard_cache:
                nodes = self.standard_cache[degree, prec]
            else:
                nodes = self.calc_nodes(degree, prec, verbose)
                self.standard_cache[degree, prec] = nodes
            # Transform to general interval
            nodes = self.transform_nodes(nodes, a, b, verbose)
            # Only cache transformed nodes the second time the same
            # interval is requested
            if key in self.interval_count:
                self.transformed_cache[key] = nodes
            else:
                self.interval_count[key] = True
        finally:
            self.ctx.prec = orig
        return nodes

    def transform_nodes(self, nodes, a, b, verbose=False):
        r"""
        Rescale standardized nodes (for `[-1, 1]`) to a general
        interval `[a, b]`. For a finite interval, a simple linear
        change of variables is used. Otherwise, the following
        transformations are used:

        .. math ::

            \lbrack a, \infty \rbrack : t = \frac{1}{x} + (a-1)

            \lbrack -\infty, b \rbrack : t = (b+1) - \frac{1}{x}

            \lbrack -\infty, \infty \rbrack : t = \frac{x}{\sqrt{1-x^2}}

        """
        ctx = self.ctx
        a = ctx.convert(a)
        b = ctx.convert(b)
        one = ctx.one
        if (a, b) == (-one, one):
            # Already the standard interval; nothing to do
            return nodes
        half = ctx.mpf(0.5)
        new_nodes = []
        if ctx.isinf(a) or ctx.isinf(b):
            if (a, b) == (ctx.ninf, ctx.inf):
                p05 = -half
                for x, w in nodes:
                    x2 = x*x
                    px1 = one-x2
                    spx1 = px1**p05
                    x = x*spx1
                    w *= spx1/px1
                    new_nodes.append((x, w))
            elif a == ctx.ninf:
                b1 = b+1
                for x, w in nodes:
                    u = 2/(x+one)
                    x = b1-u
                    w *= half*u**2
                    new_nodes.append((x, w))
            elif b == ctx.inf:
                a1 = a-1
                for x, w in nodes:
                    u = 2/(x+one)
                    x = a1+u
                    w *= half*u**2
                    new_nodes.append((x, w))
            elif a == ctx.inf or b == ctx.ninf:
                # Reversed orientation: swap endpoints, negate weights
                return [(x,-w) for (x,w) in self.transform_nodes(nodes, b, a, verbose)]
            else:
                raise NotImplementedError
        else:
            # Simple linear change of variables
            C = (b-a)/2
            D = (b+a)/2
            for x, w in nodes:
                new_nodes.append((D+C*x, C*w))
        return new_nodes

    def guess_degree(self, prec):
        """
        Given a desired precision `p` in bits, estimate the degree `m`
        of the quadrature required to accomplish full accuracy for
        typical integrals. By default, :func:`~mpmath.quad` will perform up
        to `m` iterations. The value of `m` should be a slight
        overestimate, so that "slightly bad" integrals can be dealt
        with automatically using a few extra iterations. On the
        other hand, it should not be too big, so :func:`~mpmath.quad` can
        quit within a reasonable amount of time when it is given
        an "unsolvable" integral.

        The default formula used by :func:`~mpmath.guess_degree` is tuned
        for both :class:`TanhSinh` and :class:`GaussLegendre`.
        The output is roughly as follows:

            +---------+---------+
            | `p`     | `m`     |
            +=========+=========+
            | 50      | 6       |
            +---------+---------+
            | 100     | 7       |
            +---------+---------+
            | 500     | 10      |
            +---------+---------+
            | 3000    | 12      |
            +---------+---------+

        This formula is based purely on a limited amount of
        experimentation and will sometimes be wrong.
        """
        # Expected degree
        # XXX: use mag
        g = int(4 + max(0, self.ctx.log(prec/30.0, 2)))
        # Reasonable "worst case"
        g += 2
        return g

    def estimate_error(self, results, prec, epsilon):
        r"""
        Given results from integrations `[I_1, I_2, \ldots, I_k]` done
        with a quadrature of rule of degree `1, 2, \ldots, k`, estimate
        the error of `I_k`.

        For `k = 2`, we estimate `|I_{\infty}-I_2|` as `|I_2-I_1|`.

        For `k > 2`, we extrapolate `|I_{\infty}-I_k| \approx |I_{k+1}-I_k|`
        from `|I_k-I_{k-1}|` and `|I_k-I_{k-2}|` under the assumption
        that each degree increment roughly doubles the accuracy of
        the quadrature rule (this is true for both :class:`TanhSinh`
        and :class:`GaussLegendre`). The extrapolation formula is given
        by Borwein, Bailey & Girgensohn. Although not very conservative,
        this method seems to be very robust in practice.
        """
        if len(results) == 2:
            return abs(results[0]-results[1])
        try:
            if results[-1] == results[-2] == results[-3]:
                return self.ctx.zero
            D1 = self.ctx.log(abs(results[-1]-results[-2]), 10)
            D2 = self.ctx.log(abs(results[-1]-results[-3]), 10)
        except ValueError:
            # log of a zero difference; cannot extrapolate, assume done
            return epsilon
        D3 = -prec
        D4 = min(0, max(D1**2/D2, 2*D1, D3))
        return self.ctx.mpf(10) ** int(D4)

    def summation(self, f, points, prec, epsilon, max_degree, verbose=False):
        """
        Main integration function. Computes the 1D integral over
        the interval specified by *points*. For each subinterval,
        performs quadrature of degree from 1 up to *max_degree*
        until :func:`~mpmath.estimate_error` signals convergence.

        :func:`~mpmath.summation` transforms each subintegration to
        the standard interval and then calls :func:`~mpmath.sum_next`.
        """
        ctx = self.ctx
        I = total_err = ctx.zero
        for i in xrange(len(points)-1):
            a, b = points[i], points[i+1]
            if a == b:
                continue
            # XXX: we could use a single variable transformation,
            # but this is not good in practice. We get better accuracy
            # by having 0 as an endpoint.
            g = f
            if (a, b) == (ctx.ninf, ctx.inf):
                # Fold the doubly infinite integral onto [0, inf] by
                # integrating f(-x) + f(x). Bind the folded integrand to
                # a *local* name instead of rebinding f, so the
                # substitution cannot leak into later subintervals.
                g = lambda x: f(-x) + f(x)
                a, b = (ctx.zero, ctx.inf)
            results = []
            err = ctx.zero
            for degree in xrange(1, max_degree+1):
                nodes = self.get_nodes(a, b, degree, prec, verbose)
                if verbose:
                    print("Integrating from %s to %s (degree %s of %s)" % \
                        (ctx.nstr(a), ctx.nstr(b), degree, max_degree))
                result = self.sum_next(g, nodes, degree, prec, results, verbose)
                results.append(result)
                if degree > 1:
                    err = self.estimate_error(results, prec, epsilon)
                    if verbose:
                        print("Estimated error:", ctx.nstr(err), " epsilon:", ctx.nstr(epsilon), " result: ", ctx.nstr(result))
                    if err <= epsilon:
                        break
            I += results[-1]
            total_err += err
        if total_err > epsilon:
            if verbose:
                print("Failed to reach full accuracy. Estimated error:", ctx.nstr(total_err))
        return I, total_err

    def sum_next(self, f, nodes, degree, prec, previous, verbose=False):
        r"""
        Evaluates the step sum `\sum w_k f(x_k)` where the *nodes* list
        contains the `(w_k, x_k)` pairs.

        :func:`~mpmath.summation` will supply the list *results* of
        values computed by :func:`~mpmath.sum_next` at previous degrees, in
        case the quadrature rule is able to reuse them.
        """
        return self.ctx.fdot((w, f(x)) for (x,w) in nodes)
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
class TanhSinh(QuadratureRule):
    r"""
    This class implements "tanh-sinh" or "doubly exponential"
    quadrature. This quadrature rule is based on the Euler-Maclaurin
    integral formula. By performing a change of variables involving
    nested exponentials / hyperbolic functions (hence the name), the
    derivatives at the endpoints vanish rapidly. Since the error term
    in the Euler-Maclaurin formula depends on the derivatives at the
    endpoints, a simple step sum becomes extremely accurate. In
    practice, this means that doubling the number of evaluation
    points roughly doubles the number of accurate digits.

    Comparison to Gauss-Legendre:
      * Initial computation of nodes is usually faster
      * Handles endpoint singularities better
      * Handles infinite integration intervals better
      * Is slower for smooth integrands once nodes have been computed

    The implementation of the tanh-sinh algorithm is based on the
    description given in Borwein, Bailey & Girgensohn, "Experimentation
    in Mathematics - Computational Paths to Discovery", A K Peters,
    2003, pages 312-313. In the present implementation, a few
    improvements have been made:

      * A more efficient scheme is used to compute nodes (exploiting
        recurrence for the exponential function)
      * The nodes are computed successively instead of all at once

    **References**

    * [Bailey]_
    * http://users.cs.dal.ca/~jborwein/tanh-sinh.pdf

    """

    def sum_next(self, f, nodes, degree, prec, previous, verbose=False):
        """
        Step sum for tanh-sinh quadrature of degree `m`. We exploit the
        fact that half of the abscissas at degree `m` are precisely the
        abscissas from degree `m-1`. Thus reusing the result from
        the previous level allows a 2x speedup.
        """
        # Step length h = 2^(-m); halves with each degree increment
        h = self.ctx.mpf(2)**(-degree)
        # Abscissas overlap, so reusing saves half of the time
        if previous:
            # previous[-1] was computed with step 2h: undo that scaling
            # so the new nodes can simply be added before re-scaling by h
            S = previous[-1]/(h*2)
        else:
            S = self.ctx.zero
        S += self.ctx.fdot((w,f(x)) for (x,w) in nodes)
        return h*S

    def calc_nodes(self, degree, prec, verbose=False):
        r"""
        The abscissas and weights for tanh-sinh quadrature of degree
        `m` are given by

        .. math::

            x_k = \tanh(\pi/2 \sinh(t_k))

            w_k = \pi/2 \cosh(t_k) / \cosh(\pi/2 \sinh(t_k))^2

        where `t_k = t_0 + hk` for a step length `h \sim 2^{-m}`. The
        list of nodes is actually infinite, but the weights die off so
        rapidly that only a few are needed.
        """
        ctx = self.ctx
        nodes = []

        # Raise working precision while generating nodes; restored below
        extra = 20
        ctx.prec += extra
        # Stop adding nodes once the abscissa is within tol of 1
        tol = ctx.ldexp(1, -prec-10)
        pi4 = ctx.pi/4

        # For simplicity, we work in steps h = 1/2^n, with the first point
        # offset so that we can reuse the sum from the previous degree

        # We define degree 1 to include the "degree 0" steps, including
        # the point x = 0. (It doesn't work well otherwise; not sure why.)
        t0 = ctx.ldexp(1, -degree)
        if degree == 1:
            #nodes.append((mpf(0), pi4))
            #nodes.append((-mpf(0), pi4))
            nodes.append((ctx.zero, ctx.pi/2))
            h = t0
        else:
            h = t0*2

        # Since h is fixed, we can compute the next exponential
        # by simply multiplying by exp(h)
        expt0 = ctx.exp(t0)
        a = pi4 * expt0
        b = pi4 / expt0
        udelta = ctx.exp(h)
        urdelta = 1/udelta

        # Upper bound on the iteration count; the tolerance check below
        # normally breaks out much earlier
        for k in xrange(0, 20*2**degree+1):
            # Reference implementation:
            # t = t0 + k*h
            # x = tanh(pi/2 * sinh(t))
            # w = pi/2 * cosh(t) / cosh(pi/2 * sinh(t))**2

            # Fast implementation. Note that c = exp(pi/2 * sinh(t))
            c = ctx.exp(a-b)
            d = 1/c
            co = (c+d)/2
            si = (c-d)/2
            x = si / co
            w = (a+b) / co**2
            diff = abs(x-1)
            if diff <= tol:
                # Weights have decayed below the target precision
                break

            # Nodes come in symmetric +/- pairs with equal weights
            nodes.append((x, w))
            nodes.append((-x, w))

            # Advance a = pi/4*exp(t), b = pi/4*exp(-t) to the next t
            a *= udelta
            b *= urdelta

            if verbose and k % 300 == 150:
                # Note: the number displayed is rather arbitrary. Should
                # figure out how to print something that looks more like a
                # percentage
                print("Calculating nodes:", ctx.nstr(-ctx.log(diff, 10) / prec))

        # Restore the caller's working precision
        ctx.prec -= extra
        return nodes
|
| 387 |
+
|
| 388 |
+
|
| 389 |
+
class GaussLegendre(QuadratureRule):
    r"""
    This class implements Gauss-Legendre quadrature, which is
    exceptionally efficient for polynomials and polynomial-like (i.e.
    very smooth) integrands.

    The abscissas and weights are given by roots and values of
    Legendre polynomials, which are the orthogonal polynomials
    on `[-1, 1]` with respect to the unit weight
    (see :func:`~mpmath.legendre`).

    In this implementation, we take the "degree" `m` of the quadrature
    to denote a Gauss-Legendre rule of degree `3 \cdot 2^m` (following
    Borwein, Bailey & Girgensohn). This way we get quadratic, rather
    than linear, convergence as the degree is incremented.

    Comparison to tanh-sinh quadrature:
      * Is faster for smooth integrands once nodes have been computed
      * Initial computation of nodes is usually slower
      * Handles endpoint singularities worse
      * Handles infinite integration intervals worse

    """

    def calc_nodes(self, degree, prec, verbose=False):
        r"""
        Calculates the abscissas and weights for Gauss-Legendre
        quadrature of degree of given degree (actually `3 \cdot 2^m`).
        """
        ctx = self.ctx
        # It is important that the epsilon is set lower than the
        # "real" epsilon
        epsilon = ctx.ldexp(1, -prec-8)
        # Fairly high precision might be required for accurate
        # evaluation of the roots
        orig = ctx.prec
        ctx.prec = int(prec*1.5)
        if degree == 1:
            # Closed-form 3-point rule: x = sqrt(3/5), weights 5/9, 8/9, 5/9
            x = ctx.sqrt(ctx.mpf(3)/5)
            w = ctx.mpf(5)/9
            nodes = [(-x,w),(ctx.zero,ctx.mpf(8)/9),(x,w)]
            ctx.prec = orig
            return nodes
        nodes = []
        # Number of points n = 3*2^(m-1); roots are symmetric, so only
        # the positive half (plus center, when n is odd) is computed
        n = 3*2**(degree-1)
        upto = n//2 + 1
        for j in xrange(1, upto):
            # Asymptotic formula for the roots
            r = ctx.mpf(math.cos(math.pi*(j-0.25)/(n+0.5)))
            # Newton iteration
            while 1:
                t1, t2 = 1, 0
                # Evaluates the Legendre polynomial using its defining
                # recurrence relation
                for j1 in xrange(1,n+1):
                    t3, t2, t1 = t2, t1, ((2*j1-1)*r*t1 - (j1-1)*t2)/j1
                # t4 = P'_n(r); a is the Newton step P_n(r)/P'_n(r)
                t4 = n*(r*t1-t2)/(r**2-1)
                a = t1/t4
                r = r - a
                if abs(a) < epsilon:
                    break
            x = r
            # Standard Gauss-Legendre weight: 2 / ((1-x^2) P'_n(x)^2)
            w = 2/((1-r**2)*t4**2)
            if verbose and j % 30 == 15:
                print("Computing nodes (%i of %i)" % (j, upto))
            # Roots come in symmetric +/- pairs with equal weights
            nodes.append((x, w))
            nodes.append((-x, w))
        # Restore the caller's working precision
        ctx.prec = orig
        return nodes
|
| 458 |
+
|
| 459 |
+
class QuadratureMethods(object):
|
| 460 |
+
|
| 461 |
+
    def __init__(ctx, *args, **kwargs):
        # Note: following mpmath's mixin convention, the context object
        # itself plays the role of "self" here. One singleton rule
        # instance of each kind is attached to the context, so node
        # caches are shared across all integrations on that context.
        ctx._gauss_legendre = GaussLegendre(ctx)
        ctx._tanh_sinh = TanhSinh(ctx)
|
| 464 |
+
|
| 465 |
+
def quad(ctx, f, *points, **kwargs):
|
| 466 |
+
r"""
|
| 467 |
+
Computes a single, double or triple integral over a given
|
| 468 |
+
1D interval, 2D rectangle, or 3D cuboid. A basic example::
|
| 469 |
+
|
| 470 |
+
>>> from mpmath import *
|
| 471 |
+
>>> mp.dps = 15; mp.pretty = True
|
| 472 |
+
>>> quad(sin, [0, pi])
|
| 473 |
+
2.0
|
| 474 |
+
|
| 475 |
+
A basic 2D integral::
|
| 476 |
+
|
| 477 |
+
>>> f = lambda x, y: cos(x+y/2)
|
| 478 |
+
>>> quad(f, [-pi/2, pi/2], [0, pi])
|
| 479 |
+
4.0
|
| 480 |
+
|
| 481 |
+
**Interval format**
|
| 482 |
+
|
| 483 |
+
The integration range for each dimension may be specified
|
| 484 |
+
using a list or tuple. Arguments are interpreted as follows:
|
| 485 |
+
|
| 486 |
+
``quad(f, [x1, x2])`` -- calculates
|
| 487 |
+
`\int_{x_1}^{x_2} f(x) \, dx`
|
| 488 |
+
|
| 489 |
+
``quad(f, [x1, x2], [y1, y2])`` -- calculates
|
| 490 |
+
`\int_{x_1}^{x_2} \int_{y_1}^{y_2} f(x,y) \, dy \, dx`
|
| 491 |
+
|
| 492 |
+
``quad(f, [x1, x2], [y1, y2], [z1, z2])`` -- calculates
|
| 493 |
+
`\int_{x_1}^{x_2} \int_{y_1}^{y_2} \int_{z_1}^{z_2} f(x,y,z)
|
| 494 |
+
\, dz \, dy \, dx`
|
| 495 |
+
|
| 496 |
+
Endpoints may be finite or infinite. An interval descriptor
|
| 497 |
+
may also contain more than two points. In this
|
| 498 |
+
case, the integration is split into subintervals, between
|
| 499 |
+
each pair of consecutive points. This is useful for
|
| 500 |
+
dealing with mid-interval discontinuities, or integrating
|
| 501 |
+
over large intervals where the function is irregular or
|
| 502 |
+
oscillates.
|
| 503 |
+
|
| 504 |
+
**Options**
|
| 505 |
+
|
| 506 |
+
:func:`~mpmath.quad` recognizes the following keyword arguments:
|
| 507 |
+
|
| 508 |
+
*method*
|
| 509 |
+
Chooses integration algorithm (described below).
|
| 510 |
+
*error*
|
| 511 |
+
If set to true, :func:`~mpmath.quad` returns `(v, e)` where `v` is the
|
| 512 |
+
integral and `e` is the estimated error.
|
| 513 |
+
*maxdegree*
|
| 514 |
+
Maximum degree of the quadrature rule to try before
|
| 515 |
+
quitting.
|
| 516 |
+
*verbose*
|
| 517 |
+
Print details about progress.
|
| 518 |
+
|
| 519 |
+
**Algorithms**
|
| 520 |
+
|
| 521 |
+
Mpmath presently implements two integration algorithms: tanh-sinh
|
| 522 |
+
quadrature and Gauss-Legendre quadrature. These can be selected
|
| 523 |
+
using *method='tanh-sinh'* or *method='gauss-legendre'* or by
|
| 524 |
+
passing the classes *method=TanhSinh*, *method=GaussLegendre*.
|
| 525 |
+
The functions :func:`~mpmath.quadts` and :func:`~mpmath.quadgl` are also available
|
| 526 |
+
as shortcuts.
|
| 527 |
+
|
| 528 |
+
Both algorithms have the property that doubling the number of
|
| 529 |
+
evaluation points roughly doubles the accuracy, so both are ideal
|
| 530 |
+
for high precision quadrature (hundreds or thousands of digits).
|
| 531 |
+
|
| 532 |
+
At high precision, computing the nodes and weights for the
|
| 533 |
+
integration can be expensive (more expensive than computing the
|
| 534 |
+
function values). To make repeated integrations fast, nodes
|
| 535 |
+
are automatically cached.
|
| 536 |
+
|
| 537 |
+
The advantages of the tanh-sinh algorithm are that it tends to
|
| 538 |
+
handle endpoint singularities well, and that the nodes are cheap
|
| 539 |
+
to compute on the first run. For these reasons, it is used by
|
| 540 |
+
:func:`~mpmath.quad` as the default algorithm.
|
| 541 |
+
|
| 542 |
+
Gauss-Legendre quadrature often requires fewer function
|
| 543 |
+
evaluations, and is therefore often faster for repeated use, but
|
| 544 |
+
the algorithm does not handle endpoint singularities as well and
|
| 545 |
+
the nodes are more expensive to compute. Gauss-Legendre quadrature
|
| 546 |
+
can be a better choice if the integrand is smooth and repeated
|
| 547 |
+
integrations are required (e.g. for multiple integrals).
|
| 548 |
+
|
| 549 |
+
See the documentation for :class:`TanhSinh` and
|
| 550 |
+
:class:`GaussLegendre` for additional details.
|
| 551 |
+
|
| 552 |
+
**Examples of 1D integrals**
|
| 553 |
+
|
| 554 |
+
Intervals may be infinite or half-infinite. The following two
|
| 555 |
+
examples evaluate the limits of the inverse tangent function
|
| 556 |
+
(`\int 1/(1+x^2) = \tan^{-1} x`), and the Gaussian integral
|
| 557 |
+
`\int_{\infty}^{\infty} \exp(-x^2)\,dx = \sqrt{\pi}`::
|
| 558 |
+
|
| 559 |
+
>>> mp.dps = 15
|
| 560 |
+
>>> quad(lambda x: 2/(x**2+1), [0, inf])
|
| 561 |
+
3.14159265358979
|
| 562 |
+
>>> quad(lambda x: exp(-x**2), [-inf, inf])**2
|
| 563 |
+
3.14159265358979
|
| 564 |
+
|
| 565 |
+
Integrals can typically be resolved to high precision.
|
| 566 |
+
The following computes 50 digits of `\pi` by integrating the
|
| 567 |
+
area of the half-circle defined by `x^2 + y^2 \le 1`,
|
| 568 |
+
`-1 \le x \le 1`, `y \ge 0`::
|
| 569 |
+
|
| 570 |
+
>>> mp.dps = 50
|
| 571 |
+
>>> 2*quad(lambda x: sqrt(1-x**2), [-1, 1])
|
| 572 |
+
3.1415926535897932384626433832795028841971693993751
|
| 573 |
+
|
| 574 |
+
One can just as well compute 1000 digits (output truncated)::
|
| 575 |
+
|
| 576 |
+
>>> mp.dps = 1000
|
| 577 |
+
>>> 2*quad(lambda x: sqrt(1-x**2), [-1, 1]) #doctest:+ELLIPSIS
|
| 578 |
+
3.141592653589793238462643383279502884...216420199
|
| 579 |
+
|
| 580 |
+
Complex integrals are supported. The following computes
|
| 581 |
+
a residue at `z = 0` by integrating counterclockwise along the
|
| 582 |
+
diamond-shaped path from `1` to `+i` to `-1` to `-i` to `1`::
|
| 583 |
+
|
| 584 |
+
>>> mp.dps = 15
|
| 585 |
+
>>> chop(quad(lambda z: 1/z, [1,j,-1,-j,1]))
|
| 586 |
+
(0.0 + 6.28318530717959j)
|
| 587 |
+
|
| 588 |
+
**Examples of 2D and 3D integrals**
|
| 589 |
+
|
| 590 |
+
Here are several nice examples of analytically solvable
|
| 591 |
+
2D integrals (taken from MathWorld [1]) that can be evaluated
|
| 592 |
+
to high precision fairly rapidly by :func:`~mpmath.quad`::
|
| 593 |
+
|
| 594 |
+
>>> mp.dps = 30
|
| 595 |
+
>>> f = lambda x, y: (x-1)/((1-x*y)*log(x*y))
|
| 596 |
+
>>> quad(f, [0, 1], [0, 1])
|
| 597 |
+
0.577215664901532860606512090082
|
| 598 |
+
>>> +euler
|
| 599 |
+
0.577215664901532860606512090082
|
| 600 |
+
|
| 601 |
+
>>> f = lambda x, y: 1/sqrt(1+x**2+y**2)
|
| 602 |
+
>>> quad(f, [-1, 1], [-1, 1])
|
| 603 |
+
3.17343648530607134219175646705
|
| 604 |
+
>>> 4*log(2+sqrt(3))-2*pi/3
|
| 605 |
+
3.17343648530607134219175646705
|
| 606 |
+
|
| 607 |
+
>>> f = lambda x, y: 1/(1-x**2 * y**2)
|
| 608 |
+
>>> quad(f, [0, 1], [0, 1])
|
| 609 |
+
1.23370055013616982735431137498
|
| 610 |
+
>>> pi**2 / 8
|
| 611 |
+
1.23370055013616982735431137498
|
| 612 |
+
|
| 613 |
+
>>> quad(lambda x, y: 1/(1-x*y), [0, 1], [0, 1])
|
| 614 |
+
1.64493406684822643647241516665
|
| 615 |
+
>>> pi**2 / 6
|
| 616 |
+
1.64493406684822643647241516665
|
| 617 |
+
|
| 618 |
+
Multiple integrals may be done over infinite ranges::
|
| 619 |
+
|
| 620 |
+
>>> mp.dps = 15
|
| 621 |
+
>>> print(quad(lambda x,y: exp(-x-y), [0, inf], [1, inf]))
|
| 622 |
+
0.367879441171442
|
| 623 |
+
>>> print(1/e)
|
| 624 |
+
0.367879441171442
|
| 625 |
+
|
| 626 |
+
For nonrectangular areas, one can call :func:`~mpmath.quad` recursively.
|
| 627 |
+
For example, we can replicate the earlier example of calculating
|
| 628 |
+
`\pi` by integrating over the unit-circle, and actually use double
|
| 629 |
+
quadrature to actually measure the area circle::
|
| 630 |
+
|
| 631 |
+
>>> f = lambda x: quad(lambda y: 1, [-sqrt(1-x**2), sqrt(1-x**2)])
|
| 632 |
+
>>> quad(f, [-1, 1])
|
| 633 |
+
3.14159265358979
|
| 634 |
+
|
| 635 |
+
Here is a simple triple integral::
|
| 636 |
+
|
| 637 |
+
>>> mp.dps = 15
|
| 638 |
+
>>> f = lambda x,y,z: x*y/(1+z)
|
| 639 |
+
>>> quad(f, [0,1], [0,1], [1,2], method='gauss-legendre')
|
| 640 |
+
0.101366277027041
|
| 641 |
+
>>> (log(3)-log(2))/4
|
| 642 |
+
0.101366277027041
|
| 643 |
+
|
| 644 |
+
**Singularities**
|
| 645 |
+
|
| 646 |
+
Both tanh-sinh and Gauss-Legendre quadrature are designed to
|
| 647 |
+
integrate smooth (infinitely differentiable) functions. Neither
|
| 648 |
+
algorithm copes well with mid-interval singularities (such as
|
| 649 |
+
mid-interval discontinuities in `f(x)` or `f'(x)`).
|
| 650 |
+
The best solution is to split the integral into parts::
|
| 651 |
+
|
| 652 |
+
>>> mp.dps = 15
|
| 653 |
+
>>> quad(lambda x: abs(sin(x)), [0, 2*pi]) # Bad
|
| 654 |
+
3.99900894176779
|
| 655 |
+
>>> quad(lambda x: abs(sin(x)), [0, pi, 2*pi]) # Good
|
| 656 |
+
4.0
|
| 657 |
+
|
| 658 |
+
The tanh-sinh rule often works well for integrands having a
|
| 659 |
+
singularity at one or both endpoints::
|
| 660 |
+
|
| 661 |
+
>>> mp.dps = 15
|
| 662 |
+
>>> quad(log, [0, 1], method='tanh-sinh') # Good
|
| 663 |
+
-1.0
|
| 664 |
+
>>> quad(log, [0, 1], method='gauss-legendre') # Bad
|
| 665 |
+
-0.999932197413801
|
| 666 |
+
|
| 667 |
+
However, the result may still be inaccurate for some functions::
|
| 668 |
+
|
| 669 |
+
>>> quad(lambda x: 1/sqrt(x), [0, 1], method='tanh-sinh')
|
| 670 |
+
1.99999999946942
|
| 671 |
+
|
| 672 |
+
This problem is not due to the quadrature rule per se, but to
|
| 673 |
+
numerical amplification of errors in the nodes. The problem can be
|
| 674 |
+
circumvented by temporarily increasing the precision::
|
| 675 |
+
|
| 676 |
+
>>> mp.dps = 30
|
| 677 |
+
>>> a = quad(lambda x: 1/sqrt(x), [0, 1], method='tanh-sinh')
|
| 678 |
+
>>> mp.dps = 15
|
| 679 |
+
>>> +a
|
| 680 |
+
2.0
|
| 681 |
+
|
| 682 |
+
**Highly variable functions**
|
| 683 |
+
|
| 684 |
+
For functions that are smooth (in the sense of being infinitely
|
| 685 |
+
differentiable) but contain sharp mid-interval peaks or many
|
| 686 |
+
"bumps", :func:`~mpmath.quad` may fail to provide full accuracy. For
|
| 687 |
+
example, with default settings, :func:`~mpmath.quad` is able to integrate
|
| 688 |
+
`\sin(x)` accurately over an interval of length 100 but not over
|
| 689 |
+
length 1000::
|
| 690 |
+
|
| 691 |
+
>>> quad(sin, [0, 100]); 1-cos(100) # Good
|
| 692 |
+
0.137681127712316
|
| 693 |
+
0.137681127712316
|
| 694 |
+
>>> quad(sin, [0, 1000]); 1-cos(1000) # Bad
|
| 695 |
+
-37.8587612408485
|
| 696 |
+
0.437620923709297
|
| 697 |
+
|
| 698 |
+
One solution is to break the integration into 10 intervals of
|
| 699 |
+
length 100::
|
| 700 |
+
|
| 701 |
+
>>> quad(sin, linspace(0, 1000, 10)) # Good
|
| 702 |
+
0.437620923709297
|
| 703 |
+
|
| 704 |
+
Another is to increase the degree of the quadrature::
|
| 705 |
+
|
| 706 |
+
>>> quad(sin, [0, 1000], maxdegree=10) # Also good
|
| 707 |
+
0.437620923709297
|
| 708 |
+
|
| 709 |
+
Whether splitting the interval or increasing the degree is
|
| 710 |
+
more efficient differs from case to case. Another example is the
|
| 711 |
+
function `1/(1+x^2)`, which has a sharp peak centered around
|
| 712 |
+
`x = 0`::
|
| 713 |
+
|
| 714 |
+
>>> f = lambda x: 1/(1+x**2)
|
| 715 |
+
>>> quad(f, [-100, 100]) # Bad
|
| 716 |
+
3.64804647105268
|
| 717 |
+
>>> quad(f, [-100, 100], maxdegree=10) # Good
|
| 718 |
+
3.12159332021646
|
| 719 |
+
>>> quad(f, [-100, 0, 100]) # Also good
|
| 720 |
+
3.12159332021646
|
| 721 |
+
|
| 722 |
+
**References**
|
| 723 |
+
|
| 724 |
+
1. http://mathworld.wolfram.com/DoubleIntegral.html
|
| 725 |
+
|
| 726 |
+
"""
|
| 727 |
+
rule = kwargs.get('method', 'tanh-sinh')
|
| 728 |
+
if type(rule) is str:
|
| 729 |
+
if rule == 'tanh-sinh':
|
| 730 |
+
rule = ctx._tanh_sinh
|
| 731 |
+
elif rule == 'gauss-legendre':
|
| 732 |
+
rule = ctx._gauss_legendre
|
| 733 |
+
else:
|
| 734 |
+
raise ValueError("unknown quadrature rule: %s" % rule)
|
| 735 |
+
else:
|
| 736 |
+
rule = rule(ctx)
|
| 737 |
+
verbose = kwargs.get('verbose')
|
| 738 |
+
dim = len(points)
|
| 739 |
+
orig = prec = ctx.prec
|
| 740 |
+
epsilon = ctx.eps/8
|
| 741 |
+
m = kwargs.get('maxdegree') or rule.guess_degree(prec)
|
| 742 |
+
points = [ctx._as_points(p) for p in points]
|
| 743 |
+
try:
|
| 744 |
+
ctx.prec += 20
|
| 745 |
+
if dim == 1:
|
| 746 |
+
v, err = rule.summation(f, points[0], prec, epsilon, m, verbose)
|
| 747 |
+
elif dim == 2:
|
| 748 |
+
v, err = rule.summation(lambda x: \
|
| 749 |
+
rule.summation(lambda y: f(x,y), \
|
| 750 |
+
points[1], prec, epsilon, m)[0],
|
| 751 |
+
points[0], prec, epsilon, m, verbose)
|
| 752 |
+
elif dim == 3:
|
| 753 |
+
v, err = rule.summation(lambda x: \
|
| 754 |
+
rule.summation(lambda y: \
|
| 755 |
+
rule.summation(lambda z: f(x,y,z), \
|
| 756 |
+
points[2], prec, epsilon, m)[0],
|
| 757 |
+
points[1], prec, epsilon, m)[0],
|
| 758 |
+
points[0], prec, epsilon, m, verbose)
|
| 759 |
+
else:
|
| 760 |
+
raise NotImplementedError("quadrature must have dim 1, 2 or 3")
|
| 761 |
+
finally:
|
| 762 |
+
ctx.prec = orig
|
| 763 |
+
if kwargs.get("error"):
|
| 764 |
+
return +v, err
|
| 765 |
+
return +v
|
| 766 |
+
|
| 767 |
+
def quadts(ctx, *args, **kwargs):
    """
    Performs tanh-sinh quadrature.  Calling ``quadts(func, *points, ...)``
    is equivalent to calling ``quad(func, *points, ..., method=TanhSinh)``.
    For example, a single integral and a double integral:

        quadts(lambda x: exp(cos(x)), [0, 1])
        quadts(lambda x, y: exp(cos(x+y)), [0, 1], [0, 1])

    See the documentation for quad for information about how points
    arguments and keyword arguments are parsed, and the documentation
    for TanhSinh for algorithmic information about tanh-sinh quadrature.
    """
    # Forward everything to quad, forcing the tanh-sinh rule.
    options = dict(kwargs)
    options['method'] = 'tanh-sinh'
    return ctx.quad(*args, **options)
|
| 790 |
+
|
| 791 |
+
def quadgl(ctx, *args, **kwargs):
    """
    Performs Gauss-Legendre quadrature. The call

        quadgl(func, *points, ...)

    is simply a shortcut for:

        quad(func, *points, ..., method=GaussLegendre)

    For example, a single integral and a double integral:

        quadgl(lambda x: exp(cos(x)), [0, 1])
        quadgl(lambda x, y: exp(cos(x+y)), [0, 1], [0, 1])

    See the documentation for quad for information about how points
    arguments and keyword arguments are parsed.

    See documentation for GaussLegendre for algorithmic information
    about Gauss-Legendre quadrature.
    """
    # (Docstring fixed: it previously referred to TanhSinh/tanh-sinh,
    # a copy-paste from quadts.)
    kwargs['method'] = 'gauss-legendre'
    return ctx.quad(*args, **kwargs)
|
| 814 |
+
|
| 815 |
+
def quadosc(ctx, f, interval, omega=None, period=None, zeros=None):
    r"""
    Calculates

    .. math ::

        I = \int_a^b f(x) dx

    where at least one of `a` and `b` is infinite and where
    `f(x) = g(x) \cos(\omega x + \phi)` for some slowly
    decreasing function `g(x)`. With proper input, :func:`~mpmath.quadosc`
    can also handle oscillatory integrals where the oscillation
    rate is different from a pure sine or cosine wave.

    In the standard case when `|a| < \infty, b = \infty`,
    :func:`~mpmath.quadosc` works by evaluating the infinite series

    .. math ::

        I = \int_a^{x_1} f(x) dx +
        \sum_{k=1}^{\infty} \int_{x_k}^{x_{k+1}} f(x) dx

    where `x_k` are consecutive zeros (alternatively
    some other periodic reference point) of `f(x)`.
    Accordingly, :func:`~mpmath.quadosc` requires information about the
    zeros of `f(x)`. For a periodic function, you can specify
    the zeros by either providing the angular frequency `\omega`
    (*omega*) or the *period* `2 \pi/\omega`. In general, you can
    specify the `n`-th zero by providing the *zeros* arguments.
    Below is an example of each::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> f = lambda x: sin(3*x)/(x**2+1)
        >>> quadosc(f, [0,inf], omega=3)
        0.37833007080198
        >>> quadosc(f, [0,inf], period=2*pi/3)
        0.37833007080198
        >>> quadosc(f, [0,inf], zeros=lambda n: pi*n/3)
        0.37833007080198
        >>> (ei(3)*exp(-3)-exp(3)*ei(-3))/2    # Computed by Mathematica
        0.37833007080198

    Note that *zeros* was specified to multiply `n` by the
    *half-period*, not the full period. In theory, it does not matter
    whether each partial integral is done over a half period or a full
    period. However, if done over half-periods, the infinite series
    passed to :func:`~mpmath.nsum` becomes an *alternating series* and this
    typically makes the extrapolation much more efficient.

    Here is an example of an integration over the entire real line,
    and a half-infinite integration starting at `-\infty`::

        >>> quadosc(lambda x: cos(x)/(1+x**2), [-inf, inf], omega=1)
        1.15572734979092
        >>> pi/e
        1.15572734979092
        >>> quadosc(lambda x: cos(x)/x**2, [-inf, -1], period=2*pi)
        -0.0844109505595739
        >>> cos(1)+si(1)-pi/2
        -0.0844109505595738

    Of course, the integrand may contain a complex exponential just as
    well as a real sine or cosine::

        >>> quadosc(lambda x: exp(3*j*x)/(1+x**2), [-inf,inf], omega=3)
        (0.156410688228254 + 0.0j)
        >>> pi/e**3
        0.156410688228254
        >>> quadosc(lambda x: exp(3*j*x)/(2+x+x**2), [-inf,inf], omega=3)
        (0.00317486988463794 - 0.0447701735209082j)
        >>> 2*pi/sqrt(7)/exp(3*(j+sqrt(7))/2)
        (0.00317486988463794 - 0.0447701735209082j)

    **Non-periodic functions**

    If `f(x) = g(x) h(x)` for some function `h(x)` that is not
    strictly periodic, *omega* or *period* might not work, and it might
    be necessary to use *zeros*.

    A notable exception can be made for Bessel functions which, though not
    periodic, are "asymptotically periodic" in a sufficiently strong sense
    that the sum extrapolation will work out::

        >>> quadosc(j0, [0, inf], period=2*pi)
        1.0
        >>> quadosc(j1, [0, inf], period=2*pi)
        1.0

    More properly, one should provide the exact Bessel function zeros::

        >>> j0zero = lambda n: findroot(j0, pi*(n-0.25))
        >>> quadosc(j0, [0, inf], zeros=j0zero)
        1.0

    For an example where *zeros* becomes necessary, consider the
    complete Fresnel integrals

    .. math ::

        \int_0^{\infty} \cos x^2\,dx = \int_0^{\infty} \sin x^2\,dx
        = \sqrt{\frac{\pi}{8}}.

    Although the integrands do not decrease in magnitude as
    `x \to \infty`, the integrals are convergent since the oscillation
    rate increases (causing consecutive periods to asymptotically
    cancel out). These integrals are virtually impossible to calculate
    to any kind of accuracy using standard quadrature rules. However,
    if one provides the correct asymptotic distribution of zeros
    (`x_n \sim \sqrt{n}`), :func:`~mpmath.quadosc` works::

        >>> mp.dps = 30
        >>> f = lambda x: cos(x**2)
        >>> quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n))
        0.626657068657750125603941321203
        >>> f = lambda x: sin(x**2)
        >>> quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n))
        0.626657068657750125603941321203
        >>> sqrt(pi/8)
        0.626657068657750125603941321203

    (Interestingly, these integrals can still be evaluated if one
    places some other constant than `\pi` in the square root sign.)

    In general, if `f(x) \sim g(x) \cos(h(x))`, the zeros follow
    the inverse-function distribution `h^{-1}(x)`::

        >>> mp.dps = 15
        >>> f = lambda x: sin(exp(x))
        >>> quadosc(f, [1,inf], zeros=lambda n: log(n))
        -0.25024394235267
        >>> pi/2-si(e)
        -0.250243942352671

    **Non-alternating functions**

    If the integrand oscillates around a positive value, without
    alternating signs, the extrapolation might fail. A simple trick
    that sometimes works is to multiply or divide the frequency by 2::

        >>> f = lambda x: 1/x**2+sin(x)/x**4
        >>> quadosc(f, [1,inf], omega=1)  # Bad
        1.28642190869861
        >>> quadosc(f, [1,inf], omega=0.5)  # Perfect
        1.28652953559617
        >>> 1+(cos(1)+ci(1)+sin(1))/6
        1.28652953559617

    **Fast decay**

    :func:`~mpmath.quadosc` is primarily useful for slowly decaying
    integrands. If the integrand decreases exponentially or faster,
    :func:`~mpmath.quad` will likely handle it without trouble (and generally be
    much faster than :func:`~mpmath.quadosc`)::

        >>> quadosc(lambda x: cos(x)/exp(x), [0, inf], omega=1)
        0.5
        >>> quad(lambda x: cos(x)/exp(x), [0, inf])
        0.5

    """
    a, b = ctx._as_points(interval)
    a = ctx.convert(a)
    b = ctx.convert(b)
    if [omega, period, zeros].count(None) != 2:
        raise ValueError(
            "must specify exactly one of omega, period, zeros")
    if a == ctx.ninf and b == ctx.inf:
        # Split a doubly infinite interval at the origin and handle
        # each half-line separately.
        s1 = ctx.quadosc(f, [a, 0], omega=omega, zeros=zeros, period=period)
        s2 = ctx.quadosc(f, [0, b], omega=omega, zeros=zeros, period=period)
        return s1 + s2
    if a == ctx.ninf:
        # Reflect (-inf, b] onto [-b, inf) so that only the b = +inf
        # case needs to be handled below.
        if zeros:
            # BUG FIX: *zeros* must be passed by keyword.  ctx.quadosc is
            # a bound method, so the third positional slot is *omega*;
            # passing the reflected zeros callback positionally bound it
            # to omega and made the recursive call fail.
            return ctx.quadosc(lambda x: f(-x), [-b, -a],
                zeros=lambda n: zeros(-n))
        else:
            return ctx.quadosc(lambda x: f(-x), [-b, -a],
                omega=omega, period=period)
    if b != ctx.inf:
        raise ValueError("quadosc requires an infinite integration interval")
    if not zeros:
        if omega:
            period = 2*ctx.pi/omega
        # Use half-period reference points: the resulting series of
        # partial integrals is alternating, which typically speeds up
        # the nsum extrapolation considerably.
        zeros = lambda n: n*period/2
    n = 1
    # Integral from a up to the first reference point...
    s = ctx.quadgl(f, [a, zeros(n)])
    def term(k):
        # Partial integral between consecutive reference points.
        return ctx.quadgl(f, [zeros(k), zeros(k+1)])
    # ...plus the extrapolated sum of the remaining partial integrals.
    s += ctx.nsum(term, [n, ctx.inf])
    return s
|
| 1009 |
+
|
| 1010 |
+
def quadsubdiv(ctx, f, interval, tol=None, maxintervals=None, **kwargs):
    """
    Integrates *f* over the interval or path *interval* by applying
    :func:`~mpmath.quad` to subintervals, adaptively bisecting every
    subinterval whose error estimate exceeds *tol* (by default the
    working-precision epsilon), until at most *maxintervals*
    subdivisions (default ``10*ctx.prec``) have been performed.

    This gives an accurate answer for some integrals where a single
    :func:`~mpmath.quad` call fails, e.g. integrands with kinks, jumps
    or many oscillations::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> quadsubdiv(lambda x: abs(sin(x)), [0, 2*pi])
        4.0
        >>> quadsubdiv(sin, [0, 1000])
        0.437620923709297
        >>> quadsubdiv(lambda x: 1/(1+x**2), [-100, 100])
        3.12159332021646

    Keyword arguments are forwarded to :func:`~mpmath.quad`; with
    ``error=True`` a ``(value, error_estimate)`` pair is returned.
    Note that the per-subinterval error estimates may themselves be
    inaccurate, so subdivision does not *guarantee* a correct result;
    a finer initial subdivision (passing more points) or higher
    precision may be needed for very sharply peaked integrands.

    """
    if maxintervals is None:
        maxintervals = 10 * ctx.prec
    if tol is None:
        tol = +ctx.eps
    noisy = kwargs.get("verbose")
    # Options for the inner quad calls: always request the error
    # estimate, never the per-call verbosity.
    inner_kwargs = dict(kwargs, verbose=False, error=True)
    # Work stack of (a, b) subintervals, processed LIFO.
    stack = list(zip(interval[:-1], interval[1:]))
    total = ctx.zero
    total_error = ctx.zero
    count = 0
    saved_prec = ctx.prec
    try:
        ctx.prec = saved_prec + 5
        while stack:
            a, b = stack.pop()
            value, err = ctx.quad(f, [a, b], **inner_kwargs)
            if noisy:
                print("subinterval", count, a, b, err)
            if err < tol or count > maxintervals:
                # Accept this piece (either converged or out of budget).
                total += value
                total_error += err
                continue
            count += 1
            if count == maxintervals and noisy:
                print("warning: number of intervals exceeded maxintervals")
            # Choose a bisection point, keeping infinite endpoints usable.
            if a == -ctx.inf and b == ctx.inf:
                mid = 0
            elif a == -ctx.inf:
                mid = min(b - 1, 2 * b)
            elif b == ctx.inf:
                mid = max(a + 1, 2 * a)
            else:
                mid = a + (b - a) / 2
            stack.append((a, mid))
            stack.append((mid, b))
    finally:
        ctx.prec = saved_prec
    if kwargs.get("error"):
        return +total, +total_error
    return +total
|
| 1112 |
+
|
| 1113 |
+
if __name__ == '__main__':
    # When this file is executed directly, run the >>> examples in the
    # module's docstrings as doctests.
    import doctest
    doctest.testmod()
|
.venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (806 Bytes). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/bessel.cpython-311.pyc
ADDED
|
Binary file (63 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/elliptic.cpython-311.pyc
ADDED
|
Binary file (57.8 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/expintegrals.cpython-311.pyc
ADDED
|
Binary file (23.6 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/factorials.cpython-311.pyc
ADDED
|
Binary file (10.8 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/functions.cpython-311.pyc
ADDED
|
Binary file (33.3 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/hypergeometric.cpython-311.pyc
ADDED
|
Binary file (76.8 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/rszeta.cpython-311.pyc
ADDED
|
Binary file (73.8 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/zeta.cpython-311.pyc
ADDED
|
Binary file (64.2 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/zetazeros.cpython-311.pyc
ADDED
|
Binary file (45.8 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/libmp/libelefun.py
ADDED
|
@@ -0,0 +1,1428 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
This module implements computation of elementary transcendental
|
| 3 |
+
functions (powers, logarithms, trigonometric and hyperbolic
|
| 4 |
+
functions, inverse trigonometric and hyperbolic) for real
|
| 5 |
+
floating-point numbers.
|
| 6 |
+
|
| 7 |
+
For complex and interval implementations of the same functions,
|
| 8 |
+
see libmpc and libmpi.
|
| 9 |
+
|
| 10 |
+
"""
|
| 11 |
+
|
| 12 |
+
import math
|
| 13 |
+
from bisect import bisect
|
| 14 |
+
|
| 15 |
+
from .backend import xrange
|
| 16 |
+
from .backend import MPZ, MPZ_ZERO, MPZ_ONE, MPZ_TWO, MPZ_FIVE, BACKEND
|
| 17 |
+
|
| 18 |
+
from .libmpf import (
|
| 19 |
+
round_floor, round_ceiling, round_down, round_up,
|
| 20 |
+
round_nearest, round_fast,
|
| 21 |
+
ComplexResult,
|
| 22 |
+
bitcount, bctable, lshift, rshift, giant_steps, sqrt_fixed,
|
| 23 |
+
from_int, to_int, from_man_exp, to_fixed, to_float, from_float,
|
| 24 |
+
from_rational, normalize,
|
| 25 |
+
fzero, fone, fnone, fhalf, finf, fninf, fnan,
|
| 26 |
+
mpf_cmp, mpf_sign, mpf_abs,
|
| 27 |
+
mpf_pos, mpf_neg, mpf_add, mpf_sub, mpf_mul, mpf_div, mpf_shift,
|
| 28 |
+
mpf_rdiv_int, mpf_pow_int, mpf_sqrt,
|
| 29 |
+
reciprocal_rnd, negative_rnd, mpf_perturb,
|
| 30 |
+
isqrt_fast
|
| 31 |
+
)
|
| 32 |
+
|
| 33 |
+
from .libintmath import ifib
|
| 34 |
+
|
| 35 |
+
|
| 36 |
+
#-------------------------------------------------------------------------------
# Tuning parameters
#-------------------------------------------------------------------------------

# Cutoff for computing exp from cosh+sinh. This reduces the
# number of terms by half, but also requires a square root which
# is expensive with the pure-Python square root code.
if BACKEND == 'python':
    EXP_COSH_CUTOFF = 600
else:
    EXP_COSH_CUTOFF = 400
# Cutoff for using more than 2 series
EXP_SERIES_U_CUTOFF = 1500

# Also basically determined by sqrt
if BACKEND == 'python':
    COS_SIN_CACHE_PREC = 400
else:
    COS_SIN_CACHE_PREC = 200
COS_SIN_CACHE_STEP = 8
cos_sin_cache = {}

# Number of integer logarithms to cache (for zeta sums)
MAX_LOG_INT_CACHE = 2000
log_int_cache = {}

LOG_TAYLOR_PREC = 2500  # Use Taylor series with caching up to this prec
LOG_TAYLOR_SHIFT = 9    # Cache log values in steps of size 2^-N
log_taylor_cache = {}
# prec/size ratio of x for fastest convergence in AGM formula
LOG_AGM_MAG_PREC_RATIO = 20

ATAN_TAYLOR_PREC = 3000  # Same as for log
ATAN_TAYLOR_SHIFT = 7    # steps of size 2^-N
atan_taylor_cache = {}


# cache_prec_steps[p] is, by construction of the loop below, roughly the
# next power of two at or above p (capped at LOG_TAYLOR_PREC) plus 20
# guard bits:  ~= next power of two + 20
cache_prec_steps = [22,22]
for k in xrange(1, bitcount(LOG_TAYLOR_PREC)+1):
    cache_prec_steps += [min(2**k,LOG_TAYLOR_PREC)+20] * 2**(k-1)
|
| 77 |
+
|
| 78 |
+
|
| 79 |
+
#----------------------------------------------------------------------------#
|
| 80 |
+
# #
|
| 81 |
+
# Elementary mathematical constants #
|
| 82 |
+
# #
|
| 83 |
+
#----------------------------------------------------------------------------#
|
| 84 |
+
|
| 85 |
+
def constant_memo(f):
    """
    Decorator that caches the value of a mathematical constant.

    The decorated function must take a single precision argument
    ``prec`` (in bits) and return the constant as a fixed-point
    integer with that precision.  The value is computed once, at a
    slightly higher precision than requested, and later calls at the
    same or lower precision are served by a cheap right shift.
    """
    f.memo_prec = -1
    f.memo_val = None
    def wrapper(prec, **kwargs):
        cached_prec = f.memo_prec
        if prec > cached_prec:
            # Cache miss: recompute with ~5% extra bits plus headroom,
            # so nearby future requests also hit the cache.
            cached_prec = int(prec*1.05+10)
            f.memo_val = f(cached_prec, **kwargs)
            f.memo_prec = cached_prec
        return f.memo_val >> (cached_prec - prec)
    wrapper.__name__ = f.__name__
    wrapper.__doc__ = f.__doc__
    return wrapper
|
| 105 |
+
|
| 106 |
+
def def_mpf_constant(fixed):
    """
    Wrap a fixed-point constant routine as an mpf-valued function.

    *fixed* must take a precision in bits and return the constant as a
    floor-rounded fixed-point integer.  The constant is assumed to be
    positive with magnitude roughly 1.  The returned function accepts
    a target precision and a rounding mode and yields a normalized
    mpf value.
    """
    def wrapper(prec, rnd=round_fast):
        # Work with 20 guard bits so the final rounding is reliable.
        wp = prec + 20
        man = fixed(wp)
        # fixed() floors, so bump by one ulp for the upward-directed
        # rounding modes.
        if rnd in (round_up, round_ceiling):
            man += 1
        return normalize(0, man, -wp, bitcount(man), prec, rnd)
    wrapper.__doc__ = fixed.__doc__
    return wrapper
|
| 122 |
+
|
| 123 |
+
def bsp_acot(q, a, b, hyperbolic):
    """
    Binary-splitting helper for the Taylor series of acot(q) (or
    acoth(q) when *hyperbolic* is true).

    Recursively combines the series terms with indices in [a, b) and
    returns an integer triple (p, q, r); the caller (acot_fixed)
    reconstructs the partial sum from this triple without performing
    any intermediate divisions.
    """
    if b - a == 1:
        # Base case: a single series term; 2*a+3 is its odd denominator.
        a1 = MPZ(2*a + 3)
        # acoth terms are all positive, while acot terms alternate sign.
        if hyperbolic or a&1:
            return MPZ_ONE, a1 * q**2, a1
        else:
            return -MPZ_ONE, a1 * q**2, a1
    # Recursive case: split the term range in half and merge results.
    m = (a+b)//2
    p1, q1, r1 = bsp_acot(q, a, m, hyperbolic)
    p2, q2, r2 = bsp_acot(q, m, b, hyperbolic)
    return q2*p1 + r1*p2, q1*q2, r1*r2
|
| 134 |
+
|
| 135 |
+
# the acoth(x) series converges like the geometric series for x^2
|
| 136 |
+
# N = ceil(p*log(2)/(2*log(x)))
|
| 137 |
+
def acot_fixed(a, prec, hyperbolic):
    """
    Compute acot(a) or acoth(a) for an integer a with binary splitting; see
    http://numbers.computation.free.fr/Constants/Algorithms/splitting.html
    """
    # The series converges like a geometric series in 1/a^2, so
    # roughly prec*log(2)/(2*log(a)) terms suffice (0.35 ~ ln(2)/2).
    N = int(0.35 * prec/math.log(a) + 20)
    p, q, r = bsp_acot(a, 0,N, hyperbolic)
    return ((p+q)<<prec)//(q*a)
|
| 146 |
+
def machin(coefs, prec, hyperbolic=False):
    """
    Evaluate a Machin-like formula, i.e., a linear combination of
    acot(n) or acoth(n) for specific integer values of n, using fixed-
    point arithmetic. The input should be a list [(c, n), ...], giving
    c*acot[h](n) + ...
    """
    # Accumulate with 10 guard bits, then drop them at the end.
    extraprec = 10
    wp = prec + extraprec
    total = MPZ_ZERO
    for coef, arg in coefs:
        total += MPZ(coef) * acot_fixed(MPZ(arg), wp, hyperbolic)
    return total >> extraprec
|
| 159 |
+
# Logarithms of integers are needed for various computations involving
|
| 160 |
+
# logarithms, powers, radix conversion, etc
|
| 161 |
+
|
| 162 |
+
@constant_memo
def ln2_fixed(prec):
    """
    Computes ln(2). This is done with a hyperbolic Machin-type formula,
    with binary splitting at high precision.
    """
    return machin([(18, 26), (-2, 4801), (8, 8749)], prec, True)
|
| 170 |
+
@constant_memo
def ln10_fixed(prec):
    """
    Computes ln(10). This is done with a hyperbolic Machin-type formula.
    """
    return machin([(46, 31), (34, 49), (20, 161)], prec, True)
|
| 177 |
+
|
| 178 |
+
r"""
|
| 179 |
+
For computation of pi, we use the Chudnovsky series:
|
| 180 |
+
|
| 181 |
+
oo
|
| 182 |
+
___ k
|
| 183 |
+
1 \ (-1) (6 k)! (A + B k)
|
| 184 |
+
----- = ) -----------------------
|
| 185 |
+
12 pi /___ 3 3k+3/2
|
| 186 |
+
(3 k)! (k!) C
|
| 187 |
+
k = 0
|
| 188 |
+
|
| 189 |
+
where A, B, and C are certain integer constants. This series adds roughly
|
| 190 |
+
14 digits per term. Note that C^(3/2) can be extracted so that the
|
| 191 |
+
series contains only rational terms. This makes binary splitting very
|
| 192 |
+
efficient.
|
| 193 |
+
|
| 194 |
+
The recurrence formulas for the binary splitting were taken from
|
| 195 |
+
ftp://ftp.gmplib.org/pub/src/gmp-chudnovsky.c
|
| 196 |
+
|
| 197 |
+
Previously, Machin's formula was used at low precision and the AGM iteration
|
| 198 |
+
was used at high precision. However, the Chudnovsky series is essentially as
|
| 199 |
+
fast as the Machin formula at low precision and in practice about 3x faster
|
| 200 |
+
than the AGM at high precision (despite theoretically having a worse
|
| 201 |
+
asymptotic complexity), so there is no reason not to use it in all cases.
|
| 202 |
+
|
| 203 |
+
"""
|
| 204 |
+
|
| 205 |
+
# Constants in Chudnovsky's series (see the module comment above):
# each term contributes (-1)^k (6k)! (A + B*k) / ((3k)! (k!)^3 C^(3k+3/2))
CHUD_A = MPZ(13591409)
CHUD_B = MPZ(545140134)
CHUD_C = MPZ(640320)
CHUD_D = MPZ(12)
|
| 211 |
+
def bs_chudnovsky(a, b, level, verbose):
    """
    Computes the sum from a to b of the series in the Chudnovsky
    formula. Returns g, p, q where p/q is the sum as an exact
    fraction and g is a temporary value used to save work
    for recursive calls.
    """
    if b-a == 1:
        # Single term: closed forms for the k = b-1 -> b recurrence
        g = MPZ((6*b-5)*(2*b-1)*(6*b-1))
        p = b**3 * CHUD_C**3 // 24
        q = (-1)**b * g * (CHUD_A+CHUD_B*b)
    else:
        if verbose and level < 4:
            print(" binary splitting", a, b)
        mid = (a+b)//2
        g1, p1, q1 = bs_chudnovsky(a, mid, level+1, verbose)
        g2, p2, q2 = bs_chudnovsky(mid, b, level+1, verbose)
        p = p1*p2
        g = g1*g2
        q = q1*p2 + q2*g1
    return g, p, q
|
| 233 |
+
@constant_memo
def pi_fixed(prec, verbose=False, verbose_base=None):
    """
    Compute floor(pi * 2**prec) as a big integer.

    This is done using Chudnovsky's series (see comments in
    libelefun.py for details).
    """
    # The Chudnovsky series gives 14.18 digits per term;
    # 3.3219... = log2(10) converts bits to decimal digits.
    N = int(prec/3.3219280948/14.181647462 + 2)
    if verbose:
        print("binary splitting with N =", N)
    g, p, q = bs_chudnovsky(0, N, 0, verbose)
    # Restore the C^(3/2) factor extracted from the rational terms
    sqrtC = isqrt_fast(CHUD_C<<(2*prec))
    v = p*CHUD_C*sqrtC//((q+CHUD_A*p)*CHUD_D)
    return v
|
| 250 |
+
def degree_fixed(prec):
    # Fixed-point value of one degree, i.e. pi/180 radians
    return pi_fixed(prec)//180
|
| 253 |
+
def bspe(a, b):
    """
    Sum series for exp(1)-1 between a, b, returning the result
    as an exact fraction (p, q).
    """
    if b-a == 1:
        return MPZ_ONE, MPZ(b)
    m = (a+b)//2
    p1, q1 = bspe(a, m)
    p2, q2 = bspe(m, b)
    # Combine halves: p1/q1 + p2/(q1*q2) = (p1*q2+p2)/(q1*q2)
    return p1*q2+p2, q1*q2
|
| 265 |
+
@constant_memo
def e_fixed(prec):
    """
    Computes exp(1). This is done using the ordinary Taylor series for
    exp, with binary splitting. For a description of the algorithm,
    see:

        http://numbers.computation.free.fr/Constants/
        Algorithms/splitting.html
    """
    # Slight overestimate of N needed for 1/N! < 2**(-prec)
    # This could be tightened for large N.
    N = int(1.1*prec/math.log(prec) + 20)
    p, q = bspe(0,N)
    # bspe gives e-1 = p/q; add 1 back via (p+q)/q
    return ((p+q)<<prec)//q
|
| 281 |
+
@constant_memo
def phi_fixed(prec):
    """
    Computes the golden ratio, (1+sqrt(5))/2
    """
    prec += 10
    a = isqrt_fast(MPZ_FIVE<<(2*prec)) + (MPZ_ONE << prec)
    # >> 11 = drop the 10 guard bits and divide by 2
    return a >> 11
|
| 290 |
+
# Rounded mpf wrappers around the cached fixed-point constants above
mpf_phi = def_mpf_constant(phi_fixed)
mpf_pi = def_mpf_constant(pi_fixed)
mpf_e = def_mpf_constant(e_fixed)
mpf_degree = def_mpf_constant(degree_fixed)
mpf_ln2 = def_mpf_constant(ln2_fixed)
mpf_ln10 = def_mpf_constant(ln10_fixed)
|
| 297 |
+
|
| 298 |
+
@constant_memo
def ln_sqrt2pi_fixed(prec):
    # Fixed-point value of ln(sqrt(2*pi))
    wp = prec + 10
    # ln(sqrt(2*pi)) = ln(2*pi)/2; converting at prec-1 halves the value
    return to_fixed(mpf_log(mpf_shift(mpf_pi(wp), 1), wp), prec-1)
|
| 304 |
+
@constant_memo
def sqrtpi_fixed(prec):
    # Fixed-point value of sqrt(pi)
    return sqrt_fixed(pi_fixed(prec), prec)
|
| 308 |
+
# Rounded mpf wrappers for the two derived constants above
mpf_sqrtpi = def_mpf_constant(sqrtpi_fixed)
mpf_ln_sqrt2pi = def_mpf_constant(ln_sqrt2pi_fixed)
|
| 311 |
+
|
| 312 |
+
#----------------------------------------------------------------------------#
|
| 313 |
+
# #
|
| 314 |
+
# Powers #
|
| 315 |
+
# #
|
| 316 |
+
#----------------------------------------------------------------------------#
|
| 317 |
+
|
| 318 |
+
def mpf_pow(s, t, prec, rnd=round_fast):
    """
    Compute s**t. Raises ComplexResult if s is negative and t is
    fractional.
    """
    ssign, sman, sexp, sbc = s
    tsign, tman, texp, tbc = t
    if ssign and texp < 0:
        raise ComplexResult("negative number raised to a fractional power")
    # Integer exponent: delegate to the exact integer-power routine
    if texp >= 0:
        return mpf_pow_int(s, (-1)**tsign * (tman<<texp), prec, rnd)
    # s**(n/2) = sqrt(s)**n
    if texp == -1:
        if tman == 1:
            if tsign:
                # s**(-1/2) = 1/sqrt(s)
                return mpf_div(fone, mpf_sqrt(s, prec+10,
                    reciprocal_rnd[rnd]), prec, rnd)
            return mpf_sqrt(s, prec, rnd)
        else:
            if tsign:
                return mpf_pow_int(mpf_sqrt(s, prec+10,
                    reciprocal_rnd[rnd]), -tman, prec, rnd)
            return mpf_pow_int(mpf_sqrt(s, prec+10, rnd), tman, prec, rnd)
    # General formula: s**t = exp(t*log(s))
    # TODO: handle rnd direction of the logarithm carefully
    c = mpf_log(s, prec+10, rnd)
    return mpf_exp(mpf_mul(t, c), prec, rnd)
|
| 346 |
+
def int_pow_fixed(y, n, prec):
    """n-th power of a fixed point number with precision prec

    Returns the power in the form man, exp,
    man * 2**exp ~= y**n
    """
    if n == 2:
        return (y*y), 0
    bc = bitcount(y)
    exp = 0
    workprec = 2 * (prec + 4*bitcount(n) + 4)
    _, pm, pe, pbc = fone
    # Binary exponentiation, tracking (mantissa, exponent, bitcount)
    # of both the running power and the repeatedly squared base, and
    # truncating each to workprec bits to bound intermediate sizes.
    while 1:
        if n & 1:
            pm = pm*y
            pe = pe+exp
            # Update the bitcount estimate via the small-table lookup
            pbc += bc - 2
            pbc = pbc + bctable[int(pm >> pbc)]
            if pbc > workprec:
                pm = pm >> (pbc-workprec)
                pe += pbc - workprec
                pbc = workprec
            n -= 1
            if not n:
                break
        y = y*y
        exp = exp+exp
        bc = bc + bc - 2
        bc = bc + bctable[int(y >> bc)]
        if bc > workprec:
            y = y >> (bc-workprec)
            exp += bc - workprec
            bc = workprec
        n = n // 2
    return pm, pe
|
| 382 |
+
# froot(s, n, prec, rnd) computes the real n-th root of a
|
| 383 |
+
# positive mpf tuple s.
|
| 384 |
+
# To compute the root we start from a 50-bit estimate for r
|
| 385 |
+
# generated with ordinary floating-point arithmetic, and then refine
|
| 386 |
+
# the value to full accuracy using the iteration
|
| 387 |
+
|
| 388 |
+
# 1 / y \
|
| 389 |
+
# r = --- | (n-1) * r + ---------- |
|
| 390 |
+
# n+1 n \ n r_n**(n-1) /
|
| 391 |
+
|
| 392 |
+
# which is simply Newton's method applied to the equation r**n = y.
|
| 393 |
+
# With giant_steps(start, prec+extra) = [p0,...,pm, prec+extra]
|
| 394 |
+
# and y = man * 2**-shift one has
|
| 395 |
+
# (man * 2**exp)**(1/n) =
|
| 396 |
+
# y**(1/n) * 2**(start-prec/n) * 2**(p0-start) * ... * 2**(prec+extra-pm) *
|
| 397 |
+
# 2**((exp+shift-(n-1)*prec)/n -extra))
|
| 398 |
+
# The last factor is accounted for in the last line of froot.
|
| 399 |
+
|
| 400 |
+
def nthroot_fixed(y, n, prec, exp1):
    # Newton iteration for y**(1/n); see the comment block above for
    # the derivation and the handling of the 2**exp1 scale factor.
    start = 50
    try:
        # 50-bit seed computed with ordinary floating-point arithmetic
        y1 = rshift(y, prec - n*start)
        r = MPZ(int(y1**(1.0/n)))
    except OverflowError:
        # y1**(1.0/n) overflowed a float: fall back to mpf arithmetic
        y1 = from_int(y1, start)
        fn = from_int(n)
        fn = mpf_rdiv_int(1, fn, start)
        r = mpf_pow(y1, fn, start)
        r = to_int(r)
    extra = 10
    extra1 = n
    prevp = start
    # Refine, roughly doubling the precision each step
    for p in giant_steps(start, prec+extra):
        pm, pe = int_pow_fixed(r, n-1, prevp)
        r2 = rshift(pm, (n-1)*prevp - p - pe - extra1)
        B = lshift(y, 2*p-prec+extra1)//r2
        # Newton update: r = ((n-1)*r + y/r**(n-1)) / n
        r = (B + (n-1) * lshift(r, p-prevp))//n
        prevp = p
    return r
|
| 422 |
+
def mpf_nthroot(s, n, prec, rnd=round_fast):
    """nth-root of a positive number

    Use the Newton method when faster, otherwise use x**(1/n)
    """
    sign, man, exp, bc = s
    if sign:
        raise ComplexResult("nth root of a negative number")
    # Special values: nan, zero, infinity
    if not man:
        if s == fnan:
            return fnan
        if s == fzero:
            if n > 0:
                return fzero
            if n == 0:
                return fone
            return finf
        # Infinity
        if not n:
            return fnan
        if n < 0:
            return fzero
        return finf
    flag_inverse = False
    # Trivial exponents, and reduction of negative n to positive n
    # via s**(1/n) = 1/s**(1/-n)
    if n < 2:
        if n == 0:
            return fone
        if n == 1:
            return mpf_pos(s, prec, rnd)
        if n == -1:
            return mpf_div(fone, s, prec, rnd)
        # n < 0
        rnd = reciprocal_rnd[rnd]
        flag_inverse = True
        extra_inverse = 5
        prec += extra_inverse
        n = -n
    # For large n (empirical cutoff), exp(log(s)/n) is faster than Newton
    if n > 20 and (n >= 20000 or prec < int(233 + 28.3 * n**0.62)):
        prec2 = prec + 10
        fn = from_int(n)
        nth = mpf_rdiv_int(1, fn, prec2)
        r = mpf_pow(s, nth, prec2, rnd)
        s = normalize(r[0], r[1], r[2], r[3], prec, rnd)
        if flag_inverse:
            return mpf_div(fone, s, prec-extra_inverse, rnd)
        else:
            return s
    # Convert to a fixed-point number with prec2 bits.
    prec2 = prec + 2*n - (prec%n)
    # a few tests indicate that
    # for 10 < n < 10**4 a bit more precision is needed
    if n > 10:
        prec2 += prec2//10
        prec2 = prec2 - prec2%n
    # Mantissa may have more bits than we need. Trim it down.
    shift = bc - prec2
    # Adjust exponents to make prec2 and exp+shift multiples of n.
    sign1 = 0
    es = exp+shift
    if es < 0:
        sign1 = 1
        es = -es
    if sign1:
        shift += es%n
    else:
        shift -= es%n
    man = rshift(man, shift)
    extra = 10
    exp1 = ((exp+shift-(n-1)*prec2)//n) - extra
    # Nudge the mantissa up by one ulp when the requested rounding
    # direction would otherwise be violated by truncation
    rnd_shift = 0
    if flag_inverse:
        if rnd == 'u' or rnd == 'c':
            rnd_shift = 1
    else:
        if rnd == 'd' or rnd == 'f':
            rnd_shift = 1
    man = nthroot_fixed(man+rnd_shift, n, prec2, exp1)
    s = from_man_exp(man, exp1, prec, rnd)
    if flag_inverse:
        return mpf_div(fone, s, prec-extra_inverse, rnd)
    else:
        return s
|
| 505 |
+
def mpf_cbrt(s, prec, rnd=round_fast):
    """cubic root of a positive number"""
    # Simply the n = 3 case of the general nth root
    return mpf_nthroot(s, 3, prec, rnd)
|
| 509 |
+
#----------------------------------------------------------------------------#
|
| 510 |
+
# #
|
| 511 |
+
# Logarithms #
|
| 512 |
+
# #
|
| 513 |
+
#----------------------------------------------------------------------------#
|
| 514 |
+
|
| 515 |
+
|
| 516 |
+
def log_int_fixed(n, prec, ln2=None):
    """
    Fast computation of log(n), caching the value for small n,
    intended for zeta sums.
    """
    if n in log_int_cache:
        value, vprec = log_int_cache[n]
        if vprec >= prec:
            return value >> (vprec - prec)
    wp = prec + 10
    if wp <= LOG_TAYLOR_SHIFT:
        if ln2 is None:
            ln2 = ln2_fixed(wp)
        # Reduce n to [1, 2) and use log(n) = log(n/2^r) + r*log(2)
        r = bitcount(n)
        x = n << (wp-r)
        v = log_taylor_cached(x, wp) + r*ln2
    else:
        # High precision: fall back to the general mpf logarithm
        v = to_fixed(mpf_log(from_int(n), wp+5), wp)
    if n < MAX_LOG_INT_CACHE:
        log_int_cache[n] = (v, wp)
    return v >> (wp-prec)
|
| 538 |
+
def agm_fixed(a, b, prec):
    """
    Fixed-point computation of agm(a,b), assuming
    a, b both close to unit magnitude.
    """
    i = 0
    while 1:
        anew = (a+b)>>1
        # Converged when the means agree to within a few ulps
        # (checked only after a few iterations, since the gap can
        # be accidentally small early on)
        if i > 4 and abs(a-anew) < 8:
            return a
        b = isqrt_fast(a*b)
        a = anew
        i += 1
    # Unreachable: the loop always returns
    return a
|
| 553 |
+
def log_agm(x, prec):
    """
    Fixed-point computation of -log(x) = log(1/x), suitable
    for large precision. It is required that 0 < x < 1. The
    algorithm used is the Sasaki-Kanada formula

        -log(x) = pi/agm(theta2(x)^2,theta3(x)^2). [1]

    For faster convergence in the theta functions, x should
    be chosen closer to 0.

    Guard bits must be added by the caller.

    HYPOTHESIS: if x = 2^(-n), n bits need to be added to
    account for the truncation to a fixed-point number,
    and this is the only significant cancellation error.

    The number of bits lost to roundoff is small and can be
    considered constant.

    [1] Richard P. Brent, "Fast Algorithms for High-Precision
        Computation of Elementary Functions (extended abstract)",
        http://wwwmaths.anu.edu.au/~brent/pd/RNC7-Brent.pdf

    """
    x2 = (x*x) >> prec
    # Compute jtheta2(x)**2
    s = a = b = x2
    while a:
        b = (b*x2) >> prec
        a = (a*b) >> prec
        s += a
    s += (MPZ_ONE<<prec)
    s = (s*s)>>(prec-2)
    s = (s*isqrt_fast(x<<prec))>>prec
    # Compute jtheta3(x)**2
    t = a = b = x
    while a:
        b = (b*x2) >> prec
        a = (a*b) >> prec
        t += a
    t = (MPZ_ONE<<prec) + (t<<1)
    t = (t*t)>>prec
    # Final formula
    p = agm_fixed(s, t, prec)
    return (pi_fixed(prec) << prec) // p
|
| 600 |
+
def log_taylor(x, prec, r=0):
    """
    Fixed-point calculation of log(x). It is assumed that x is close
    enough to 1 for the Taylor series to converge quickly. Convergence
    can be improved by specifying r > 0 to compute
    log(x^(1/2^r))*2^r, at the cost of performing r square roots.

    The caller must provide sufficient guard bits.
    """
    for i in xrange(r):
        x = isqrt_fast(x<<prec)
    one = MPZ_ONE << prec
    # log(x) = 2*atanh(v) with v = (x-1)/(x+1):
    # sum v + v^3/3 + v^5/5 + ... with two interleaved accumulators
    v = ((x-one)<<prec)//(x+one)
    sign = v < 0
    if sign:
        v = -v
    v2 = (v*v) >> prec
    v4 = (v2*v2) >> prec
    s0 = v
    s1 = v//3
    v = (v*v4) >> prec
    k = 5
    while v:
        s0 += v // k
        k += 2
        s1 += v // k
        v = (v*v4) >> prec
        k += 2
    s1 = (s1*v2) >> prec
    # << (1+r): factor 2 from atanh, factor 2^r undoes the square roots
    s = (s0+s1) << (1+r)
    if sign:
        return -s
    return s
|
| 634 |
+
def log_taylor_cached(x, prec):
    """
    Fixed-point computation of log(x), assuming x in (0.5, 2)
    and prec <= LOG_TAYLOR_PREC.
    """
    # Pick the nearest cached reference point a and use
    # log(x) = log(a) + log(x/a), with log(x/a) from the atanh series
    n = x >> (prec-LOG_TAYLOR_SHIFT)
    cached_prec = cache_prec_steps[prec]
    dprec = cached_prec - prec
    if (n, cached_prec) in log_taylor_cache:
        a, log_a = log_taylor_cache[n, cached_prec]
    else:
        a = n << (cached_prec - LOG_TAYLOR_SHIFT)
        log_a = log_taylor(a, cached_prec, 8)
        log_taylor_cache[n, cached_prec] = (a, log_a)
    a >>= dprec
    log_a >>= dprec
    # v = (x-a)/(x+a), so log(x/a) = 2*atanh(v)
    u = ((x - a) << prec) // a
    v = (u << prec) // ((MPZ_TWO << prec) + u)
    v2 = (v*v) >> prec
    v4 = (v2*v2) >> prec
    s0 = v
    s1 = v//3
    v = (v*v4) >> prec
    k = 5
    while v:
        s0 += v//k
        k += 2
        s1 += v//k
        v = (v*v4) >> prec
        k += 2
    s1 = (s1*v2) >> prec
    s = (s0+s1) << 1
    return log_a + s
|
| 668 |
+
def mpf_log(x, prec, rnd=round_fast):
    """
    Compute the natural logarithm of the mpf value x. If x is negative,
    ComplexResult is raised.
    """
    sign, man, exp, bc = x
    #------------------------------------------------------------------
    # Handle special values
    if not man:
        if x == fzero: return fninf
        if x == finf: return finf
        if x == fnan: return fnan
    if sign:
        raise ComplexResult("logarithm of a negative number")
    wp = prec + 20
    #------------------------------------------------------------------
    # Handle log(2^n) = n*log(2).
    # Here we catch the only possible exact value, log(1) = 0
    if man == 1:
        if not exp:
            return fzero
        return from_man_exp(exp*ln2_fixed(wp), -wp, prec, rnd)
    mag = exp+bc
    abs_mag = abs(mag)
    #------------------------------------------------------------------
    # Handle x = 1+eps, where log(x) ~ x. We need to check for
    # cancellation when moving to fixed-point math and compensate
    # by increasing the precision. Note that abs_mag in (0, 1) <=>
    # 0.5 < x < 2 and x != 1
    if abs_mag <= 1:
        # Calculate t = x-1 to measure distance from 1 in bits
        tsign = 1-abs_mag
        if tsign:
            tman = (MPZ_ONE<<bc) - man
        else:
            tman = man - (MPZ_ONE<<(bc-1))
        tbc = bitcount(tman)
        cancellation = bc - tbc
        if cancellation > wp:
            # So close to 1 that log(x) ~= x-1 to full precision
            t = normalize(tsign, tman, abs_mag-bc, tbc, tbc, 'n')
            return mpf_perturb(t, tsign, prec, rnd)
        else:
            wp += cancellation
        # TODO: if close enough to 1, we could use Taylor series
        # even in the AGM precision range, since the Taylor series
        # converges rapidly
    #------------------------------------------------------------------
    # Another special case:
    # n*log(2) is a good enough approximation
    if abs_mag > 10000:
        if bitcount(abs_mag) > wp:
            return from_man_exp(exp*ln2_fixed(wp), -wp, prec, rnd)
    #------------------------------------------------------------------
    # General case.
    # Perform argument reduction using log(x) = log(x*2^n) - n*log(2):
    # If we are in the Taylor precision range, choose magnitude 0 or 1.
    # If we are in the AGM precision range, choose magnitude -m for
    # some large m; benchmarking on one machine showed m = prec/20 to be
    # optimal between 1000 and 100,000 digits.
    if wp <= LOG_TAYLOR_PREC:
        m = log_taylor_cached(lshift(man, wp-bc), wp)
        if mag:
            m += mag*ln2_fixed(wp)
    else:
        optimal_mag = -wp//LOG_AGM_MAG_PREC_RATIO
        n = optimal_mag - mag
        x = mpf_shift(x, n)
        wp += (-optimal_mag)
        # log_agm computes -log(x) for 0 < x < 1
        m = -log_agm(to_fixed(x, wp), wp)
        m -= n*ln2_fixed(wp)
    return from_man_exp(m, -wp, prec, rnd)
|
| 740 |
+
def mpf_log_hypot(a, b, prec, rnd):
    """
    Computes log(sqrt(a^2+b^2)) accurately.
    """
    # If either a or b is inf/nan/0, assume it to be a
    if not b[1]:
        a, b = b, a
    # a is inf/nan/0
    if not a[1]:
        # both are inf/nan/0
        if not b[1]:
            if a == b == fzero:
                return fninf
            if fnan in (a, b):
                return fnan
            # at least one term is (+/- inf)^2
            return finf
        # only a is inf/nan/0
        if a == fzero:
            # log(sqrt(0+b^2)) = log(|b|)
            return mpf_log(mpf_abs(b), prec, rnd)
        if a == fnan:
            return fnan
        return finf
    # Exact
    a2 = mpf_mul(a,a)
    b2 = mpf_mul(b,b)
    extra = 20
    # Not exact
    h2 = mpf_add(a2, b2, prec+extra)
    # Detect cancellation near h2 = 1, where log loses accuracy
    cancelled = mpf_add(h2, fnone, 10)
    mag_cancelled = cancelled[2]+cancelled[3]
    # Just redo the sum exactly if necessary (could be smarter
    # and avoid memory allocation when a or b is precisely 1
    # and the other is tiny...)
    if cancelled == fzero or mag_cancelled < -extra//2:
        h2 = mpf_add(a2, b2, prec+extra-min(a2[2],b2[2]))
    # log(sqrt(h2)) = log(h2)/2
    return mpf_shift(mpf_log(h2, prec, rnd), -1)
|
| 779 |
+
|
| 780 |
+
#----------------------------------------------------------------------
|
| 781 |
+
# Inverse tangent
|
| 782 |
+
#
|
| 783 |
+
|
| 784 |
+
def atan_newton(x, prec):
    # Fixed-point atan via Newton's method on tan(r) = x, starting
    # from a 50-bit machine-float estimate and doubling precision
    # each iteration.
    if prec >= 100:
        r = math.atan(int((x>>(prec-53)))/2.0**53)
    else:
        r = math.atan(int(x)/2.0**prec)
    prevp = 50
    r = MPZ(int(r * 2.0**53) >> (53-prevp))
    extra_p = 50
    for wp in giant_steps(prevp, prec):
        wp += extra_p
        r = r << (wp-prevp)
        cos, sin = cos_sin_fixed(r, wp)
        tan = (sin << wp) // cos
        # Newton update: r -= (tan(r) - x) / (1 + tan(r)^2)
        a = ((tan-rshift(x, prec-wp)) << wp) // ((MPZ_ONE<<wp) + ((tan**2)>>wp))
        r = r - a
        prevp = wp
    return rshift(r, prevp-prec)
|
| 802 |
+
def atan_taylor_get_cached(n, prec):
    # Taylor series with caching wins up to huge precisions
    # To avoid unnecessary precomputation at low precision, we
    # do it in steps
    # Round to next power of 2
    prec2 = (1<<(bitcount(prec-1))) + 20
    dprec = prec2 - prec
    if (n, prec2) in atan_taylor_cache:
        a, atan_a = atan_taylor_cache[n, prec2]
    else:
        # Reference point a and its atan, computed once per
        # (n, precision-step) pair
        a = n << (prec2 - ATAN_TAYLOR_SHIFT)
        atan_a = atan_newton(a, prec2)
        atan_taylor_cache[n, prec2] = (a, atan_a)
    # Truncate the cached values down to the requested precision
    return (a >> dprec), (atan_a >> dprec)
|
| 817 |
+
def atan_taylor(x, prec):
    # atan(x) = atan(a) + atan(v) with v = (x-a)/(1+x*a) for a cached
    # reference point a near x; the Taylor series in v then converges
    # quickly. Two interleaved accumulators sum v - v^3/3 + v^5/5 - ...
    n = (x >> (prec-ATAN_TAYLOR_SHIFT))
    a, atan_a = atan_taylor_get_cached(n, prec)
    d = x - a
    s0 = v = (d << prec) // ((a**2 >> prec) + (a*d >> prec) + (MPZ_ONE << prec))
    v2 = (v**2 >> prec)
    v4 = (v2 * v2) >> prec
    s1 = v//3
    v = (v * v4) >> prec
    k = 5
    while v:
        s0 += v // k
        k += 2
        s1 += v // k
        v = (v * v4) >> prec
        k += 2
    s1 = (s1 * v2) >> prec
    s = s0 - s1
    return atan_a + s
|
| 837 |
+
def atan_inf(sign, prec, rnd):
    """Return atan at infinity: +pi/2 for sign == 0, -pi/2 otherwise."""
    if sign:
        half_pi = mpf_shift(mpf_pi(prec, negative_rnd[rnd]), -1)
        return mpf_neg(half_pi)
    return mpf_shift(mpf_pi(prec, rnd), -1)
|
| 842 |
+
def mpf_atan(x, prec, rnd=round_fast):
    """Inverse tangent of the mpf value x."""
    sign, man, exp, bc = x
    if not man:
        if x == fzero: return fzero
        if x == finf: return atan_inf(0, prec, rnd)
        if x == fninf: return atan_inf(1, prec, rnd)
        return fnan
    mag = exp + bc
    # Essentially infinity
    if mag > prec+20:
        return atan_inf(sign, prec, rnd)
    # Essentially ~ x
    if -mag > prec+20:
        return mpf_perturb(x, 1-sign, prec, rnd)
    wp = prec + 30 + abs(mag)
    # For large x, use atan(x) = pi/2 - atan(1/x)
    if mag >= 2:
        x = mpf_rdiv_int(1, x, wp)
        reciprocal = True
    else:
        reciprocal = False
    t = to_fixed(x, wp)
    # Work with |x|; restore the sign at the end (atan is odd)
    if sign:
        t = -t
    if wp < ATAN_TAYLOR_PREC:
        a = atan_taylor(t, wp)
    else:
        a = atan_newton(t, wp)
    if reciprocal:
        a = ((pi_fixed(wp)>>1)+1) - a
    if sign:
        a = -a
    return from_man_exp(a, -wp, prec, rnd)
|
| 876 |
+
# TODO: cleanup the special cases
|
| 877 |
+
def mpf_atan2(y, x, prec, rnd=round_fast):
    """Two-argument inverse tangent, atan2(y, x)."""
    xsign, xman, xexp, xbc = x
    ysign, yman, yexp, ybc = y
    # y is zero/inf/nan
    if not yman:
        if y == fzero and x != fnan:
            if mpf_sign(x) >= 0:
                return fzero
            return mpf_pi(prec, rnd)
        if y in (finf, fninf):
            if x in (finf, fninf):
                return fnan
            # pi/2
            if y == finf:
                return mpf_shift(mpf_pi(prec, rnd), -1)
            # -pi/2
            return mpf_neg(mpf_shift(mpf_pi(prec, negative_rnd[rnd]), -1))
        return fnan
    # Reduce to y > 0 via the odd symmetry atan2(-y, x) = -atan2(y, x)
    if ysign:
        return mpf_neg(mpf_atan2(mpf_neg(y), x, prec, negative_rnd[rnd]))
    # x is zero/inf/nan (and y is finite nonzero)
    if not xman:
        if x == fnan:
            return fnan
        if x == finf:
            return fzero
        if x == fninf:
            return mpf_pi(prec, rnd)
        if y == fzero:
            return fzero
        return mpf_shift(mpf_pi(prec, rnd), -1)
    tquo = mpf_atan(mpf_div(y, x, prec+4), prec+4)
    # Shift into the correct quadrant for x < 0
    if xsign:
        return mpf_add(mpf_pi(prec+4), tquo, prec, rnd)
    else:
        return mpf_pos(tquo, prec, rnd)
|
| 912 |
+
def mpf_asin(x, prec, rnd=round_fast):
    """Inverse sine, via asin(x) = 2*atan(x/(1+sqrt(1-x**2)))."""
    sign, man, exp, bc = x
    # Positive magnitude means |x| > 1 (complex result), except for
    # x = +/-1 exactly.
    if exp + bc > 0 and x not in (fone, fnone):
        raise ComplexResult("asin(x) is real only for -1 <= x <= 1")
    wp = prec + 15
    xx = mpf_mul(x, x)
    root = mpf_sqrt(mpf_sub(fone, xx, wp), wp)
    denom = mpf_add(fone, root, wp)
    t = mpf_div(x, denom, wp)
    return mpf_shift(mpf_atan(t, prec, rnd), 1)
|
| 923 |
+
def mpf_acos(x, prec, rnd=round_fast):
    """Inverse cosine of the mpf value x."""
    # acos(x) = 2*atan(sqrt(1-x**2)/(1+x))
    sign, man, exp, bc = x
    if bc + exp > 0:
        # |x| > 1 is out of the real domain, except x = +/-1 exactly;
        # the formula below divides by zero at x = -1, so special-case it
        if x not in (fone, fnone):
            raise ComplexResult("acos(x) is real only for -1 <= x <= 1")
        if x == fnone:
            return mpf_pi(prec, rnd)
    wp = prec + 15
    a = mpf_mul(x, x)
    b = mpf_sqrt(mpf_sub(fone, a, wp), wp)
    c = mpf_div(b, mpf_add(fone, x, wp), wp)
    return mpf_shift(mpf_atan(c, prec, rnd), 1)
|
| 937 |
+
def mpf_asinh(x, prec, rnd=round_fast):
    """Inverse hyperbolic sine of the mpf value x."""
    wp = prec + 20
    sign, man, exp, bc = x
    mag = exp+bc
    if mag < -8:
        # asinh(x) ~= x for tiny x
        if mag < -wp:
            return mpf_perturb(x, 1-sign, prec, rnd)
        wp += (-mag)
    # asinh(x) = log(x+sqrt(x**2+1))
    # use reflection symmetry to avoid cancellation
    q = mpf_sqrt(mpf_add(mpf_mul(x, x), fone, wp), wp)
    q = mpf_add(mpf_abs(x), q, wp)
    if sign:
        return mpf_neg(mpf_log(q, prec, negative_rnd[rnd]))
    else:
        return mpf_log(q, prec, rnd)
|
| 954 |
+
def mpf_acosh(x, prec, rnd=round_fast):
    """Inverse hyperbolic cosine: acosh(x) = log(x + sqrt(x**2 - 1))."""
    wp = prec + 15
    if mpf_cmp(x, fone) == -1:
        # x < 1 would give a complex result
        raise ComplexResult("acosh(x) is real only for x >= 1")
    x2_minus_1 = mpf_add(mpf_mul(x,x), fnone, wp)
    root = mpf_sqrt(x2_minus_1, wp)
    return mpf_log(mpf_add(x, root, wp), prec, rnd)
|
| 962 |
+
def mpf_atanh(x, prec, rnd=round_fast):
    """Inverse hyperbolic tangent of the mpf value x."""
    # atanh(x) = log((1+x)/(1-x))/2
    sign, man, exp, bc = x
    if (not man) and exp:
        if x in (fzero, fnan):
            return x
        raise ComplexResult("atanh(x) is real only for -1 <= x <= 1")
    mag = bc + exp
    if mag > 0:
        # x = +/-1 exactly gives +/-inf; |x| > 1 is complex
        if mag == 1 and man == 1:
            return [finf, fninf][sign]
        raise ComplexResult("atanh(x) is real only for -1 <= x <= 1")
    wp = prec + 15
    if mag < -8:
        # atanh(x) ~= x for tiny x
        if mag < -wp:
            return mpf_perturb(x, sign, prec, rnd)
        wp += (-mag)
    a = mpf_add(x, fone, wp)
    b = mpf_sub(fone, x, wp)
    return mpf_shift(mpf_log(mpf_div(a, b, wp), prec, rnd), -1)
|
| 983 |
+
def mpf_fibonacci(x, prec, rnd=round_fast):
|
| 984 |
+
sign, man, exp, bc = x
|
| 985 |
+
if not man:
|
| 986 |
+
if x == fninf:
|
| 987 |
+
return fnan
|
| 988 |
+
return x
|
| 989 |
+
# F(2^n) ~= 2^(2^n)
|
| 990 |
+
size = abs(exp+bc)
|
| 991 |
+
if exp >= 0:
|
| 992 |
+
# Exact
|
| 993 |
+
if size < 10 or size <= bitcount(prec):
|
| 994 |
+
return from_int(ifib(to_int(x)), prec, rnd)
|
| 995 |
+
# Use the modified Binet formula
|
| 996 |
+
wp = prec + size + 20
|
| 997 |
+
a = mpf_phi(wp)
|
| 998 |
+
b = mpf_add(mpf_shift(a, 1), fnone, wp)
|
| 999 |
+
u = mpf_pow(a, x, wp)
|
| 1000 |
+
v = mpf_cos_pi(x, wp)
|
| 1001 |
+
v = mpf_div(v, u, wp)
|
| 1002 |
+
u = mpf_sub(u, v, wp)
|
| 1003 |
+
u = mpf_div(u, b, prec, rnd)
|
| 1004 |
+
return u
|
| 1005 |
+
|
| 1006 |
+
|
| 1007 |
+
#-------------------------------------------------------------------------------
|
| 1008 |
+
# Exponential-type functions
|
| 1009 |
+
#-------------------------------------------------------------------------------
|
| 1010 |
+
|
| 1011 |
+
def exponential_series(x, prec, type=0):
|
| 1012 |
+
"""
|
| 1013 |
+
Taylor series for cosh/sinh or cos/sin.
|
| 1014 |
+
|
| 1015 |
+
type = 0 -- returns exp(x) (slightly faster than cosh+sinh)
|
| 1016 |
+
type = 1 -- returns (cosh(x), sinh(x))
|
| 1017 |
+
type = 2 -- returns (cos(x), sin(x))
|
| 1018 |
+
"""
|
| 1019 |
+
if x < 0:
|
| 1020 |
+
x = -x
|
| 1021 |
+
sign = 1
|
| 1022 |
+
else:
|
| 1023 |
+
sign = 0
|
| 1024 |
+
r = int(0.5*prec**0.5)
|
| 1025 |
+
xmag = bitcount(x) - prec
|
| 1026 |
+
r = max(0, xmag + r)
|
| 1027 |
+
extra = 10 + 2*max(r,-xmag)
|
| 1028 |
+
wp = prec + extra
|
| 1029 |
+
x <<= (extra - r)
|
| 1030 |
+
one = MPZ_ONE << wp
|
| 1031 |
+
alt = (type == 2)
|
| 1032 |
+
if prec < EXP_SERIES_U_CUTOFF:
|
| 1033 |
+
x2 = a = (x*x) >> wp
|
| 1034 |
+
x4 = (x2*x2) >> wp
|
| 1035 |
+
s0 = s1 = MPZ_ZERO
|
| 1036 |
+
k = 2
|
| 1037 |
+
while a:
|
| 1038 |
+
a //= (k-1)*k; s0 += a; k += 2
|
| 1039 |
+
a //= (k-1)*k; s1 += a; k += 2
|
| 1040 |
+
a = (a*x4) >> wp
|
| 1041 |
+
s1 = (x2*s1) >> wp
|
| 1042 |
+
if alt:
|
| 1043 |
+
c = s1 - s0 + one
|
| 1044 |
+
else:
|
| 1045 |
+
c = s1 + s0 + one
|
| 1046 |
+
else:
|
| 1047 |
+
u = int(0.3*prec**0.35)
|
| 1048 |
+
x2 = a = (x*x) >> wp
|
| 1049 |
+
xpowers = [one, x2]
|
| 1050 |
+
for i in xrange(1, u):
|
| 1051 |
+
xpowers.append((xpowers[-1]*x2)>>wp)
|
| 1052 |
+
sums = [MPZ_ZERO] * u
|
| 1053 |
+
k = 2
|
| 1054 |
+
while a:
|
| 1055 |
+
for i in xrange(u):
|
| 1056 |
+
a //= (k-1)*k
|
| 1057 |
+
if alt and k & 2: sums[i] -= a
|
| 1058 |
+
else: sums[i] += a
|
| 1059 |
+
k += 2
|
| 1060 |
+
a = (a*xpowers[-1]) >> wp
|
| 1061 |
+
for i in xrange(1, u):
|
| 1062 |
+
sums[i] = (sums[i]*xpowers[i]) >> wp
|
| 1063 |
+
c = sum(sums) + one
|
| 1064 |
+
if type == 0:
|
| 1065 |
+
s = isqrt_fast(c*c - (one<<wp))
|
| 1066 |
+
if sign:
|
| 1067 |
+
v = c - s
|
| 1068 |
+
else:
|
| 1069 |
+
v = c + s
|
| 1070 |
+
for i in xrange(r):
|
| 1071 |
+
v = (v*v) >> wp
|
| 1072 |
+
return v >> extra
|
| 1073 |
+
else:
|
| 1074 |
+
# Repeatedly apply the double-angle formula
|
| 1075 |
+
# cosh(2*x) = 2*cosh(x)^2 - 1
|
| 1076 |
+
# cos(2*x) = 2*cos(x)^2 - 1
|
| 1077 |
+
pshift = wp-1
|
| 1078 |
+
for i in xrange(r):
|
| 1079 |
+
c = ((c*c) >> pshift) - one
|
| 1080 |
+
# With the abs, this is the same for sinh and sin
|
| 1081 |
+
s = isqrt_fast(abs((one<<wp) - c*c))
|
| 1082 |
+
if sign:
|
| 1083 |
+
s = -s
|
| 1084 |
+
return (c>>extra), (s>>extra)
|
| 1085 |
+
|
| 1086 |
+
def exp_basecase(x, prec):
|
| 1087 |
+
"""
|
| 1088 |
+
Compute exp(x) as a fixed-point number. Works for any x,
|
| 1089 |
+
but for speed should have |x| < 1. For an arbitrary number,
|
| 1090 |
+
use exp(x) = exp(x-m*log(2)) * 2^m where m = floor(x/log(2)).
|
| 1091 |
+
"""
|
| 1092 |
+
if prec > EXP_COSH_CUTOFF:
|
| 1093 |
+
return exponential_series(x, prec, 0)
|
| 1094 |
+
r = int(prec**0.5)
|
| 1095 |
+
prec += r
|
| 1096 |
+
s0 = s1 = (MPZ_ONE << prec)
|
| 1097 |
+
k = 2
|
| 1098 |
+
a = x2 = (x*x) >> prec
|
| 1099 |
+
while a:
|
| 1100 |
+
a //= k; s0 += a; k += 1
|
| 1101 |
+
a //= k; s1 += a; k += 1
|
| 1102 |
+
a = (a*x2) >> prec
|
| 1103 |
+
s1 = (s1*x) >> prec
|
| 1104 |
+
s = s0 + s1
|
| 1105 |
+
u = r
|
| 1106 |
+
while r:
|
| 1107 |
+
s = (s*s) >> prec
|
| 1108 |
+
r -= 1
|
| 1109 |
+
return s >> u
|
| 1110 |
+
|
| 1111 |
+
def exp_expneg_basecase(x, prec):
|
| 1112 |
+
"""
|
| 1113 |
+
Computation of exp(x), exp(-x)
|
| 1114 |
+
"""
|
| 1115 |
+
if prec > EXP_COSH_CUTOFF:
|
| 1116 |
+
cosh, sinh = exponential_series(x, prec, 1)
|
| 1117 |
+
return cosh+sinh, cosh-sinh
|
| 1118 |
+
a = exp_basecase(x, prec)
|
| 1119 |
+
b = (MPZ_ONE << (prec+prec)) // a
|
| 1120 |
+
return a, b
|
| 1121 |
+
|
| 1122 |
+
def cos_sin_basecase(x, prec):
|
| 1123 |
+
"""
|
| 1124 |
+
Compute cos(x), sin(x) as fixed-point numbers, assuming x
|
| 1125 |
+
in [0, pi/2). For an arbitrary number, use x' = x - m*(pi/2)
|
| 1126 |
+
where m = floor(x/(pi/2)) along with quarter-period symmetries.
|
| 1127 |
+
"""
|
| 1128 |
+
if prec > COS_SIN_CACHE_PREC:
|
| 1129 |
+
return exponential_series(x, prec, 2)
|
| 1130 |
+
precs = prec - COS_SIN_CACHE_STEP
|
| 1131 |
+
t = x >> precs
|
| 1132 |
+
n = int(t)
|
| 1133 |
+
if n not in cos_sin_cache:
|
| 1134 |
+
w = t<<(10+COS_SIN_CACHE_PREC-COS_SIN_CACHE_STEP)
|
| 1135 |
+
cos_t, sin_t = exponential_series(w, 10+COS_SIN_CACHE_PREC, 2)
|
| 1136 |
+
cos_sin_cache[n] = (cos_t>>10), (sin_t>>10)
|
| 1137 |
+
cos_t, sin_t = cos_sin_cache[n]
|
| 1138 |
+
offset = COS_SIN_CACHE_PREC - prec
|
| 1139 |
+
cos_t >>= offset
|
| 1140 |
+
sin_t >>= offset
|
| 1141 |
+
x -= t << precs
|
| 1142 |
+
cos = MPZ_ONE << prec
|
| 1143 |
+
sin = x
|
| 1144 |
+
k = 2
|
| 1145 |
+
a = -((x*x) >> prec)
|
| 1146 |
+
while a:
|
| 1147 |
+
a //= k; cos += a; k += 1; a = (a*x) >> prec
|
| 1148 |
+
a //= k; sin += a; k += 1; a = -((a*x) >> prec)
|
| 1149 |
+
return ((cos*cos_t-sin*sin_t) >> prec), ((sin*cos_t+cos*sin_t) >> prec)
|
| 1150 |
+
|
| 1151 |
+
def mpf_exp(x, prec, rnd=round_fast):
|
| 1152 |
+
sign, man, exp, bc = x
|
| 1153 |
+
if man:
|
| 1154 |
+
mag = bc + exp
|
| 1155 |
+
wp = prec + 14
|
| 1156 |
+
if sign:
|
| 1157 |
+
man = -man
|
| 1158 |
+
# TODO: the best cutoff depends on both x and the precision.
|
| 1159 |
+
if prec > 600 and exp >= 0:
|
| 1160 |
+
# Need about log2(exp(n)) ~= 1.45*mag extra precision
|
| 1161 |
+
e = mpf_e(wp+int(1.45*mag))
|
| 1162 |
+
return mpf_pow_int(e, man<<exp, prec, rnd)
|
| 1163 |
+
if mag < -wp:
|
| 1164 |
+
return mpf_perturb(fone, sign, prec, rnd)
|
| 1165 |
+
# |x| >= 2
|
| 1166 |
+
if mag > 1:
|
| 1167 |
+
# For large arguments: exp(2^mag*(1+eps)) =
|
| 1168 |
+
# exp(2^mag)*exp(2^mag*eps) = exp(2^mag)*(1 + 2^mag*eps + ...)
|
| 1169 |
+
# so about mag extra bits is required.
|
| 1170 |
+
wpmod = wp + mag
|
| 1171 |
+
offset = exp + wpmod
|
| 1172 |
+
if offset >= 0:
|
| 1173 |
+
t = man << offset
|
| 1174 |
+
else:
|
| 1175 |
+
t = man >> (-offset)
|
| 1176 |
+
lg2 = ln2_fixed(wpmod)
|
| 1177 |
+
n, t = divmod(t, lg2)
|
| 1178 |
+
n = int(n)
|
| 1179 |
+
t >>= mag
|
| 1180 |
+
else:
|
| 1181 |
+
offset = exp + wp
|
| 1182 |
+
if offset >= 0:
|
| 1183 |
+
t = man << offset
|
| 1184 |
+
else:
|
| 1185 |
+
t = man >> (-offset)
|
| 1186 |
+
n = 0
|
| 1187 |
+
man = exp_basecase(t, wp)
|
| 1188 |
+
return from_man_exp(man, n-wp, prec, rnd)
|
| 1189 |
+
if not exp:
|
| 1190 |
+
return fone
|
| 1191 |
+
if x == fninf:
|
| 1192 |
+
return fzero
|
| 1193 |
+
return x
|
| 1194 |
+
|
| 1195 |
+
|
| 1196 |
+
def mpf_cosh_sinh(x, prec, rnd=round_fast, tanh=0):
|
| 1197 |
+
"""Simultaneously compute (cosh(x), sinh(x)) for real x"""
|
| 1198 |
+
sign, man, exp, bc = x
|
| 1199 |
+
if (not man) and exp:
|
| 1200 |
+
if tanh:
|
| 1201 |
+
if x == finf: return fone
|
| 1202 |
+
if x == fninf: return fnone
|
| 1203 |
+
return fnan
|
| 1204 |
+
if x == finf: return (finf, finf)
|
| 1205 |
+
if x == fninf: return (finf, fninf)
|
| 1206 |
+
return fnan, fnan
|
| 1207 |
+
mag = exp+bc
|
| 1208 |
+
wp = prec+14
|
| 1209 |
+
if mag < -4:
|
| 1210 |
+
# Extremely close to 0, sinh(x) ~= x and cosh(x) ~= 1
|
| 1211 |
+
if mag < -wp:
|
| 1212 |
+
if tanh:
|
| 1213 |
+
return mpf_perturb(x, 1-sign, prec, rnd)
|
| 1214 |
+
cosh = mpf_perturb(fone, 0, prec, rnd)
|
| 1215 |
+
sinh = mpf_perturb(x, sign, prec, rnd)
|
| 1216 |
+
return cosh, sinh
|
| 1217 |
+
# Fix for cancellation when computing sinh
|
| 1218 |
+
wp += (-mag)
|
| 1219 |
+
# Does exp(-2*x) vanish?
|
| 1220 |
+
if mag > 10:
|
| 1221 |
+
if 3*(1<<(mag-1)) > wp:
|
| 1222 |
+
# XXX: rounding
|
| 1223 |
+
if tanh:
|
| 1224 |
+
return mpf_perturb([fone,fnone][sign], 1-sign, prec, rnd)
|
| 1225 |
+
c = s = mpf_shift(mpf_exp(mpf_abs(x), prec, rnd), -1)
|
| 1226 |
+
if sign:
|
| 1227 |
+
s = mpf_neg(s)
|
| 1228 |
+
return c, s
|
| 1229 |
+
# |x| > 1
|
| 1230 |
+
if mag > 1:
|
| 1231 |
+
wpmod = wp + mag
|
| 1232 |
+
offset = exp + wpmod
|
| 1233 |
+
if offset >= 0:
|
| 1234 |
+
t = man << offset
|
| 1235 |
+
else:
|
| 1236 |
+
t = man >> (-offset)
|
| 1237 |
+
lg2 = ln2_fixed(wpmod)
|
| 1238 |
+
n, t = divmod(t, lg2)
|
| 1239 |
+
n = int(n)
|
| 1240 |
+
t >>= mag
|
| 1241 |
+
else:
|
| 1242 |
+
offset = exp + wp
|
| 1243 |
+
if offset >= 0:
|
| 1244 |
+
t = man << offset
|
| 1245 |
+
else:
|
| 1246 |
+
t = man >> (-offset)
|
| 1247 |
+
n = 0
|
| 1248 |
+
a, b = exp_expneg_basecase(t, wp)
|
| 1249 |
+
# TODO: optimize division precision
|
| 1250 |
+
cosh = a + (b>>(2*n))
|
| 1251 |
+
sinh = a - (b>>(2*n))
|
| 1252 |
+
if sign:
|
| 1253 |
+
sinh = -sinh
|
| 1254 |
+
if tanh:
|
| 1255 |
+
man = (sinh << wp) // cosh
|
| 1256 |
+
return from_man_exp(man, -wp, prec, rnd)
|
| 1257 |
+
else:
|
| 1258 |
+
cosh = from_man_exp(cosh, n-wp-1, prec, rnd)
|
| 1259 |
+
sinh = from_man_exp(sinh, n-wp-1, prec, rnd)
|
| 1260 |
+
return cosh, sinh
|
| 1261 |
+
|
| 1262 |
+
|
| 1263 |
+
def mod_pi2(man, exp, mag, wp):
|
| 1264 |
+
# Reduce to standard interval
|
| 1265 |
+
if mag > 0:
|
| 1266 |
+
i = 0
|
| 1267 |
+
while 1:
|
| 1268 |
+
cancellation_prec = 20 << i
|
| 1269 |
+
wpmod = wp + mag + cancellation_prec
|
| 1270 |
+
pi2 = pi_fixed(wpmod-1)
|
| 1271 |
+
pi4 = pi2 >> 1
|
| 1272 |
+
offset = wpmod + exp
|
| 1273 |
+
if offset >= 0:
|
| 1274 |
+
t = man << offset
|
| 1275 |
+
else:
|
| 1276 |
+
t = man >> (-offset)
|
| 1277 |
+
n, y = divmod(t, pi2)
|
| 1278 |
+
if y > pi4:
|
| 1279 |
+
small = pi2 - y
|
| 1280 |
+
else:
|
| 1281 |
+
small = y
|
| 1282 |
+
if small >> (wp+mag-10):
|
| 1283 |
+
n = int(n)
|
| 1284 |
+
t = y >> mag
|
| 1285 |
+
wp = wpmod - mag
|
| 1286 |
+
break
|
| 1287 |
+
i += 1
|
| 1288 |
+
else:
|
| 1289 |
+
wp += (-mag)
|
| 1290 |
+
offset = exp + wp
|
| 1291 |
+
if offset >= 0:
|
| 1292 |
+
t = man << offset
|
| 1293 |
+
else:
|
| 1294 |
+
t = man >> (-offset)
|
| 1295 |
+
n = 0
|
| 1296 |
+
return t, n, wp
|
| 1297 |
+
|
| 1298 |
+
|
| 1299 |
+
def mpf_cos_sin(x, prec, rnd=round_fast, which=0, pi=False):
|
| 1300 |
+
"""
|
| 1301 |
+
which:
|
| 1302 |
+
0 -- return cos(x), sin(x)
|
| 1303 |
+
1 -- return cos(x)
|
| 1304 |
+
2 -- return sin(x)
|
| 1305 |
+
3 -- return tan(x)
|
| 1306 |
+
|
| 1307 |
+
if pi=True, compute for pi*x
|
| 1308 |
+
"""
|
| 1309 |
+
sign, man, exp, bc = x
|
| 1310 |
+
if not man:
|
| 1311 |
+
if exp:
|
| 1312 |
+
c, s = fnan, fnan
|
| 1313 |
+
else:
|
| 1314 |
+
c, s = fone, fzero
|
| 1315 |
+
if which == 0: return c, s
|
| 1316 |
+
if which == 1: return c
|
| 1317 |
+
if which == 2: return s
|
| 1318 |
+
if which == 3: return s
|
| 1319 |
+
|
| 1320 |
+
mag = bc + exp
|
| 1321 |
+
wp = prec + 10
|
| 1322 |
+
|
| 1323 |
+
# Extremely small?
|
| 1324 |
+
if mag < 0:
|
| 1325 |
+
if mag < -wp:
|
| 1326 |
+
if pi:
|
| 1327 |
+
x = mpf_mul(x, mpf_pi(wp))
|
| 1328 |
+
c = mpf_perturb(fone, 1, prec, rnd)
|
| 1329 |
+
s = mpf_perturb(x, 1-sign, prec, rnd)
|
| 1330 |
+
if which == 0: return c, s
|
| 1331 |
+
if which == 1: return c
|
| 1332 |
+
if which == 2: return s
|
| 1333 |
+
if which == 3: return mpf_perturb(x, sign, prec, rnd)
|
| 1334 |
+
if pi:
|
| 1335 |
+
if exp >= -1:
|
| 1336 |
+
if exp == -1:
|
| 1337 |
+
c = fzero
|
| 1338 |
+
s = (fone, fnone)[bool(man & 2) ^ sign]
|
| 1339 |
+
elif exp == 0:
|
| 1340 |
+
c, s = (fnone, fzero)
|
| 1341 |
+
else:
|
| 1342 |
+
c, s = (fone, fzero)
|
| 1343 |
+
if which == 0: return c, s
|
| 1344 |
+
if which == 1: return c
|
| 1345 |
+
if which == 2: return s
|
| 1346 |
+
if which == 3: return mpf_div(s, c, prec, rnd)
|
| 1347 |
+
# Subtract nearest half-integer (= mod by pi/2)
|
| 1348 |
+
n = ((man >> (-exp-2)) + 1) >> 1
|
| 1349 |
+
man = man - (n << (-exp-1))
|
| 1350 |
+
mag2 = bitcount(man) + exp
|
| 1351 |
+
wp = prec + 10 - mag2
|
| 1352 |
+
offset = exp + wp
|
| 1353 |
+
if offset >= 0:
|
| 1354 |
+
t = man << offset
|
| 1355 |
+
else:
|
| 1356 |
+
t = man >> (-offset)
|
| 1357 |
+
t = (t*pi_fixed(wp)) >> wp
|
| 1358 |
+
else:
|
| 1359 |
+
t, n, wp = mod_pi2(man, exp, mag, wp)
|
| 1360 |
+
c, s = cos_sin_basecase(t, wp)
|
| 1361 |
+
m = n & 3
|
| 1362 |
+
if m == 1: c, s = -s, c
|
| 1363 |
+
elif m == 2: c, s = -c, -s
|
| 1364 |
+
elif m == 3: c, s = s, -c
|
| 1365 |
+
if sign:
|
| 1366 |
+
s = -s
|
| 1367 |
+
if which == 0:
|
| 1368 |
+
c = from_man_exp(c, -wp, prec, rnd)
|
| 1369 |
+
s = from_man_exp(s, -wp, prec, rnd)
|
| 1370 |
+
return c, s
|
| 1371 |
+
if which == 1:
|
| 1372 |
+
return from_man_exp(c, -wp, prec, rnd)
|
| 1373 |
+
if which == 2:
|
| 1374 |
+
return from_man_exp(s, -wp, prec, rnd)
|
| 1375 |
+
if which == 3:
|
| 1376 |
+
return from_rational(s, c, prec, rnd)
|
| 1377 |
+
|
| 1378 |
+
def mpf_cos(x, prec, rnd=round_fast): return mpf_cos_sin(x, prec, rnd, 1)
|
| 1379 |
+
def mpf_sin(x, prec, rnd=round_fast): return mpf_cos_sin(x, prec, rnd, 2)
|
| 1380 |
+
def mpf_tan(x, prec, rnd=round_fast): return mpf_cos_sin(x, prec, rnd, 3)
|
| 1381 |
+
def mpf_cos_sin_pi(x, prec, rnd=round_fast): return mpf_cos_sin(x, prec, rnd, 0, 1)
|
| 1382 |
+
def mpf_cos_pi(x, prec, rnd=round_fast): return mpf_cos_sin(x, prec, rnd, 1, 1)
|
| 1383 |
+
def mpf_sin_pi(x, prec, rnd=round_fast): return mpf_cos_sin(x, prec, rnd, 2, 1)
|
| 1384 |
+
def mpf_cosh(x, prec, rnd=round_fast): return mpf_cosh_sinh(x, prec, rnd)[0]
|
| 1385 |
+
def mpf_sinh(x, prec, rnd=round_fast): return mpf_cosh_sinh(x, prec, rnd)[1]
|
| 1386 |
+
def mpf_tanh(x, prec, rnd=round_fast): return mpf_cosh_sinh(x, prec, rnd, tanh=1)
|
| 1387 |
+
|
| 1388 |
+
|
| 1389 |
+
# Low-overhead fixed-point versions
|
| 1390 |
+
|
| 1391 |
+
def cos_sin_fixed(x, prec, pi2=None):
|
| 1392 |
+
if pi2 is None:
|
| 1393 |
+
pi2 = pi_fixed(prec-1)
|
| 1394 |
+
n, t = divmod(x, pi2)
|
| 1395 |
+
n = int(n)
|
| 1396 |
+
c, s = cos_sin_basecase(t, prec)
|
| 1397 |
+
m = n & 3
|
| 1398 |
+
if m == 0: return c, s
|
| 1399 |
+
if m == 1: return -s, c
|
| 1400 |
+
if m == 2: return -c, -s
|
| 1401 |
+
if m == 3: return s, -c
|
| 1402 |
+
|
| 1403 |
+
def exp_fixed(x, prec, ln2=None):
|
| 1404 |
+
if ln2 is None:
|
| 1405 |
+
ln2 = ln2_fixed(prec)
|
| 1406 |
+
n, t = divmod(x, ln2)
|
| 1407 |
+
n = int(n)
|
| 1408 |
+
v = exp_basecase(t, prec)
|
| 1409 |
+
if n >= 0:
|
| 1410 |
+
return v << n
|
| 1411 |
+
else:
|
| 1412 |
+
return v >> (-n)
|
| 1413 |
+
|
| 1414 |
+
|
| 1415 |
+
if BACKEND == 'sage':
|
| 1416 |
+
try:
|
| 1417 |
+
import sage.libs.mpmath.ext_libmp as _lbmp
|
| 1418 |
+
mpf_sqrt = _lbmp.mpf_sqrt
|
| 1419 |
+
mpf_exp = _lbmp.mpf_exp
|
| 1420 |
+
mpf_log = _lbmp.mpf_log
|
| 1421 |
+
mpf_cos = _lbmp.mpf_cos
|
| 1422 |
+
mpf_sin = _lbmp.mpf_sin
|
| 1423 |
+
mpf_pow = _lbmp.mpf_pow
|
| 1424 |
+
exp_fixed = _lbmp.exp_fixed
|
| 1425 |
+
cos_sin_fixed = _lbmp.cos_sin_fixed
|
| 1426 |
+
log_int_fixed = _lbmp.log_int_fixed
|
| 1427 |
+
except (ImportError, AttributeError):
|
| 1428 |
+
print("Warning: Sage imports in libelefun failed")
|
.venv/lib/python3.11/site-packages/mpmath/tests/__init__.py
ADDED
|
File without changes
|
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (185 Bytes). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/extratest_gamma.cpython-311.pyc
ADDED
|
Binary file (11.4 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/extratest_zeta.cpython-311.pyc
ADDED
|
Binary file (1.75 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_basic_ops.cpython-311.pyc
ADDED
|
Binary file (39.2 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_calculus.cpython-311.pyc
ADDED
|
Binary file (14 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_convert.cpython-311.pyc
ADDED
|
Binary file (21.4 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_diff.cpython-311.pyc
ADDED
|
Binary file (8.56 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_division.cpython-311.pyc
ADDED
|
Binary file (11.2 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_eigen.cpython-311.pyc
ADDED
|
Binary file (8.45 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_eigen_symmetric.cpython-311.pyc
ADDED
|
Binary file (21 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_functions.cpython-311.pyc
ADDED
|
Binary file (87.5 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_gammazeta.cpython-311.pyc
ADDED
|
Binary file (67.7 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_identify.cpython-311.pyc
ADDED
|
Binary file (2.03 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_interval.cpython-311.pyc
ADDED
|
Binary file (53.7 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_levin.cpython-311.pyc
ADDED
|
Binary file (13.4 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_matrices.cpython-311.pyc
ADDED
|
Binary file (19 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_mpmath.cpython-311.pyc
ADDED
|
Binary file (678 Bytes). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_ode.cpython-311.pyc
ADDED
|
Binary file (2.69 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_power.cpython-311.pyc
ADDED
|
Binary file (9.13 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_quad.cpython-311.pyc
ADDED
|
Binary file (16.8 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_rootfinding.cpython-311.pyc
ADDED
|
Binary file (10.7 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_str.cpython-311.pyc
ADDED
|
Binary file (1.03 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_summation.cpython-311.pyc
ADDED
|
Binary file (7.75 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_trig.cpython-311.pyc
ADDED
|
Binary file (11 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/torture.cpython-311.pyc
ADDED
|
Binary file (11.1 kB). View file
|
|
|
.venv/lib/python3.11/site-packages/mpmath/tests/extratest_gamma.py
ADDED
|
@@ -0,0 +1,215 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from mpmath import *
|
| 2 |
+
from mpmath.libmp import ifac
|
| 3 |
+
|
| 4 |
+
import sys
|
| 5 |
+
if "-dps" in sys.argv:
|
| 6 |
+
maxdps = int(sys.argv[sys.argv.index("-dps")+1])
|
| 7 |
+
else:
|
| 8 |
+
maxdps = 1000
|
| 9 |
+
|
| 10 |
+
raise_ = "-raise" in sys.argv
|
| 11 |
+
|
| 12 |
+
errcount = 0
|
| 13 |
+
|
| 14 |
+
def check(name, func, z, y):
|
| 15 |
+
global errcount
|
| 16 |
+
try:
|
| 17 |
+
x = func(z)
|
| 18 |
+
except:
|
| 19 |
+
errcount += 1
|
| 20 |
+
if raise_:
|
| 21 |
+
raise
|
| 22 |
+
print()
|
| 23 |
+
print(name)
|
| 24 |
+
print("EXCEPTION")
|
| 25 |
+
import traceback
|
| 26 |
+
traceback.print_tb(sys.exc_info()[2])
|
| 27 |
+
print()
|
| 28 |
+
return
|
| 29 |
+
xre = x.real
|
| 30 |
+
xim = x.imag
|
| 31 |
+
yre = y.real
|
| 32 |
+
yim = y.imag
|
| 33 |
+
tol = eps*8
|
| 34 |
+
err = 0
|
| 35 |
+
if abs(xre-yre) > abs(yre)*tol:
|
| 36 |
+
err = 1
|
| 37 |
+
print()
|
| 38 |
+
print("Error! %s (re = %s, wanted %s, err=%s)" % (name, nstr(xre,10), nstr(yre,10), nstr(abs(xre-yre))))
|
| 39 |
+
errcount += 1
|
| 40 |
+
if raise_:
|
| 41 |
+
raise SystemExit
|
| 42 |
+
if abs(xim-yim) > abs(yim)*tol:
|
| 43 |
+
err = 1
|
| 44 |
+
print()
|
| 45 |
+
print("Error! %s (im = %s, wanted %s, err=%s)" % (name, nstr(xim,10), nstr(yim,10), nstr(abs(xim-yim))))
|
| 46 |
+
errcount += 1
|
| 47 |
+
if raise_:
|
| 48 |
+
raise SystemExit
|
| 49 |
+
if not err:
|
| 50 |
+
sys.stdout.write("%s ok; " % name)
|
| 51 |
+
|
| 52 |
+
def testcase(case):
|
| 53 |
+
z, result = case
|
| 54 |
+
print("Testing z =", z)
|
| 55 |
+
mp.dps = 1010
|
| 56 |
+
z = eval(z)
|
| 57 |
+
mp.dps = maxdps + 50
|
| 58 |
+
if result is None:
|
| 59 |
+
gamma_val = gamma(z)
|
| 60 |
+
loggamma_val = loggamma(z)
|
| 61 |
+
factorial_val = factorial(z)
|
| 62 |
+
rgamma_val = rgamma(z)
|
| 63 |
+
else:
|
| 64 |
+
loggamma_val = eval(result)
|
| 65 |
+
gamma_val = exp(loggamma_val)
|
| 66 |
+
factorial_val = z * gamma_val
|
| 67 |
+
rgamma_val = 1/gamma_val
|
| 68 |
+
for dps in [5, 10, 15, 25, 40, 60, 90, 120, 250, 600, 1000, 1800, 3600]:
|
| 69 |
+
if dps > maxdps:
|
| 70 |
+
break
|
| 71 |
+
mp.dps = dps
|
| 72 |
+
print("dps = %s" % dps)
|
| 73 |
+
check("gamma", gamma, z, gamma_val)
|
| 74 |
+
check("rgamma", rgamma, z, rgamma_val)
|
| 75 |
+
check("loggamma", loggamma, z, loggamma_val)
|
| 76 |
+
check("factorial", factorial, z, factorial_val)
|
| 77 |
+
print()
|
| 78 |
+
mp.dps = 15
|
| 79 |
+
|
| 80 |
+
testcases = []
|
| 81 |
+
|
| 82 |
+
# Basic values
|
| 83 |
+
for n in list(range(1,200)) + list(range(201,2000,17)):
|
| 84 |
+
testcases.append(["%s" % n, None])
|
| 85 |
+
for n in range(-200,200):
|
| 86 |
+
testcases.append(["%s+0.5" % n, None])
|
| 87 |
+
testcases.append(["%s+0.37" % n, None])
|
| 88 |
+
|
| 89 |
+
testcases += [\
|
| 90 |
+
["(0.1+1j)", None],
|
| 91 |
+
["(-0.1+1j)", None],
|
| 92 |
+
["(0.1-1j)", None],
|
| 93 |
+
["(-0.1-1j)", None],
|
| 94 |
+
["10j", None],
|
| 95 |
+
["-10j", None],
|
| 96 |
+
["100j", None],
|
| 97 |
+
["10000j", None],
|
| 98 |
+
["-10000000j", None],
|
| 99 |
+
["(10**100)*j", None],
|
| 100 |
+
["125+(10**100)*j", None],
|
| 101 |
+
["-125+(10**100)*j", None],
|
| 102 |
+
["(10**10)*(1+j)", None],
|
| 103 |
+
["(10**10)*(-1+j)", None],
|
| 104 |
+
["(10**100)*(1+j)", None],
|
| 105 |
+
["(10**100)*(-1+j)", None],
|
| 106 |
+
["(1.5-1j)", None],
|
| 107 |
+
["(6+4j)", None],
|
| 108 |
+
["(4+1j)", None],
|
| 109 |
+
["(3.5+2j)", None],
|
| 110 |
+
["(1.5-1j)", None],
|
| 111 |
+
["(-6-4j)", None],
|
| 112 |
+
["(-2-3j)", None],
|
| 113 |
+
["(-2.5-2j)", None],
|
| 114 |
+
["(4+1j)", None],
|
| 115 |
+
["(3+3j)", None],
|
| 116 |
+
["(2-2j)", None],
|
| 117 |
+
["1", "0"],
|
| 118 |
+
["2", "0"],
|
| 119 |
+
["3", "log(2)"],
|
| 120 |
+
["4", "log(6)"],
|
| 121 |
+
["5", "log(24)"],
|
| 122 |
+
["0.5", "log(pi)/2"],
|
| 123 |
+
["1.5", "log(sqrt(pi)/2)"],
|
| 124 |
+
["2.5", "log(3*sqrt(pi)/4)"],
|
| 125 |
+
["mpf('0.37')", None],
|
| 126 |
+
["0.25", "log(sqrt(2*sqrt(2*pi**3)/agm(1,sqrt(2))))"],
|
| 127 |
+
["-0.4", None],
|
| 128 |
+
["mpf('-1.9')", None],
|
| 129 |
+
["mpf('12.8')", None],
|
| 130 |
+
["mpf('33.7')", None],
|
| 131 |
+
["mpf('95.2')", None],
|
| 132 |
+
["mpf('160.3')", None],
|
| 133 |
+
["mpf('2057.8')", None],
|
| 134 |
+
["25", "log(ifac(24))"],
|
| 135 |
+
["80", "log(ifac(79))"],
|
| 136 |
+
["500", "log(ifac(500-1))"],
|
| 137 |
+
["8000", "log(ifac(8000-1))"],
|
| 138 |
+
["8000.5", None],
|
| 139 |
+
["mpf('8000.1')", None],
|
| 140 |
+
["mpf('1.37e10')", None],
|
| 141 |
+
["mpf('1.37e10')*(1+j)", None],
|
| 142 |
+
["mpf('1.37e10')*(-1+j)", None],
|
| 143 |
+
["mpf('1.37e10')*(-1-j)", None],
|
| 144 |
+
["mpf('1.37e10')*(-1+j)", None],
|
| 145 |
+
["mpf('1.37e100')", None],
|
| 146 |
+
["mpf('1.37e100')*(1+j)", None],
|
| 147 |
+
["mpf('1.37e100')*(-1+j)", None],
|
| 148 |
+
["mpf('1.37e100')*(-1-j)", None],
|
| 149 |
+
["mpf('1.37e100')*(-1+j)", None],
|
| 150 |
+
["3+4j",
|
| 151 |
+
"mpc('"
|
| 152 |
+
"-1.7566267846037841105306041816232757851567066070613445016197619371316057169"
|
| 153 |
+
"4723618263960834804618463052988607348289672535780644470689771115236512106002"
|
| 154 |
+
"5970873471563240537307638968509556191696167970488390423963867031934333890838"
|
| 155 |
+
"8009531786948197210025029725361069435208930363494971027388382086721660805397"
|
| 156 |
+
"9163230643216054580167976201709951509519218635460317367338612500626714783631"
|
| 157 |
+
"7498317478048447525674016344322545858832610325861086336204591943822302971823"
|
| 158 |
+
"5161814175530618223688296232894588415495615809337292518431903058265147109853"
|
| 159 |
+
"1710568942184987827643886816200452860853873815413367529829631430146227470517"
|
| 160 |
+
"6579967222200868632179482214312673161276976117132204633283806161971389519137"
|
| 161 |
+
"1243359764435612951384238091232760634271570950240717650166551484551654327989"
|
| 162 |
+
"9360285030081716934130446150245110557038117075172576825490035434069388648124"
|
| 163 |
+
"6678152254554001586736120762641422590778766100376515737713938521275749049949"
|
| 164 |
+
"1284143906816424244705094759339932733567910991920631339597278805393743140853"
|
| 165 |
+
"391550313363278558195609260225928','"
|
| 166 |
+
"4.74266443803465792819488940755002274088830335171164611359052405215840070271"
|
| 167 |
+
"5906813009373171139767051863542508136875688550817670379002790304870822775498"
|
| 168 |
+
"2809996675877564504192565392367259119610438951593128982646945990372179860613"
|
| 169 |
+
"4294436498090428077839141927485901735557543641049637962003652638924845391650"
|
| 170 |
+
"9546290137755550107224907606529385248390667634297183361902055842228798984200"
|
| 171 |
+
"9591180450211798341715874477629099687609819466457990642030707080894518168924"
|
| 172 |
+
"6805549314043258530272479246115112769957368212585759640878745385160943755234"
|
| 173 |
+
"9398036774908108204370323896757543121853650025529763655312360354244898913463"
|
| 174 |
+
"7115955702828838923393113618205074162812089732064414530813087483533203244056"
|
| 175 |
+
"0546577484241423134079056537777170351934430586103623577814746004431994179990"
|
| 176 |
+
"5318522939077992613855205801498201930221975721246498720895122345420698451980"
|
| 177 |
+
"0051215797310305885845964334761831751370672996984756815410977750799748813563"
|
| 178 |
+
"8784405288158432214886648743541773208808731479748217023665577802702269468013"
|
| 179 |
+
"673719173759245720489020315779001')"],
|
| 180 |
+
]
|
| 181 |
+
|
| 182 |
+
for z in [4, 14, 34, 64]:
|
| 183 |
+
testcases.append(["(2+j)*%s/3" % z, None])
|
| 184 |
+
testcases.append(["(-2+j)*%s/3" % z, None])
|
| 185 |
+
testcases.append(["(1+2*j)*%s/3" % z, None])
|
| 186 |
+
testcases.append(["(2-j)*%s/3" % z, None])
|
| 187 |
+
testcases.append(["(20+j)*%s/3" % z, None])
|
| 188 |
+
testcases.append(["(-20+j)*%s/3" % z, None])
|
| 189 |
+
testcases.append(["(1+20*j)*%s/3" % z, None])
|
| 190 |
+
testcases.append(["(20-j)*%s/3" % z, None])
|
| 191 |
+
testcases.append(["(200+j)*%s/3" % z, None])
|
| 192 |
+
testcases.append(["(-200+j)*%s/3" % z, None])
|
| 193 |
+
testcases.append(["(1+200*j)*%s/3" % z, None])
|
| 194 |
+
testcases.append(["(200-j)*%s/3" % z, None])
|
| 195 |
+
|
| 196 |
+
# Poles
|
| 197 |
+
for n in [0,1,2,3,4,25,-1,-2,-3,-4,-20,-21,-50,-51,-200,-201,-20000,-20001]:
|
| 198 |
+
for t in ['1e-5', '1e-20', '1e-100', '1e-10000']:
|
| 199 |
+
testcases.append(["fadd(%s,'%s',exact=True)" % (n, t), None])
|
| 200 |
+
testcases.append(["fsub(%s,'%s',exact=True)" % (n, t), None])
|
| 201 |
+
testcases.append(["fadd(%s,'%sj',exact=True)" % (n, t), None])
|
| 202 |
+
testcases.append(["fsub(%s,'%sj',exact=True)" % (n, t), None])
|
| 203 |
+
|
| 204 |
+
if __name__ == "__main__":
|
| 205 |
+
from timeit import default_timer as clock
|
| 206 |
+
tot_time = 0.0
|
| 207 |
+
for case in testcases:
|
| 208 |
+
t1 = clock()
|
| 209 |
+
testcase(case)
|
| 210 |
+
t2 = clock()
|
| 211 |
+
print("Test time:", t2-t1)
|
| 212 |
+
print()
|
| 213 |
+
tot_time += (t2-t1)
|
| 214 |
+
print("Total time:", tot_time)
|
| 215 |
+
print("Errors:", errcount)
|