Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- .gitattributes +1 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Transitions.cpython-311-x86_64-linux-gnu.so +3 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/__init__.py +468 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/calculus/__init__.py +6 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/calculus/calculus.py +6 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/calculus/differentiation.py +647 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/calculus/extrapolation.py +2115 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/calculus/odes.py +288 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/calculus/optimization.py +1102 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/calculus/polynomials.py +213 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/calculus/quadrature.py +1115 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/function_docs.py +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/theta.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/functions/elliptic.py +1431 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/functions/expintegrals.py +425 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/functions/rszeta.py +1403 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/functions/signals.py +32 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/functions/zeta.py +1154 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/__init__.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/backend.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/gammazeta.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/libintmath.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/libmpf.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/libmpi.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/libmp/backend.py +115 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/libmp/gammazeta.py +2167 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/libmp/libmpf.py +1414 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/libmp/libmpi.py +935 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/math2.py +672 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/__init__.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/extratest_zeta.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/runtests.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_basic_ops.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_bitwise.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_compatibility.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_convert.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_diff.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_division.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_eigen_symmetric.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_functions.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_gammazeta.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_hp.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_identify.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_interval.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_levin.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_linalg.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_matrices.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_mpmath.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_ode.cpython-311.pyc +0 -0
- tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_pickle.cpython-311.pyc +0 -0
.gitattributes
CHANGED
|
@@ -47,3 +47,4 @@ tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Utils.cpyt
|
|
| 47 |
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/__pycache__/typing_extensions.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
|
| 48 |
tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/__pycache__/Optimize.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
|
| 49 |
tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Tempita/_tempita.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
| 47 |
tuning-competition-baseline/.venv/lib/python3.11/site-packages/pip/_vendor/__pycache__/typing_extensions.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
|
| 48 |
tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Compiler/__pycache__/Optimize.cpython-311.pyc filter=lfs diff=lfs merge=lfs -text
|
| 49 |
tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Tempita/_tempita.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
| 50 |
+
tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Transitions.cpython-311-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/Cython/Plex/Transitions.cpython-311-x86_64-linux-gnu.so
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:192452f13adcd96e2be790b21fd30b593a41a9b969754499e3b19ff5f94d4786
|
| 3 |
+
size 138408
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/__init__.py
ADDED
|
@@ -0,0 +1,468 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
__version__ = '1.3.0'
|
| 2 |
+
|
| 3 |
+
from .usertools import monitor, timing
|
| 4 |
+
|
| 5 |
+
from .ctx_fp import FPContext
|
| 6 |
+
from .ctx_mp import MPContext
|
| 7 |
+
from .ctx_iv import MPIntervalContext
|
| 8 |
+
|
| 9 |
+
fp = FPContext()
|
| 10 |
+
mp = MPContext()
|
| 11 |
+
iv = MPIntervalContext()
|
| 12 |
+
|
| 13 |
+
fp._mp = mp
|
| 14 |
+
mp._mp = mp
|
| 15 |
+
iv._mp = mp
|
| 16 |
+
mp._fp = fp
|
| 17 |
+
fp._fp = fp
|
| 18 |
+
mp._iv = iv
|
| 19 |
+
fp._iv = iv
|
| 20 |
+
iv._iv = iv
|
| 21 |
+
|
| 22 |
+
# XXX: extremely bad pickle hack
|
| 23 |
+
from . import ctx_mp as _ctx_mp
|
| 24 |
+
_ctx_mp._mpf_module.mpf = mp.mpf
|
| 25 |
+
_ctx_mp._mpf_module.mpc = mp.mpc
|
| 26 |
+
|
| 27 |
+
make_mpf = mp.make_mpf
|
| 28 |
+
make_mpc = mp.make_mpc
|
| 29 |
+
|
| 30 |
+
extraprec = mp.extraprec
|
| 31 |
+
extradps = mp.extradps
|
| 32 |
+
workprec = mp.workprec
|
| 33 |
+
workdps = mp.workdps
|
| 34 |
+
autoprec = mp.autoprec
|
| 35 |
+
maxcalls = mp.maxcalls
|
| 36 |
+
memoize = mp.memoize
|
| 37 |
+
|
| 38 |
+
mag = mp.mag
|
| 39 |
+
|
| 40 |
+
bernfrac = mp.bernfrac
|
| 41 |
+
|
| 42 |
+
qfrom = mp.qfrom
|
| 43 |
+
mfrom = mp.mfrom
|
| 44 |
+
kfrom = mp.kfrom
|
| 45 |
+
taufrom = mp.taufrom
|
| 46 |
+
qbarfrom = mp.qbarfrom
|
| 47 |
+
ellipfun = mp.ellipfun
|
| 48 |
+
jtheta = mp.jtheta
|
| 49 |
+
kleinj = mp.kleinj
|
| 50 |
+
eta = mp.eta
|
| 51 |
+
|
| 52 |
+
qp = mp.qp
|
| 53 |
+
qhyper = mp.qhyper
|
| 54 |
+
qgamma = mp.qgamma
|
| 55 |
+
qfac = mp.qfac
|
| 56 |
+
|
| 57 |
+
nint_distance = mp.nint_distance
|
| 58 |
+
|
| 59 |
+
plot = mp.plot
|
| 60 |
+
cplot = mp.cplot
|
| 61 |
+
splot = mp.splot
|
| 62 |
+
|
| 63 |
+
odefun = mp.odefun
|
| 64 |
+
|
| 65 |
+
jacobian = mp.jacobian
|
| 66 |
+
findroot = mp.findroot
|
| 67 |
+
multiplicity = mp.multiplicity
|
| 68 |
+
|
| 69 |
+
isinf = mp.isinf
|
| 70 |
+
isnan = mp.isnan
|
| 71 |
+
isnormal = mp.isnormal
|
| 72 |
+
isint = mp.isint
|
| 73 |
+
isfinite = mp.isfinite
|
| 74 |
+
almosteq = mp.almosteq
|
| 75 |
+
nan = mp.nan
|
| 76 |
+
rand = mp.rand
|
| 77 |
+
|
| 78 |
+
absmin = mp.absmin
|
| 79 |
+
absmax = mp.absmax
|
| 80 |
+
|
| 81 |
+
fraction = mp.fraction
|
| 82 |
+
|
| 83 |
+
linspace = mp.linspace
|
| 84 |
+
arange = mp.arange
|
| 85 |
+
|
| 86 |
+
mpmathify = convert = mp.convert
|
| 87 |
+
mpc = mp.mpc
|
| 88 |
+
|
| 89 |
+
mpi = iv._mpi
|
| 90 |
+
|
| 91 |
+
nstr = mp.nstr
|
| 92 |
+
nprint = mp.nprint
|
| 93 |
+
chop = mp.chop
|
| 94 |
+
|
| 95 |
+
fneg = mp.fneg
|
| 96 |
+
fadd = mp.fadd
|
| 97 |
+
fsub = mp.fsub
|
| 98 |
+
fmul = mp.fmul
|
| 99 |
+
fdiv = mp.fdiv
|
| 100 |
+
fprod = mp.fprod
|
| 101 |
+
|
| 102 |
+
quad = mp.quad
|
| 103 |
+
quadgl = mp.quadgl
|
| 104 |
+
quadts = mp.quadts
|
| 105 |
+
quadosc = mp.quadosc
|
| 106 |
+
quadsubdiv = mp.quadsubdiv
|
| 107 |
+
|
| 108 |
+
invertlaplace = mp.invertlaplace
|
| 109 |
+
invlaptalbot = mp.invlaptalbot
|
| 110 |
+
invlapstehfest = mp.invlapstehfest
|
| 111 |
+
invlapdehoog = mp.invlapdehoog
|
| 112 |
+
|
| 113 |
+
pslq = mp.pslq
|
| 114 |
+
identify = mp.identify
|
| 115 |
+
findpoly = mp.findpoly
|
| 116 |
+
|
| 117 |
+
richardson = mp.richardson
|
| 118 |
+
shanks = mp.shanks
|
| 119 |
+
levin = mp.levin
|
| 120 |
+
cohen_alt = mp.cohen_alt
|
| 121 |
+
nsum = mp.nsum
|
| 122 |
+
nprod = mp.nprod
|
| 123 |
+
difference = mp.difference
|
| 124 |
+
diff = mp.diff
|
| 125 |
+
diffs = mp.diffs
|
| 126 |
+
diffs_prod = mp.diffs_prod
|
| 127 |
+
diffs_exp = mp.diffs_exp
|
| 128 |
+
diffun = mp.diffun
|
| 129 |
+
differint = mp.differint
|
| 130 |
+
taylor = mp.taylor
|
| 131 |
+
pade = mp.pade
|
| 132 |
+
polyval = mp.polyval
|
| 133 |
+
polyroots = mp.polyroots
|
| 134 |
+
fourier = mp.fourier
|
| 135 |
+
fourierval = mp.fourierval
|
| 136 |
+
sumem = mp.sumem
|
| 137 |
+
sumap = mp.sumap
|
| 138 |
+
chebyfit = mp.chebyfit
|
| 139 |
+
limit = mp.limit
|
| 140 |
+
|
| 141 |
+
matrix = mp.matrix
|
| 142 |
+
eye = mp.eye
|
| 143 |
+
diag = mp.diag
|
| 144 |
+
zeros = mp.zeros
|
| 145 |
+
ones = mp.ones
|
| 146 |
+
hilbert = mp.hilbert
|
| 147 |
+
randmatrix = mp.randmatrix
|
| 148 |
+
swap_row = mp.swap_row
|
| 149 |
+
extend = mp.extend
|
| 150 |
+
norm = mp.norm
|
| 151 |
+
mnorm = mp.mnorm
|
| 152 |
+
|
| 153 |
+
lu_solve = mp.lu_solve
|
| 154 |
+
lu = mp.lu
|
| 155 |
+
qr = mp.qr
|
| 156 |
+
unitvector = mp.unitvector
|
| 157 |
+
inverse = mp.inverse
|
| 158 |
+
residual = mp.residual
|
| 159 |
+
qr_solve = mp.qr_solve
|
| 160 |
+
cholesky = mp.cholesky
|
| 161 |
+
cholesky_solve = mp.cholesky_solve
|
| 162 |
+
det = mp.det
|
| 163 |
+
cond = mp.cond
|
| 164 |
+
hessenberg = mp.hessenberg
|
| 165 |
+
schur = mp.schur
|
| 166 |
+
eig = mp.eig
|
| 167 |
+
eig_sort = mp.eig_sort
|
| 168 |
+
eigsy = mp.eigsy
|
| 169 |
+
eighe = mp.eighe
|
| 170 |
+
eigh = mp.eigh
|
| 171 |
+
svd_r = mp.svd_r
|
| 172 |
+
svd_c = mp.svd_c
|
| 173 |
+
svd = mp.svd
|
| 174 |
+
gauss_quadrature = mp.gauss_quadrature
|
| 175 |
+
|
| 176 |
+
expm = mp.expm
|
| 177 |
+
sqrtm = mp.sqrtm
|
| 178 |
+
powm = mp.powm
|
| 179 |
+
logm = mp.logm
|
| 180 |
+
sinm = mp.sinm
|
| 181 |
+
cosm = mp.cosm
|
| 182 |
+
|
| 183 |
+
mpf = mp.mpf
|
| 184 |
+
j = mp.j
|
| 185 |
+
exp = mp.exp
|
| 186 |
+
expj = mp.expj
|
| 187 |
+
expjpi = mp.expjpi
|
| 188 |
+
ln = mp.ln
|
| 189 |
+
im = mp.im
|
| 190 |
+
re = mp.re
|
| 191 |
+
inf = mp.inf
|
| 192 |
+
ninf = mp.ninf
|
| 193 |
+
sign = mp.sign
|
| 194 |
+
|
| 195 |
+
eps = mp.eps
|
| 196 |
+
pi = mp.pi
|
| 197 |
+
ln2 = mp.ln2
|
| 198 |
+
ln10 = mp.ln10
|
| 199 |
+
phi = mp.phi
|
| 200 |
+
e = mp.e
|
| 201 |
+
euler = mp.euler
|
| 202 |
+
catalan = mp.catalan
|
| 203 |
+
khinchin = mp.khinchin
|
| 204 |
+
glaisher = mp.glaisher
|
| 205 |
+
apery = mp.apery
|
| 206 |
+
degree = mp.degree
|
| 207 |
+
twinprime = mp.twinprime
|
| 208 |
+
mertens = mp.mertens
|
| 209 |
+
|
| 210 |
+
ldexp = mp.ldexp
|
| 211 |
+
frexp = mp.frexp
|
| 212 |
+
|
| 213 |
+
fsum = mp.fsum
|
| 214 |
+
fdot = mp.fdot
|
| 215 |
+
|
| 216 |
+
sqrt = mp.sqrt
|
| 217 |
+
cbrt = mp.cbrt
|
| 218 |
+
exp = mp.exp
|
| 219 |
+
ln = mp.ln
|
| 220 |
+
log = mp.log
|
| 221 |
+
log10 = mp.log10
|
| 222 |
+
power = mp.power
|
| 223 |
+
cos = mp.cos
|
| 224 |
+
sin = mp.sin
|
| 225 |
+
tan = mp.tan
|
| 226 |
+
cosh = mp.cosh
|
| 227 |
+
sinh = mp.sinh
|
| 228 |
+
tanh = mp.tanh
|
| 229 |
+
acos = mp.acos
|
| 230 |
+
asin = mp.asin
|
| 231 |
+
atan = mp.atan
|
| 232 |
+
asinh = mp.asinh
|
| 233 |
+
acosh = mp.acosh
|
| 234 |
+
atanh = mp.atanh
|
| 235 |
+
sec = mp.sec
|
| 236 |
+
csc = mp.csc
|
| 237 |
+
cot = mp.cot
|
| 238 |
+
sech = mp.sech
|
| 239 |
+
csch = mp.csch
|
| 240 |
+
coth = mp.coth
|
| 241 |
+
asec = mp.asec
|
| 242 |
+
acsc = mp.acsc
|
| 243 |
+
acot = mp.acot
|
| 244 |
+
asech = mp.asech
|
| 245 |
+
acsch = mp.acsch
|
| 246 |
+
acoth = mp.acoth
|
| 247 |
+
cospi = mp.cospi
|
| 248 |
+
sinpi = mp.sinpi
|
| 249 |
+
sinc = mp.sinc
|
| 250 |
+
sincpi = mp.sincpi
|
| 251 |
+
cos_sin = mp.cos_sin
|
| 252 |
+
cospi_sinpi = mp.cospi_sinpi
|
| 253 |
+
fabs = mp.fabs
|
| 254 |
+
re = mp.re
|
| 255 |
+
im = mp.im
|
| 256 |
+
conj = mp.conj
|
| 257 |
+
floor = mp.floor
|
| 258 |
+
ceil = mp.ceil
|
| 259 |
+
nint = mp.nint
|
| 260 |
+
frac = mp.frac
|
| 261 |
+
root = mp.root
|
| 262 |
+
nthroot = mp.nthroot
|
| 263 |
+
hypot = mp.hypot
|
| 264 |
+
fmod = mp.fmod
|
| 265 |
+
ldexp = mp.ldexp
|
| 266 |
+
frexp = mp.frexp
|
| 267 |
+
sign = mp.sign
|
| 268 |
+
arg = mp.arg
|
| 269 |
+
phase = mp.phase
|
| 270 |
+
polar = mp.polar
|
| 271 |
+
rect = mp.rect
|
| 272 |
+
degrees = mp.degrees
|
| 273 |
+
radians = mp.radians
|
| 274 |
+
atan2 = mp.atan2
|
| 275 |
+
fib = mp.fib
|
| 276 |
+
fibonacci = mp.fibonacci
|
| 277 |
+
lambertw = mp.lambertw
|
| 278 |
+
zeta = mp.zeta
|
| 279 |
+
altzeta = mp.altzeta
|
| 280 |
+
gamma = mp.gamma
|
| 281 |
+
rgamma = mp.rgamma
|
| 282 |
+
factorial = mp.factorial
|
| 283 |
+
fac = mp.fac
|
| 284 |
+
fac2 = mp.fac2
|
| 285 |
+
beta = mp.beta
|
| 286 |
+
betainc = mp.betainc
|
| 287 |
+
psi = mp.psi
|
| 288 |
+
#psi0 = mp.psi0
|
| 289 |
+
#psi1 = mp.psi1
|
| 290 |
+
#psi2 = mp.psi2
|
| 291 |
+
#psi3 = mp.psi3
|
| 292 |
+
polygamma = mp.polygamma
|
| 293 |
+
digamma = mp.digamma
|
| 294 |
+
#trigamma = mp.trigamma
|
| 295 |
+
#tetragamma = mp.tetragamma
|
| 296 |
+
#pentagamma = mp.pentagamma
|
| 297 |
+
harmonic = mp.harmonic
|
| 298 |
+
bernoulli = mp.bernoulli
|
| 299 |
+
bernfrac = mp.bernfrac
|
| 300 |
+
stieltjes = mp.stieltjes
|
| 301 |
+
hurwitz = mp.hurwitz
|
| 302 |
+
dirichlet = mp.dirichlet
|
| 303 |
+
bernpoly = mp.bernpoly
|
| 304 |
+
eulerpoly = mp.eulerpoly
|
| 305 |
+
eulernum = mp.eulernum
|
| 306 |
+
polylog = mp.polylog
|
| 307 |
+
clsin = mp.clsin
|
| 308 |
+
clcos = mp.clcos
|
| 309 |
+
gammainc = mp.gammainc
|
| 310 |
+
gammaprod = mp.gammaprod
|
| 311 |
+
binomial = mp.binomial
|
| 312 |
+
rf = mp.rf
|
| 313 |
+
ff = mp.ff
|
| 314 |
+
hyper = mp.hyper
|
| 315 |
+
hyp0f1 = mp.hyp0f1
|
| 316 |
+
hyp1f1 = mp.hyp1f1
|
| 317 |
+
hyp1f2 = mp.hyp1f2
|
| 318 |
+
hyp2f1 = mp.hyp2f1
|
| 319 |
+
hyp2f2 = mp.hyp2f2
|
| 320 |
+
hyp2f0 = mp.hyp2f0
|
| 321 |
+
hyp2f3 = mp.hyp2f3
|
| 322 |
+
hyp3f2 = mp.hyp3f2
|
| 323 |
+
hyperu = mp.hyperu
|
| 324 |
+
hypercomb = mp.hypercomb
|
| 325 |
+
meijerg = mp.meijerg
|
| 326 |
+
appellf1 = mp.appellf1
|
| 327 |
+
appellf2 = mp.appellf2
|
| 328 |
+
appellf3 = mp.appellf3
|
| 329 |
+
appellf4 = mp.appellf4
|
| 330 |
+
hyper2d = mp.hyper2d
|
| 331 |
+
bihyper = mp.bihyper
|
| 332 |
+
erf = mp.erf
|
| 333 |
+
erfc = mp.erfc
|
| 334 |
+
erfi = mp.erfi
|
| 335 |
+
erfinv = mp.erfinv
|
| 336 |
+
npdf = mp.npdf
|
| 337 |
+
ncdf = mp.ncdf
|
| 338 |
+
expint = mp.expint
|
| 339 |
+
e1 = mp.e1
|
| 340 |
+
ei = mp.ei
|
| 341 |
+
li = mp.li
|
| 342 |
+
ci = mp.ci
|
| 343 |
+
si = mp.si
|
| 344 |
+
chi = mp.chi
|
| 345 |
+
shi = mp.shi
|
| 346 |
+
fresnels = mp.fresnels
|
| 347 |
+
fresnelc = mp.fresnelc
|
| 348 |
+
airyai = mp.airyai
|
| 349 |
+
airybi = mp.airybi
|
| 350 |
+
airyaizero = mp.airyaizero
|
| 351 |
+
airybizero = mp.airybizero
|
| 352 |
+
scorergi = mp.scorergi
|
| 353 |
+
scorerhi = mp.scorerhi
|
| 354 |
+
ellipk = mp.ellipk
|
| 355 |
+
ellipe = mp.ellipe
|
| 356 |
+
ellipf = mp.ellipf
|
| 357 |
+
ellippi = mp.ellippi
|
| 358 |
+
elliprc = mp.elliprc
|
| 359 |
+
elliprj = mp.elliprj
|
| 360 |
+
elliprf = mp.elliprf
|
| 361 |
+
elliprd = mp.elliprd
|
| 362 |
+
elliprg = mp.elliprg
|
| 363 |
+
agm = mp.agm
|
| 364 |
+
jacobi = mp.jacobi
|
| 365 |
+
chebyt = mp.chebyt
|
| 366 |
+
chebyu = mp.chebyu
|
| 367 |
+
legendre = mp.legendre
|
| 368 |
+
legenp = mp.legenp
|
| 369 |
+
legenq = mp.legenq
|
| 370 |
+
hermite = mp.hermite
|
| 371 |
+
pcfd = mp.pcfd
|
| 372 |
+
pcfu = mp.pcfu
|
| 373 |
+
pcfv = mp.pcfv
|
| 374 |
+
pcfw = mp.pcfw
|
| 375 |
+
gegenbauer = mp.gegenbauer
|
| 376 |
+
laguerre = mp.laguerre
|
| 377 |
+
spherharm = mp.spherharm
|
| 378 |
+
besselj = mp.besselj
|
| 379 |
+
j0 = mp.j0
|
| 380 |
+
j1 = mp.j1
|
| 381 |
+
besseli = mp.besseli
|
| 382 |
+
bessely = mp.bessely
|
| 383 |
+
besselk = mp.besselk
|
| 384 |
+
besseljzero = mp.besseljzero
|
| 385 |
+
besselyzero = mp.besselyzero
|
| 386 |
+
hankel1 = mp.hankel1
|
| 387 |
+
hankel2 = mp.hankel2
|
| 388 |
+
struveh = mp.struveh
|
| 389 |
+
struvel = mp.struvel
|
| 390 |
+
angerj = mp.angerj
|
| 391 |
+
webere = mp.webere
|
| 392 |
+
lommels1 = mp.lommels1
|
| 393 |
+
lommels2 = mp.lommels2
|
| 394 |
+
whitm = mp.whitm
|
| 395 |
+
whitw = mp.whitw
|
| 396 |
+
ber = mp.ber
|
| 397 |
+
bei = mp.bei
|
| 398 |
+
ker = mp.ker
|
| 399 |
+
kei = mp.kei
|
| 400 |
+
coulombc = mp.coulombc
|
| 401 |
+
coulombf = mp.coulombf
|
| 402 |
+
coulombg = mp.coulombg
|
| 403 |
+
barnesg = mp.barnesg
|
| 404 |
+
superfac = mp.superfac
|
| 405 |
+
hyperfac = mp.hyperfac
|
| 406 |
+
loggamma = mp.loggamma
|
| 407 |
+
siegeltheta = mp.siegeltheta
|
| 408 |
+
siegelz = mp.siegelz
|
| 409 |
+
grampoint = mp.grampoint
|
| 410 |
+
zetazero = mp.zetazero
|
| 411 |
+
riemannr = mp.riemannr
|
| 412 |
+
primepi = mp.primepi
|
| 413 |
+
primepi2 = mp.primepi2
|
| 414 |
+
primezeta = mp.primezeta
|
| 415 |
+
bell = mp.bell
|
| 416 |
+
polyexp = mp.polyexp
|
| 417 |
+
expm1 = mp.expm1
|
| 418 |
+
log1p = mp.log1p
|
| 419 |
+
powm1 = mp.powm1
|
| 420 |
+
unitroots = mp.unitroots
|
| 421 |
+
cyclotomic = mp.cyclotomic
|
| 422 |
+
mangoldt = mp.mangoldt
|
| 423 |
+
secondzeta = mp.secondzeta
|
| 424 |
+
nzeros = mp.nzeros
|
| 425 |
+
backlunds = mp.backlunds
|
| 426 |
+
lerchphi = mp.lerchphi
|
| 427 |
+
stirling1 = mp.stirling1
|
| 428 |
+
stirling2 = mp.stirling2
|
| 429 |
+
squarew = mp.squarew
|
| 430 |
+
trianglew = mp.trianglew
|
| 431 |
+
sawtoothw = mp.sawtoothw
|
| 432 |
+
unit_triangle = mp.unit_triangle
|
| 433 |
+
sigmoid = mp.sigmoid
|
| 434 |
+
|
| 435 |
+
# be careful when changing this name, don't use test*!
|
| 436 |
+
def runtests():
|
| 437 |
+
"""
|
| 438 |
+
Run all mpmath tests and print output.
|
| 439 |
+
"""
|
| 440 |
+
import os.path
|
| 441 |
+
from inspect import getsourcefile
|
| 442 |
+
from .tests import runtests as tests
|
| 443 |
+
testdir = os.path.dirname(os.path.abspath(getsourcefile(tests)))
|
| 444 |
+
importdir = os.path.abspath(testdir + '/../..')
|
| 445 |
+
tests.testit(importdir, testdir)
|
| 446 |
+
|
| 447 |
+
def doctests(filter=[]):
|
| 448 |
+
import sys
|
| 449 |
+
from timeit import default_timer as clock
|
| 450 |
+
for i, arg in enumerate(sys.argv):
|
| 451 |
+
if '__init__.py' in arg:
|
| 452 |
+
filter = [sn for sn in sys.argv[i+1:] if not sn.startswith("-")]
|
| 453 |
+
break
|
| 454 |
+
import doctest
|
| 455 |
+
globs = globals().copy()
|
| 456 |
+
for obj in globs: #sorted(globs.keys()):
|
| 457 |
+
if filter:
|
| 458 |
+
if not sum([pat in obj for pat in filter]):
|
| 459 |
+
continue
|
| 460 |
+
sys.stdout.write(str(obj) + " ")
|
| 461 |
+
sys.stdout.flush()
|
| 462 |
+
t1 = clock()
|
| 463 |
+
doctest.run_docstring_examples(globs[obj], {}, verbose=("-v" in sys.argv))
|
| 464 |
+
t2 = clock()
|
| 465 |
+
print(round(t2-t1, 3))
|
| 466 |
+
|
| 467 |
+
if __name__ == '__main__':
|
| 468 |
+
doctests()
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/calculus/__init__.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from . import calculus
|
| 2 |
+
# XXX: hack to set methods
|
| 3 |
+
from . import approximation
|
| 4 |
+
from . import differentiation
|
| 5 |
+
from . import extrapolation
|
| 6 |
+
from . import polynomials
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/calculus/calculus.py
ADDED
|
@@ -0,0 +1,6 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
class CalculusMethods(object):
|
| 2 |
+
pass
|
| 3 |
+
|
| 4 |
+
def defun(f):
|
| 5 |
+
setattr(CalculusMethods, f.__name__, f)
|
| 6 |
+
return f
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/calculus/differentiation.py
ADDED
|
@@ -0,0 +1,647 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ..libmp.backend import xrange
|
| 2 |
+
from .calculus import defun
|
| 3 |
+
|
| 4 |
+
try:
|
| 5 |
+
iteritems = dict.iteritems
|
| 6 |
+
except AttributeError:
|
| 7 |
+
iteritems = dict.items
|
| 8 |
+
|
| 9 |
+
#----------------------------------------------------------------------------#
|
| 10 |
+
# Differentiation #
|
| 11 |
+
#----------------------------------------------------------------------------#
|
| 12 |
+
|
| 13 |
+
@defun
|
| 14 |
+
def difference(ctx, s, n):
|
| 15 |
+
r"""
|
| 16 |
+
Given a sequence `(s_k)` containing at least `n+1` items, returns the
|
| 17 |
+
`n`-th forward difference,
|
| 18 |
+
|
| 19 |
+
.. math ::
|
| 20 |
+
|
| 21 |
+
\Delta^n = \sum_{k=0}^{\infty} (-1)^{k+n} {n \choose k} s_k.
|
| 22 |
+
"""
|
| 23 |
+
n = int(n)
|
| 24 |
+
d = ctx.zero
|
| 25 |
+
b = (-1) ** (n & 1)
|
| 26 |
+
for k in xrange(n+1):
|
| 27 |
+
d += b * s[k]
|
| 28 |
+
b = (b * (k-n)) // (k+1)
|
| 29 |
+
return d
|
| 30 |
+
|
| 31 |
+
def hsteps(ctx, f, x, n, prec, **options):
|
| 32 |
+
singular = options.get('singular')
|
| 33 |
+
addprec = options.get('addprec', 10)
|
| 34 |
+
direction = options.get('direction', 0)
|
| 35 |
+
workprec = (prec+2*addprec) * (n+1)
|
| 36 |
+
orig = ctx.prec
|
| 37 |
+
try:
|
| 38 |
+
ctx.prec = workprec
|
| 39 |
+
h = options.get('h')
|
| 40 |
+
if h is None:
|
| 41 |
+
if options.get('relative'):
|
| 42 |
+
hextramag = int(ctx.mag(x))
|
| 43 |
+
else:
|
| 44 |
+
hextramag = 0
|
| 45 |
+
h = ctx.ldexp(1, -prec-addprec-hextramag)
|
| 46 |
+
else:
|
| 47 |
+
h = ctx.convert(h)
|
| 48 |
+
# Directed: steps x, x+h, ... x+n*h
|
| 49 |
+
direction = options.get('direction', 0)
|
| 50 |
+
if direction:
|
| 51 |
+
h *= ctx.sign(direction)
|
| 52 |
+
steps = xrange(n+1)
|
| 53 |
+
norm = h
|
| 54 |
+
# Central: steps x-n*h, x-(n-2)*h ..., x, ..., x+(n-2)*h, x+n*h
|
| 55 |
+
else:
|
| 56 |
+
steps = xrange(-n, n+1, 2)
|
| 57 |
+
norm = (2*h)
|
| 58 |
+
# Perturb
|
| 59 |
+
if singular:
|
| 60 |
+
x += 0.5*h
|
| 61 |
+
values = [f(x+k*h) for k in steps]
|
| 62 |
+
return values, norm, workprec
|
| 63 |
+
finally:
|
| 64 |
+
ctx.prec = orig
|
| 65 |
+
|
| 66 |
+
|
| 67 |
+
@defun
def diff(ctx, f, x, n=1, **options):
    r"""
    Numerically computes the derivative of `f`, `f'(x)`, or generally for
    an integer `n \ge 0`, the `n`-th derivative `f^{(n)}(x)`.
    A few basic examples are::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> diff(lambda x: x**2 + x, 1.0)
        3.0
        >>> diff(lambda x: x**2 + x, 1.0, 2)
        2.0
        >>> diff(lambda x: x**2 + x, 1.0, 3)
        0.0
        >>> nprint([diff(exp, 3, n) for n in range(5)])   # exp'(x) = exp(x)
        [20.0855, 20.0855, 20.0855, 20.0855, 20.0855]

    Even more generally, given a tuple of arguments `(x_1, \ldots, x_k)`
    and order `(n_1, \ldots, n_k)`, the partial derivative
    `f^{(n_1,\ldots,n_k)}(x_1,\ldots,x_k)` is evaluated. For example::

        >>> diff(lambda x,y: 3*x*y + 2*y - x, (0.25, 0.5), (0,1))
        2.75
        >>> diff(lambda x,y: 3*x*y + 2*y - x, (0.25, 0.5), (1,1))
        3.0

    **Options**

    The following optional keyword arguments are recognized:

    ``method``
        Supported methods are ``'step'`` or ``'quad'``: derivatives may be
        computed using either a finite difference with a small step
        size `h` (default), or numerical quadrature.
    ``direction``
        Direction of finite difference: can be -1 for a left
        difference, 0 for a central difference (default), or +1
        for a right difference; more generally can be any complex number.
    ``addprec``
        Extra precision for `h` used to account for the function's
        sensitivity to perturbations (default = 10).
    ``relative``
        Choose `h` relative to the magnitude of `x`, rather than an
        absolute value; useful for large or tiny `x` (default = False).
    ``h``
        As an alternative to ``addprec`` and ``relative``, manually
        select the step size `h`.
    ``singular``
        If True, evaluation exactly at the point `x` is avoided; this is
        useful for differentiating functions with removable singularities.
        Default = False.
    ``radius``
        Radius of integration contour (with ``method = 'quad'``).
        Default = 0.25. A larger radius typically is faster and more
        accurate, but it must be chosen so that `f` has no
        singularities within the radius from the evaluation point.

    A finite difference requires `n+1` function evaluations and must be
    performed at `(n+1)` times the target precision. Accordingly, `f` must
    support fast evaluation at high precision.

    With integration, a larger number of function evaluations is
    required, but not much extra precision is required. For high order
    derivatives, this method may thus be faster if f is very expensive to
    evaluate at high precision.

    **Further examples**

    The direction option is useful for computing left- or right-sided
    derivatives of nonsmooth functions::

        >>> diff(abs, 0, direction=0)
        0.0
        >>> diff(abs, 0, direction=1)
        1.0
        >>> diff(abs, 0, direction=-1)
        -1.0

    More generally, if the direction is nonzero, a right difference
    is computed where the step size is multiplied by sign(direction).
    For example, with direction=+j, the derivative from the positive
    imaginary direction will be computed::

        >>> diff(abs, 0, direction=j)
        (0.0 - 1.0j)

    With integration, the result may have a small imaginary part
    even if the result is purely real::

        >>> diff(sqrt, 1, method='quad')    # doctest:+ELLIPSIS
        (0.5 - 4.59...e-26j)
        >>> chop(_)
        0.5

    Adding precision to obtain an accurate value::

        >>> diff(cos, 1e-30)
        0.0
        >>> diff(cos, 1e-30, h=0.0001)
        -9.99999998328279e-31
        >>> diff(cos, 1e-30, addprec=100)
        -1.0e-30

    """
    partial = False
    # If n and x are both iterable, a partial derivative was requested;
    # scalar arguments raise TypeError here and fall through to the
    # one-dimensional path below.
    try:
        orders = list(n)
        x = list(x)
        partial = True
    except TypeError:
        pass
    if partial:
        x = [ctx.convert(_) for _ in x]
        return _partial_diff(ctx, f, x, orders, options)
    method = options.get('method', 'step')
    # Zeroth derivative: just evaluate f (unless the point itself must be
    # avoided, or the caller explicitly asked for quadrature).
    if n == 0 and method != 'quad' and not options.get('singular'):
        return f(ctx.convert(x))
    prec = ctx.prec
    try:
        if method == 'step':
            # Finite difference computed at elevated working precision.
            values, norm, workprec = hsteps(ctx, f, x, n, prec, **options)
            ctx.prec = workprec
            v = ctx.difference(values, n) / norm**n
        elif method == 'quad':
            # Cauchy integral formula over a circular contour of the
            # given radius around x.
            ctx.prec += 10
            radius = ctx.convert(options.get('radius', 0.25))
            def g(t):
                rei = radius*ctx.expj(t)
                z = x + rei
                return f(z) / rei**n
            d = ctx.quadts(g, [0, 2*ctx.pi])
            v = d * ctx.factorial(n) / (2*ctx.pi)
        else:
            raise ValueError("unknown method: %r" % method)
    finally:
        # Always restore the caller's precision.
        ctx.prec = prec
    # Unary + rounds the high-precision result to the restored precision.
    return +v
|
| 205 |
+
|
| 206 |
+
def _partial_diff(ctx, f, xs, orders, options):
|
| 207 |
+
if not orders:
|
| 208 |
+
return f()
|
| 209 |
+
if not sum(orders):
|
| 210 |
+
return f(*xs)
|
| 211 |
+
i = 0
|
| 212 |
+
for i in range(len(orders)):
|
| 213 |
+
if orders[i]:
|
| 214 |
+
break
|
| 215 |
+
order = orders[i]
|
| 216 |
+
def fdiff_inner(*f_args):
|
| 217 |
+
def inner(t):
|
| 218 |
+
return f(*(f_args[:i] + (t,) + f_args[i+1:]))
|
| 219 |
+
return ctx.diff(inner, f_args[i], order, **options)
|
| 220 |
+
orders[i] = 0
|
| 221 |
+
return _partial_diff(ctx, fdiff_inner, xs, orders, options)
|
| 222 |
+
|
| 223 |
+
@defun
def diffs(ctx, f, x, n=None, **options):
    r"""
    Returns a generator that yields the sequence of derivatives

    .. math ::

        f(x), f'(x), f''(x), \ldots, f^{(k)}(x), \ldots

    With ``method='step'``, :func:`~mpmath.diffs` uses only `O(k)`
    function evaluations to generate the first `k` derivatives,
    rather than the roughly `O(k^2)` evaluations
    required if one calls :func:`~mpmath.diff` `k` separate times.

    With `n < \infty`, the generator stops as soon as the
    `n`-th derivative has been generated. If the exact number of
    needed derivatives is known in advance, this is further
    slightly more efficient.

    Options are the same as for :func:`~mpmath.diff`.

    **Examples**

    >>> from mpmath import *
    >>> mp.dps = 15
    >>> nprint(list(diffs(cos, 1, 5)))
    [0.540302, -0.841471, -0.540302, 0.841471, 0.540302, -0.841471]
    >>> for i, d in zip(range(6), diffs(cos, 1)):
    ...     print("%s %s" % (i, d))
    ...
    0 0.54030230586814
    1 -0.841470984807897
    2 -0.54030230586814
    3 0.841470984807897
    4 0.54030230586814
    5 -0.841470984807897

    """
    if n is None:
        # No bound given: generate derivatives forever.
        n = ctx.inf
    else:
        n = int(n)
    if options.get('method', 'step') != 'step':
        # Non-step methods get no batching benefit; fall back to one
        # diff() call per derivative order.
        k = 0
        while k < n + 1:
            yield ctx.diff(f, x, k, **options)
            k += 1
        return
    singular = options.get('singular')
    if singular:
        yield ctx.diff(f, x, 0, singular=True)
    else:
        yield f(ctx.convert(x))
    if n < 1:
        return
    if n == ctx.inf:
        A, B = 1, 2
    else:
        A, B = 1, n+1
    # [A, B) is the range of derivative orders served from the current
    # batch of samples; when exhausted, the batch size grows so that a
    # single hsteps() call covers several future orders.
    while 1:
        callprec = ctx.prec
        y, norm, workprec = hsteps(ctx, f, x, B, callprec, **options)
        for k in xrange(A, B):
            try:
                ctx.prec = workprec
                d = ctx.difference(y, k) / norm**k
            finally:
                # Restore the caller's precision before each yield.
                ctx.prec = callprec
            yield +d
            if k >= n:
                return
        A, B = B, int(A*1.4+1)
        B = min(B, n)
|
| 296 |
+
|
| 297 |
+
def iterable_to_function(gen):
    """
    Wrap an iterable as a function f such that f(k) returns the k-th item
    it produces.  Items are pulled lazily and memoized, so repeated and
    out-of-order access is cheap.
    """
    it = iter(gen)
    cache = []
    def f(k):
        # Pull from the iterator until item k is cached.
        while len(cache) <= k:
            cache.append(next(it))
        return cache[k]
    return f
|
| 305 |
+
|
| 306 |
+
@defun
def diffs_prod(ctx, factors):
    r"""
    Given a list of `N` iterables or generators yielding
    `f_k(x), f'_k(x), f''_k(x), \ldots` for `k = 1, \ldots, N`,
    generate `g(x), g'(x), g''(x), \ldots` where
    `g(x) = f_1(x) f_2(x) \cdots f_N(x)`.

    At high precision and for large orders, this is typically more efficient
    than numerical differentiation if the derivatives of each `f_k(x)`
    admit direct computation.

    Note: This function does not increase the working precision internally,
    so guard digits may have to be added externally for full accuracy.

    **Examples**

    >>> from mpmath import *
    >>> mp.dps = 15; mp.pretty = True
    >>> f = lambda x: exp(x)*cos(x)*sin(x)
    >>> u = diffs(f, 1)
    >>> v = mp.diffs_prod([diffs(exp,1), diffs(cos,1), diffs(sin,1)])
    >>> next(u); next(v)
    1.23586333600241
    1.23586333600241
    >>> next(u); next(v)
    0.104658952245596
    0.104658952245596
    >>> next(u); next(v)
    -5.96999877552086
    -5.96999877552086
    >>> next(u); next(v)
    -12.4632923122697
    -12.4632923122697

    """
    N = len(factors)
    if N == 1:
        # A single factor: its derivative stream is the answer.
        for term in factors[0]:
            yield term
    else:
        # Divide and conquer: split the product in half and combine the
        # halves' derivative streams with the Leibniz rule.
        left = iterable_to_function(ctx.diffs_prod(factors[:N//2]))
        right = iterable_to_function(ctx.diffs_prod(factors[N//2:]))
        order = 0
        while 1:
            # sum(binomial(order,k)*left(order-k)*right(k), k=0..order),
            # with the binomial coefficient updated incrementally.
            total = left(order) * right(0)
            binom = 1
            for k in xrange(1, order+1):
                binom = binom * (order-k+1) // k
                total += binom * left(order-k) * right(k)
            yield total
            order += 1
|
| 359 |
+
|
| 360 |
+
def dpoly(n, _cache={}):
    """
    nth differentiation polynomial for exp (Faa di Bruno's formula).

    The polynomial is represented as a dict mapping tuples of exponents
    (powers of f', f'', ...) to integer coefficients.  Results are
    memoized in the (intentionally) mutable default ``_cache``; callers
    are expected to request n >= 1.

    TODO: most exponents are zero, so maybe a sparse representation
    would be better.
    """
    if n in _cache:
        return _cache[n]
    if not _cache:
        _cache[0] = {(0,): 1}
    prev = dpoly(n - 1)
    # Widen every exponent tuple by one slot for the new derivative order.
    prev = dict((key + (0,), val) for (key, val) in prev.items())
    result = {}
    # Differentiating exp(f) multiplies each monomial by f', i.e. bumps
    # the first exponent slot.
    for powers, count in prev.items():
        bumped = (powers[0] + 1,) + powers[1:]
        result[bumped] = result.get(bumped, 0) + count
    # Product rule on the monomial itself: a factor f^(k+1) with exponent
    # p contributes p copies of the monomial with one such factor
    # promoted to order k+2.
    for powers, count in prev.items():
        if not sum(powers):
            continue
        for k, p in enumerate(powers):
            if p:
                shifted = powers[:k] + (p - 1, powers[k+1] + 1) + powers[k+2:]
                result[shifted] = result.get(shifted, 0) + p * count
    _cache[n] = result
    return _cache[n]
|
| 392 |
+
|
| 393 |
+
@defun
def diffs_exp(ctx, fdiffs):
    r"""
    Given an iterable or generator yielding `f(x), f'(x), f''(x), \ldots`
    generate `g(x), g'(x), g''(x), \ldots` where `g(x) = \exp(f(x))`.

    At high precision and for large orders, this is typically more efficient
    than numerical differentiation if the derivatives of `f(x)`
    admit direct computation.

    Note: This function does not increase the working precision internally,
    so guard digits may have to be added externally for full accuracy.

    **Examples**

    The derivatives of the gamma function can be computed using
    logarithmic differentiation::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>>
        >>> def diffs_loggamma(x):
        ...     yield loggamma(x)
        ...     i = 0
        ...     while 1:
        ...         yield psi(i,x)
        ...         i += 1
        ...
        >>> u = diffs_exp(diffs_loggamma(3))
        >>> v = diffs(gamma, 3)
        >>> next(u); next(v)
        2.0
        2.0
        >>> next(u); next(v)
        1.84556867019693
        1.84556867019693
        >>> next(u); next(v)
        2.49292999190269
        2.49292999190269
        >>> next(u); next(v)
        3.44996501352367
        3.44996501352367

    """
    fn = iterable_to_function(fdiffs)
    # g = exp(f); every derivative of g carries this common factor.
    g0 = ctx.exp(fn(0))
    yield g0
    order = 1
    while 1:
        # Faa di Bruno: g^(order) = g0 * P(f', f'', ...), where the
        # polynomial's monomials and coefficients come from dpoly(order).
        total = ctx.mpf(0)
        for powers, coeff in dpoly(order).items():
            total += coeff * ctx.fprod(
                fn(k+1)**p for (k, p) in enumerate(powers) if p)
        yield total * g0
        order += 1
|
| 447 |
+
|
| 448 |
+
@defun
def differint(ctx, f, x, n=1, x0=0):
    r"""
    Calculates the Riemann-Liouville differintegral, or fractional
    derivative, defined by

    .. math ::

        \,_{x_0}{\mathbb{D}}^n_xf(x) = \frac{1}{\Gamma(m-n)} \frac{d^m}{dx^m}
        \int_{x_0}^{x}(x-t)^{m-n-1}f(t)dt

    where `f` is a given (presumably well-behaved) function,
    `x` is the evaluation point, `n` is the order, and `x_0` is
    the reference point of integration (`m` is an arbitrary
    parameter selected automatically).

    With `n = 1`, this is just the standard derivative `f'(x)`; with `n = 2`,
    the second derivative `f''(x)`, etc. With `n = -1`, it gives
    `\int_{x_0}^x f(t) dt`, with `n = -2`
    it gives `\int_{x_0}^x \left( \int_{x_0}^t f(u) du \right) dt`, etc.

    As `n` is permitted to be any number, this operator generalizes
    iterated differentiation and iterated integration to a single
    operator with a continuous order parameter.

    **Examples**

    There is an exact formula for the fractional derivative of a
    monomial `x^p`, which may be used as a reference. For example,
    the following gives a half-derivative (order 0.5)::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> x = mpf(3); p = 2; n = 0.5
        >>> differint(lambda t: t**p, x, n)
        7.81764019044672
        >>> gamma(p+1)/gamma(p-n+1) * x**(p-n)
        7.81764019044672

    Another useful test function is the exponential function, whose
    integration / differentiation formula easily generalizes
    to arbitrary order. Here we first compute a third derivative,
    and then a triply nested integral. (The reference point `x_0`
    is set to `-\infty` to avoid nonzero endpoint terms.)::

        >>> differint(lambda x: exp(pi*x), -1.5, 3)
        0.278538406900792
        >>> exp(pi*-1.5) * pi**3
        0.278538406900792
        >>> differint(lambda x: exp(pi*x), 3.5, -3, -inf)
        1922.50563031149
        >>> exp(pi*3.5) / pi**3
        1922.50563031149

    However, for noninteger `n`, the differentiation formula for the
    exponential function must be modified to give the same result as the
    Riemann-Liouville differintegral::

        >>> x = mpf(3.5)
        >>> c = pi
        >>> n = 1+2*j
        >>> differint(lambda x: exp(c*x), x, n)
        (-123295.005390743 + 140955.117867654j)
        >>> x**(-n) * exp(c)**x * (x*c)**n * gammainc(-n, 0, x*c) / gamma(-n)
        (-123295.005390743 + 140955.117867654j)


    """
    # Smallest integer m exceeding Re(n), at least 1: we take m ordinary
    # derivatives of an (m-n)-fold fractional integral.
    m = max(int(ctx.ceil(ctx.re(n))) + 1, 1)
    r = m - n - 1
    def g(s):
        # The (m-n)-fold Riemann-Liouville integral of f, evaluated at s.
        return ctx.quad(lambda t: (s-t)**r * f(t), [x0, s])
    return ctx.diff(g, x, m) / ctx.gamma(m-n)
|
| 520 |
+
|
| 521 |
+
@defun
def diffun(ctx, f, n=1, **options):
    r"""
    Given a function `f`, returns a function `g(x)` that evaluates the nth
    derivative `f^{(n)}(x)`::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> cos2 = diffun(sin)
        >>> sin2 = diffun(sin, 4)
        >>> cos(1.3), cos2(1.3)
        (0.267498828624587, 0.267498828624587)
        >>> sin(1.3), sin2(1.3)
        (0.963558185417193, 0.963558185417193)

    The function `f` must support arbitrary precision evaluation.
    See :func:`~mpmath.diff` for additional details and supported
    keyword options.
    """
    # The zeroth derivative is f itself.
    if n == 0:
        return f
    def derivative(x):
        return ctx.diff(f, x, n, **options)
    return derivative
|
| 545 |
+
|
| 546 |
+
@defun
def taylor(ctx, f, x, n, **options):
    r"""
    Produces a degree-`n` Taylor polynomial around the point `x` of the
    given function `f`. The coefficients are returned as a list.

    >>> from mpmath import *
    >>> mp.dps = 15; mp.pretty = True
    >>> nprint(chop(taylor(sin, 0, 5)))
    [0.0, 1.0, 0.0, -0.166667, 0.0, 0.00833333]

    The coefficients are computed using high-order numerical
    differentiation. The function must be possible to evaluate
    to arbitrary precision. See :func:`~mpmath.diff` for additional details
    and supported keyword options.

    Note that to evaluate the Taylor polynomial as an approximation
    of `f`, e.g. with :func:`~mpmath.polyval`, the coefficients must be reversed,
    and the point of the Taylor expansion must be subtracted from
    the argument:

    >>> p = taylor(exp, 2.0, 10)
    >>> polyval(p[::-1], 2.5 - 2.0)
    12.1824939606092
    >>> exp(2.5)
    12.1824939607035

    """
    # Coefficient i is f^(i)(x) / i!, optionally chopped to suppress
    # tiny numerical noise (default on).
    chop = options.get("chop", True)
    coeffs = []
    for i, d in enumerate(ctx.diffs(f, x, n, **options)):
        if chop:
            d = ctx.chop(d)
        coeffs.append(d / ctx.factorial(i))
    return coeffs
|
| 579 |
+
|
| 580 |
+
@defun
def pade(ctx, a, L, M):
    r"""
    Computes a Pade approximation of degree `(L, M)` to a function.
    Given at least `L+M+1` Taylor coefficients `a` approximating
    a function `A(x)`, :func:`~mpmath.pade` returns coefficients of
    polynomials `P, Q` satisfying

    .. math ::

        P = \sum_{k=0}^L p_k x^k

        Q = \sum_{k=0}^M q_k x^k

        Q_0 = 1

        A(x) Q(x) = P(x) + O(x^{L+M+1})

    `P(x)/Q(x)` can provide a good approximation to an analytic function
    beyond the radius of convergence of its Taylor series (example
    from G.A. Baker 'Essentials of Pade Approximants' Academic Press,
    Ch.1A)::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> one = mpf(1)
        >>> def f(x):
        ...     return sqrt((one + 2*x)/(one + x))
        ...
        >>> a = taylor(f, 0, 6)
        >>> p, q = pade(a, 3, 3)
        >>> x = 10
        >>> polyval(p[::-1], x)/polyval(q[::-1], x)
        1.38169105566806
        >>> f(x)
        1.38169855941551

    """
    # Determining L+1 coefficients of P and M coefficients of Q requires
    # L+M+1 coefficients of A.
    if len(a) < L + M + 1:
        raise ValueError("L+M+1 Coefficients should be provided")

    if M == 0:
        # Denominator is the constant 1; numerator is a Taylor truncation.
        if L == 0:
            return [ctx.one], [ctx.one]
        else:
            return a[:L+1], [ctx.one]

    # Solve for the denominator coefficients q[1..M] from
    #   a[L]*q[1]     + ... + a[L-M+1]*q[M] = -a[L+1]
    #   ...
    #   a[L+M-1]*q[1] + ... + a[L]*q[M]     = -a[L+M]
    # (the min() guard keeps indices of a non-negative when L is small).
    mat = ctx.matrix(M)
    for row in range(M):
        for col in range(min(M, L + row + 1)):
            mat[row, col] = a[L + row - col]
    rhs = -ctx.matrix(a[(L+1):(L+M+1)])
    sol = ctx.lu_solve(mat, rhs)
    q = [ctx.one] + list(sol)
    # Numerator from the convolution p_i = sum_j q_j * a_{i-j}.
    p = [0]*(L+1)
    for i in range(L+1):
        acc = a[i]
        for j in range(1, min(M, i) + 1):
            acc += q[j]*a[i-j]
        p[i] = acc
    return p, q
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/calculus/extrapolation.py
ADDED
|
@@ -0,0 +1,2115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
try:
|
| 2 |
+
from itertools import izip
|
| 3 |
+
except ImportError:
|
| 4 |
+
izip = zip
|
| 5 |
+
|
| 6 |
+
from ..libmp.backend import xrange
|
| 7 |
+
from .calculus import defun
|
| 8 |
+
|
| 9 |
+
try:
|
| 10 |
+
next = next
|
| 11 |
+
except NameError:
|
| 12 |
+
next = lambda _: _.next()
|
| 13 |
+
|
| 14 |
+
@defun
|
| 15 |
+
def richardson(ctx, seq):
|
| 16 |
+
r"""
|
| 17 |
+
Given a list ``seq`` of the first `N` elements of a slowly convergent
|
| 18 |
+
infinite sequence, :func:`~mpmath.richardson` computes the `N`-term
|
| 19 |
+
Richardson extrapolate for the limit.
|
| 20 |
+
|
| 21 |
+
:func:`~mpmath.richardson` returns `(v, c)` where `v` is the estimated
|
| 22 |
+
limit and `c` is the magnitude of the largest weight used during the
|
| 23 |
+
computation. The weight provides an estimate of the precision
|
| 24 |
+
lost to cancellation. Due to cancellation effects, the sequence must
|
| 25 |
+
be typically be computed at a much higher precision than the target
|
| 26 |
+
accuracy of the extrapolation.
|
| 27 |
+
|
| 28 |
+
**Applicability and issues**
|
| 29 |
+
|
| 30 |
+
The `N`-step Richardson extrapolation algorithm used by
|
| 31 |
+
:func:`~mpmath.richardson` is described in [1].
|
| 32 |
+
|
| 33 |
+
Richardson extrapolation only works for a specific type of sequence,
|
| 34 |
+
namely one converging like partial sums of
|
| 35 |
+
`P(1)/Q(1) + P(2)/Q(2) + \ldots` where `P` and `Q` are polynomials.
|
| 36 |
+
When the sequence does not convergence at such a rate
|
| 37 |
+
:func:`~mpmath.richardson` generally produces garbage.
|
| 38 |
+
|
| 39 |
+
Richardson extrapolation has the advantage of being fast: the `N`-term
|
| 40 |
+
extrapolate requires only `O(N)` arithmetic operations, and usually
|
| 41 |
+
produces an estimate that is accurate to `O(N)` digits. Contrast with
|
| 42 |
+
the Shanks transformation (see :func:`~mpmath.shanks`), which requires
|
| 43 |
+
`O(N^2)` operations.
|
| 44 |
+
|
| 45 |
+
:func:`~mpmath.richardson` is unable to produce an estimate for the
|
| 46 |
+
approximation error. One way to estimate the error is to perform
|
| 47 |
+
two extrapolations with slightly different `N` and comparing the
|
| 48 |
+
results.
|
| 49 |
+
|
| 50 |
+
Richardson extrapolation does not work for oscillating sequences.
|
| 51 |
+
As a simple workaround, :func:`~mpmath.richardson` detects if the last
|
| 52 |
+
three elements do not differ monotonically, and in that case
|
| 53 |
+
applies extrapolation only to the even-index elements.
|
| 54 |
+
|
| 55 |
+
**Example**
|
| 56 |
+
|
| 57 |
+
Applying Richardson extrapolation to the Leibniz series for `\pi`::
|
| 58 |
+
|
| 59 |
+
>>> from mpmath import *
|
| 60 |
+
>>> mp.dps = 30; mp.pretty = True
|
| 61 |
+
>>> S = [4*sum(mpf(-1)**n/(2*n+1) for n in range(m))
|
| 62 |
+
... for m in range(1,30)]
|
| 63 |
+
>>> v, c = richardson(S[:10])
|
| 64 |
+
>>> v
|
| 65 |
+
3.2126984126984126984126984127
|
| 66 |
+
>>> nprint([v-pi, c])
|
| 67 |
+
[0.0711058, 2.0]
|
| 68 |
+
|
| 69 |
+
>>> v, c = richardson(S[:30])
|
| 70 |
+
>>> v
|
| 71 |
+
3.14159265468624052829954206226
|
| 72 |
+
>>> nprint([v-pi, c])
|
| 73 |
+
[1.09645e-9, 20833.3]
|
| 74 |
+
|
| 75 |
+
**References**
|
| 76 |
+
|
| 77 |
+
1. [BenderOrszag]_ pp. 375-376
|
| 78 |
+
|
| 79 |
+
"""
|
| 80 |
+
if len(seq) < 3:
|
| 81 |
+
raise ValueError("seq should be of minimum length 3")
|
| 82 |
+
if ctx.sign(seq[-1]-seq[-2]) != ctx.sign(seq[-2]-seq[-3]):
|
| 83 |
+
seq = seq[::2]
|
| 84 |
+
N = len(seq)//2-1
|
| 85 |
+
s = ctx.zero
|
| 86 |
+
# The general weight is c[k] = (N+k)**N * (-1)**(k+N) / k! / (N-k)!
|
| 87 |
+
# To avoid repeated factorials, we simplify the quotient
|
| 88 |
+
# of successive weights to obtain a recurrence relation
|
| 89 |
+
c = (-1)**N * N**N / ctx.mpf(ctx._ifac(N))
|
| 90 |
+
maxc = 1
|
| 91 |
+
for k in xrange(N+1):
|
| 92 |
+
s += c * seq[N+k]
|
| 93 |
+
maxc = max(abs(c), maxc)
|
| 94 |
+
c *= (k-N)*ctx.mpf(k+N+1)**N
|
| 95 |
+
c /= ((1+k)*ctx.mpf(k+N)**N)
|
| 96 |
+
return s, maxc
|
| 97 |
+
|
| 98 |
+
@defun
|
| 99 |
+
def shanks(ctx, seq, table=None, randomized=False):
|
| 100 |
+
r"""
|
| 101 |
+
Given a list ``seq`` of the first `N` elements of a slowly
|
| 102 |
+
convergent infinite sequence `(A_k)`, :func:`~mpmath.shanks` computes the iterated
|
| 103 |
+
Shanks transformation `S(A), S(S(A)), \ldots, S^{N/2}(A)`. The Shanks
|
| 104 |
+
transformation often provides strong convergence acceleration,
|
| 105 |
+
especially if the sequence is oscillating.
|
| 106 |
+
|
| 107 |
+
The iterated Shanks transformation is computed using the Wynn
|
| 108 |
+
epsilon algorithm (see [1]). :func:`~mpmath.shanks` returns the full
|
| 109 |
+
epsilon table generated by Wynn's algorithm, which can be read
|
| 110 |
+
off as follows:
|
| 111 |
+
|
| 112 |
+
* The table is a list of lists forming a lower triangular matrix,
|
| 113 |
+
where higher row and column indices correspond to more accurate
|
| 114 |
+
values.
|
| 115 |
+
* The columns with even index hold dummy entries (required for the
|
| 116 |
+
computation) and the columns with odd index hold the actual
|
| 117 |
+
extrapolates.
|
| 118 |
+
* The last element in the last row is typically the most
|
| 119 |
+
accurate estimate of the limit.
|
| 120 |
+
* The difference to the third last element in the last row
|
| 121 |
+
provides an estimate of the approximation error.
|
| 122 |
+
* The magnitude of the second last element provides an estimate
|
| 123 |
+
of the numerical accuracy lost to cancellation.
|
| 124 |
+
|
| 125 |
+
For convenience, so the extrapolation is stopped at an odd index
|
| 126 |
+
so that ``shanks(seq)[-1][-1]`` always gives an estimate of the
|
| 127 |
+
limit.
|
| 128 |
+
|
| 129 |
+
Optionally, an existing table can be passed to :func:`~mpmath.shanks`.
|
| 130 |
+
This can be used to efficiently extend a previous computation after
|
| 131 |
+
new elements have been appended to the sequence. The table will
|
| 132 |
+
then be updated in-place.
|
| 133 |
+
|
| 134 |
+
**The Shanks transformation**
|
| 135 |
+
|
| 136 |
+
The Shanks transformation is defined as follows (see [2]): given
|
| 137 |
+
the input sequence `(A_0, A_1, \ldots)`, the transformed sequence is
|
| 138 |
+
given by
|
| 139 |
+
|
| 140 |
+
.. math ::
|
| 141 |
+
|
| 142 |
+
S(A_k) = \frac{A_{k+1}A_{k-1}-A_k^2}{A_{k+1}+A_{k-1}-2 A_k}
|
| 143 |
+
|
| 144 |
+
The Shanks transformation gives the exact limit `A_{\infty}` in a
|
| 145 |
+
single step if `A_k = A + a q^k`. Note in particular that it
|
| 146 |
+
extrapolates the exact sum of a geometric series in a single step.
|
| 147 |
+
|
| 148 |
+
Applying the Shanks transformation once often improves convergence
|
| 149 |
+
substantially for an arbitrary sequence, but the optimal effect is
|
| 150 |
+
obtained by applying it iteratively:
|
| 151 |
+
`S(S(A_k)), S(S(S(A_k))), \ldots`.
|
| 152 |
+
|
| 153 |
+
Wynn's epsilon algorithm provides an efficient way to generate
|
| 154 |
+
the table of iterated Shanks transformations. It reduces the
|
| 155 |
+
computation of each element to essentially a single division, at
|
| 156 |
+
the cost of requiring dummy elements in the table. See [1] for
|
| 157 |
+
details.
|
| 158 |
+
|
| 159 |
+
**Precision issues**
|
| 160 |
+
|
| 161 |
+
Due to cancellation effects, the sequence must be typically be
|
| 162 |
+
computed at a much higher precision than the target accuracy
|
| 163 |
+
of the extrapolation.
|
| 164 |
+
|
| 165 |
+
If the Shanks transformation converges to the exact limit (such
|
| 166 |
+
as if the sequence is a geometric series), then a division by
|
| 167 |
+
zero occurs. By default, :func:`~mpmath.shanks` handles this case by
|
| 168 |
+
terminating the iteration and returning the table it has
|
| 169 |
+
generated so far. With *randomized=True*, it will instead
|
| 170 |
+
replace the zero by a pseudorandom number close to zero.
|
| 171 |
+
(TODO: find a better solution to this problem.)
|
| 172 |
+
|
| 173 |
+
**Examples**
|
| 174 |
+
|
| 175 |
+
We illustrate by applying Shanks transformation to the Leibniz
|
| 176 |
+
series for `\pi`::
|
| 177 |
+
|
| 178 |
+
>>> from mpmath import *
|
| 179 |
+
>>> mp.dps = 50
|
| 180 |
+
>>> S = [4*sum(mpf(-1)**n/(2*n+1) for n in range(m))
|
| 181 |
+
... for m in range(1,30)]
|
| 182 |
+
>>>
|
| 183 |
+
>>> T = shanks(S[:7])
|
| 184 |
+
>>> for row in T:
|
| 185 |
+
... nprint(row)
|
| 186 |
+
...
|
| 187 |
+
[-0.75]
|
| 188 |
+
[1.25, 3.16667]
|
| 189 |
+
[-1.75, 3.13333, -28.75]
|
| 190 |
+
[2.25, 3.14524, 82.25, 3.14234]
|
| 191 |
+
[-2.75, 3.13968, -177.75, 3.14139, -969.937]
|
| 192 |
+
[3.25, 3.14271, 327.25, 3.14166, 3515.06, 3.14161]
|
| 193 |
+
|
| 194 |
+
The extrapolated accuracy is about 4 digits, and about 4 digits
|
| 195 |
+
may have been lost due to cancellation::
|
| 196 |
+
|
| 197 |
+
>>> L = T[-1]
|
| 198 |
+
>>> nprint([abs(L[-1] - pi), abs(L[-1] - L[-3]), abs(L[-2])])
|
| 199 |
+
[2.22532e-5, 4.78309e-5, 3515.06]
|
| 200 |
+
|
| 201 |
+
Now we extend the computation::
|
| 202 |
+
|
| 203 |
+
>>> T = shanks(S[:25], T)
|
| 204 |
+
>>> L = T[-1]
|
| 205 |
+
>>> nprint([abs(L[-1] - pi), abs(L[-1] - L[-3]), abs(L[-2])])
|
| 206 |
+
[3.75527e-19, 1.48478e-19, 2.96014e+17]
|
| 207 |
+
|
| 208 |
+
The value for pi is now accurate to 18 digits. About 18 digits may
|
| 209 |
+
also have been lost to cancellation.
|
| 210 |
+
|
| 211 |
+
Here is an example with a geometric series, where the convergence
|
| 212 |
+
is immediate (the sum is exactly 1)::
|
| 213 |
+
|
| 214 |
+
>>> mp.dps = 15
|
| 215 |
+
>>> for row in shanks([0.5, 0.75, 0.875, 0.9375, 0.96875]):
|
| 216 |
+
... nprint(row)
|
| 217 |
+
[4.0]
|
| 218 |
+
[8.0, 1.0]
|
| 219 |
+
|
| 220 |
+
**References**
|
| 221 |
+
|
| 222 |
+
1. [GravesMorris]_
|
| 223 |
+
|
| 224 |
+
2. [BenderOrszag]_ pp. 368-375
|
| 225 |
+
|
| 226 |
+
"""
|
| 227 |
+
if len(seq) < 2:
|
| 228 |
+
raise ValueError("seq should be of minimum length 2")
|
| 229 |
+
if table:
|
| 230 |
+
START = len(table)
|
| 231 |
+
else:
|
| 232 |
+
START = 0
|
| 233 |
+
table = []
|
| 234 |
+
STOP = len(seq) - 1
|
| 235 |
+
if STOP & 1:
|
| 236 |
+
STOP -= 1
|
| 237 |
+
one = ctx.one
|
| 238 |
+
eps = +ctx.eps
|
| 239 |
+
if randomized:
|
| 240 |
+
from random import Random
|
| 241 |
+
rnd = Random()
|
| 242 |
+
rnd.seed(START)
|
| 243 |
+
for i in xrange(START, STOP):
|
| 244 |
+
row = []
|
| 245 |
+
for j in xrange(i+1):
|
| 246 |
+
if j == 0:
|
| 247 |
+
a, b = 0, seq[i+1]-seq[i]
|
| 248 |
+
else:
|
| 249 |
+
if j == 1:
|
| 250 |
+
a = seq[i]
|
| 251 |
+
else:
|
| 252 |
+
a = table[i-1][j-2]
|
| 253 |
+
b = row[j-1] - table[i-1][j-1]
|
| 254 |
+
if not b:
|
| 255 |
+
if randomized:
|
| 256 |
+
b = (1 + rnd.getrandbits(10))*eps
|
| 257 |
+
elif i & 1:
|
| 258 |
+
return table[:-1]
|
| 259 |
+
else:
|
| 260 |
+
return table
|
| 261 |
+
row.append(a + one/b)
|
| 262 |
+
table.append(row)
|
| 263 |
+
return table
|
| 264 |
+
|
| 265 |
+
|
| 266 |
+
class levin_class:
|
| 267 |
+
# levin: Copyright 2013 Timo Hartmann (thartmann15 at gmail.com)
|
| 268 |
+
r"""
|
| 269 |
+
This interface implements Levin's (nonlinear) sequence transformation for
|
| 270 |
+
convergence acceleration and summation of divergent series. It performs
|
| 271 |
+
better than the Shanks/Wynn-epsilon algorithm for logarithmic convergent
|
| 272 |
+
or alternating divergent series.
|
| 273 |
+
|
| 274 |
+
Let *A* be the series we want to sum:
|
| 275 |
+
|
| 276 |
+
.. math ::
|
| 277 |
+
|
| 278 |
+
A = \sum_{k=0}^{\infty} a_k
|
| 279 |
+
|
| 280 |
+
Attention: all `a_k` must be non-zero!
|
| 281 |
+
|
| 282 |
+
Let `s_n` be the partial sums of this series:
|
| 283 |
+
|
| 284 |
+
.. math ::
|
| 285 |
+
|
| 286 |
+
s_n = \sum_{k=0}^n a_k.
|
| 287 |
+
|
| 288 |
+
**Methods**
|
| 289 |
+
|
| 290 |
+
Calling ``levin`` returns an object with the following methods.
|
| 291 |
+
|
| 292 |
+
``update(...)`` works with the list of individual terms `a_k` of *A*, and
|
| 293 |
+
``update_step(...)`` works with the list of partial sums `s_k` of *A*:
|
| 294 |
+
|
| 295 |
+
.. code ::
|
| 296 |
+
|
| 297 |
+
v, e = ...update([a_0, a_1,..., a_k])
|
| 298 |
+
v, e = ...update_psum([s_0, s_1,..., s_k])
|
| 299 |
+
|
| 300 |
+
``step(...)`` works with the individual terms `a_k` and ``step_psum(...)``
|
| 301 |
+
works with the partial sums `s_k`:
|
| 302 |
+
|
| 303 |
+
.. code ::
|
| 304 |
+
|
| 305 |
+
v, e = ...step(a_k)
|
| 306 |
+
v, e = ...step_psum(s_k)
|
| 307 |
+
|
| 308 |
+
*v* is the current estimate for *A*, and *e* is an error estimate which is
|
| 309 |
+
simply the difference between the current estimate and the last estimate.
|
| 310 |
+
One should not mix ``update``, ``update_psum``, ``step`` and ``step_psum``.
|
| 311 |
+
|
| 312 |
+
**A word of caution**
|
| 313 |
+
|
| 314 |
+
One can only hope for good results (i.e. convergence acceleration or
|
| 315 |
+
resummation) if the `s_n` have some well defind asymptotic behavior for
|
| 316 |
+
large `n` and are not erratic or random. Furthermore one usually needs very
|
| 317 |
+
high working precision because of the numerical cancellation. If the working
|
| 318 |
+
precision is insufficient, levin may produce silently numerical garbage.
|
| 319 |
+
Furthermore even if the Levin-transformation converges, in the general case
|
| 320 |
+
there is no proof that the result is mathematically sound. Only for very
|
| 321 |
+
special classes of problems one can prove that the Levin-transformation
|
| 322 |
+
converges to the expected result (for example Stieltjes-type integrals).
|
| 323 |
+
Furthermore the Levin-transform is quite expensive (i.e. slow) in comparison
|
| 324 |
+
to Shanks/Wynn-epsilon, Richardson & co.
|
| 325 |
+
In summary one can say that the Levin-transformation is powerful but
|
| 326 |
+
unreliable and that it may need a copious amount of working precision.
|
| 327 |
+
|
| 328 |
+
The Levin transform has several variants differing in the choice of weights.
|
| 329 |
+
Some variants are better suited for the possible flavours of convergence
|
| 330 |
+
behaviour of *A* than other variants:
|
| 331 |
+
|
| 332 |
+
.. code ::
|
| 333 |
+
|
| 334 |
+
convergence behaviour levin-u levin-t levin-v shanks/wynn-epsilon
|
| 335 |
+
|
| 336 |
+
logarithmic + - + -
|
| 337 |
+
linear + + + +
|
| 338 |
+
alternating divergent + + + +
|
| 339 |
+
|
| 340 |
+
"+" means the variant is suitable,"-" means the variant is not suitable;
|
| 341 |
+
for comparison the Shanks/Wynn-epsilon transform is listed, too.
|
| 342 |
+
|
| 343 |
+
The variant is controlled though the variant keyword (i.e. ``variant="u"``,
|
| 344 |
+
``variant="t"`` or ``variant="v"``). Overall "u" is probably the best choice.
|
| 345 |
+
|
| 346 |
+
Finally it is possible to use the Sidi-S transform instead of the Levin transform
|
| 347 |
+
by using the keyword ``method='sidi'``. The Sidi-S transform works better than the
|
| 348 |
+
Levin transformation for some divergent series (see the examples).
|
| 349 |
+
|
| 350 |
+
Parameters:
|
| 351 |
+
|
| 352 |
+
.. code ::
|
| 353 |
+
|
| 354 |
+
method "levin" or "sidi" chooses either the Levin or the Sidi-S transformation
|
| 355 |
+
variant "u","t" or "v" chooses the weight variant.
|
| 356 |
+
|
| 357 |
+
The Levin transform is also accessible through the nsum interface.
|
| 358 |
+
``method="l"`` or ``method="levin"`` select the normal Levin transform while
|
| 359 |
+
``method="sidi"``
|
| 360 |
+
selects the Sidi-S transform. The variant is in both cases selected through the
|
| 361 |
+
levin_variant keyword. The stepsize in :func:`~mpmath.nsum` must not be chosen too large, otherwise
|
| 362 |
+
it will miss the point where the Levin transform converges resulting in numerical
|
| 363 |
+
overflow/garbage. For highly divergent series a copious amount of working precision
|
| 364 |
+
must be chosen.
|
| 365 |
+
|
| 366 |
+
**Examples**
|
| 367 |
+
|
| 368 |
+
First we sum the zeta function::
|
| 369 |
+
|
| 370 |
+
>>> from mpmath import mp
|
| 371 |
+
>>> mp.prec = 53
|
| 372 |
+
>>> eps = mp.mpf(mp.eps)
|
| 373 |
+
>>> with mp.extraprec(2 * mp.prec): # levin needs a high working precision
|
| 374 |
+
... L = mp.levin(method = "levin", variant = "u")
|
| 375 |
+
... S, s, n = [], 0, 1
|
| 376 |
+
... while 1:
|
| 377 |
+
... s += mp.one / (n * n)
|
| 378 |
+
... n += 1
|
| 379 |
+
... S.append(s)
|
| 380 |
+
... v, e = L.update_psum(S)
|
| 381 |
+
... if e < eps:
|
| 382 |
+
... break
|
| 383 |
+
... if n > 1000: raise RuntimeError("iteration limit exceeded")
|
| 384 |
+
>>> print(mp.chop(v - mp.pi ** 2 / 6))
|
| 385 |
+
0.0
|
| 386 |
+
>>> w = mp.nsum(lambda n: 1 / (n*n), [1, mp.inf], method = "levin", levin_variant = "u")
|
| 387 |
+
>>> print(mp.chop(v - w))
|
| 388 |
+
0.0
|
| 389 |
+
|
| 390 |
+
Now we sum the zeta function outside its range of convergence
|
| 391 |
+
(attention: This does not work at the negative integers!)::
|
| 392 |
+
|
| 393 |
+
>>> eps = mp.mpf(mp.eps)
|
| 394 |
+
>>> with mp.extraprec(2 * mp.prec): # levin needs a high working precision
|
| 395 |
+
... L = mp.levin(method = "levin", variant = "v")
|
| 396 |
+
... A, n = [], 1
|
| 397 |
+
... while 1:
|
| 398 |
+
... s = mp.mpf(n) ** (2 + 3j)
|
| 399 |
+
... n += 1
|
| 400 |
+
... A.append(s)
|
| 401 |
+
... v, e = L.update(A)
|
| 402 |
+
... if e < eps:
|
| 403 |
+
... break
|
| 404 |
+
... if n > 1000: raise RuntimeError("iteration limit exceeded")
|
| 405 |
+
>>> print(mp.chop(v - mp.zeta(-2-3j)))
|
| 406 |
+
0.0
|
| 407 |
+
>>> w = mp.nsum(lambda n: n ** (2 + 3j), [1, mp.inf], method = "levin", levin_variant = "v")
|
| 408 |
+
>>> print(mp.chop(v - w))
|
| 409 |
+
0.0
|
| 410 |
+
|
| 411 |
+
Now we sum the divergent asymptotic expansion of an integral related to the
|
| 412 |
+
exponential integral (see also [2] p.373). The Sidi-S transform works best here::
|
| 413 |
+
|
| 414 |
+
>>> z = mp.mpf(10)
|
| 415 |
+
>>> exact = mp.quad(lambda x: mp.exp(-x)/(1+x/z),[0,mp.inf])
|
| 416 |
+
>>> # exact = z * mp.exp(z) * mp.expint(1,z) # this is the symbolic expression for the integral
|
| 417 |
+
>>> eps = mp.mpf(mp.eps)
|
| 418 |
+
>>> with mp.extraprec(2 * mp.prec): # high working precisions are mandatory for divergent resummation
|
| 419 |
+
... L = mp.levin(method = "sidi", variant = "t")
|
| 420 |
+
... n = 0
|
| 421 |
+
... while 1:
|
| 422 |
+
... s = (-1)**n * mp.fac(n) * z ** (-n)
|
| 423 |
+
... v, e = L.step(s)
|
| 424 |
+
... n += 1
|
| 425 |
+
... if e < eps:
|
| 426 |
+
... break
|
| 427 |
+
... if n > 1000: raise RuntimeError("iteration limit exceeded")
|
| 428 |
+
>>> print(mp.chop(v - exact))
|
| 429 |
+
0.0
|
| 430 |
+
>>> w = mp.nsum(lambda n: (-1) ** n * mp.fac(n) * z ** (-n), [0, mp.inf], method = "sidi", levin_variant = "t")
|
| 431 |
+
>>> print(mp.chop(v - w))
|
| 432 |
+
0.0
|
| 433 |
+
|
| 434 |
+
Another highly divergent integral is also summable::
|
| 435 |
+
|
| 436 |
+
>>> z = mp.mpf(2)
|
| 437 |
+
>>> eps = mp.mpf(mp.eps)
|
| 438 |
+
>>> exact = mp.quad(lambda x: mp.exp( -x * x / 2 - z * x ** 4), [0,mp.inf]) * 2 / mp.sqrt(2 * mp.pi)
|
| 439 |
+
>>> # exact = mp.exp(mp.one / (32 * z)) * mp.besselk(mp.one / 4, mp.one / (32 * z)) / (4 * mp.sqrt(z * mp.pi)) # this is the symbolic expression for the integral
|
| 440 |
+
>>> with mp.extraprec(7 * mp.prec): # we need copious amount of precision to sum this highly divergent series
|
| 441 |
+
... L = mp.levin(method = "levin", variant = "t")
|
| 442 |
+
... n, s = 0, 0
|
| 443 |
+
... while 1:
|
| 444 |
+
... s += (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n))
|
| 445 |
+
... n += 1
|
| 446 |
+
... v, e = L.step_psum(s)
|
| 447 |
+
... if e < eps:
|
| 448 |
+
... break
|
| 449 |
+
... if n > 1000: raise RuntimeError("iteration limit exceeded")
|
| 450 |
+
>>> print(mp.chop(v - exact))
|
| 451 |
+
0.0
|
| 452 |
+
>>> w = mp.nsum(lambda n: (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)),
|
| 453 |
+
... [0, mp.inf], method = "levin", levin_variant = "t", workprec = 8*mp.prec, steps = [2] + [1 for x in xrange(1000)])
|
| 454 |
+
>>> print(mp.chop(v - w))
|
| 455 |
+
0.0
|
| 456 |
+
|
| 457 |
+
These examples run with 15-20 decimal digits precision. For higher precision the
|
| 458 |
+
working precision must be raised.
|
| 459 |
+
|
| 460 |
+
**Examples for nsum**
|
| 461 |
+
|
| 462 |
+
Here we calculate Euler's constant as the constant term in the Laurent
|
| 463 |
+
expansion of `\zeta(s)` at `s=1`. This sum converges extremly slowly because of
|
| 464 |
+
the logarithmic convergence behaviour of the Dirichlet series for zeta::
|
| 465 |
+
|
| 466 |
+
>>> mp.dps = 30
|
| 467 |
+
>>> z = mp.mpf(10) ** (-10)
|
| 468 |
+
>>> a = mp.nsum(lambda n: n**(-(1+z)), [1, mp.inf], method = "l") - 1 / z
|
| 469 |
+
>>> print(mp.chop(a - mp.euler, tol = 1e-10))
|
| 470 |
+
0.0
|
| 471 |
+
|
| 472 |
+
The Sidi-S transform performs excellently for the alternating series of `\log(2)`::
|
| 473 |
+
|
| 474 |
+
>>> a = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "sidi")
|
| 475 |
+
>>> print(mp.chop(a - mp.log(2)))
|
| 476 |
+
0.0
|
| 477 |
+
|
| 478 |
+
Hypergeometric series can also be summed outside their range of convergence.
|
| 479 |
+
The stepsize in :func:`~mpmath.nsum` must not be chosen too large, otherwise it will miss the
|
| 480 |
+
point where the Levin transform converges resulting in numerical overflow/garbage::
|
| 481 |
+
|
| 482 |
+
>>> z = 2 + 1j
|
| 483 |
+
>>> exact = mp.hyp2f1(2 / mp.mpf(3), 4 / mp.mpf(3), 1 / mp.mpf(3), z)
|
| 484 |
+
>>> f = lambda n: mp.rf(2 / mp.mpf(3), n) * mp.rf(4 / mp.mpf(3), n) * z**n / (mp.rf(1 / mp.mpf(3), n) * mp.fac(n))
|
| 485 |
+
>>> v = mp.nsum(f, [0, mp.inf], method = "levin", steps = [10 for x in xrange(1000)])
|
| 486 |
+
>>> print(mp.chop(exact-v))
|
| 487 |
+
0.0
|
| 488 |
+
|
| 489 |
+
References:
|
| 490 |
+
|
| 491 |
+
[1] E.J. Weniger - "Nonlinear Sequence Transformations for the Acceleration of
|
| 492 |
+
Convergence and the Summation of Divergent Series" arXiv:math/0306302
|
| 493 |
+
|
| 494 |
+
[2] A. Sidi - "Pratical Extrapolation Methods"
|
| 495 |
+
|
| 496 |
+
[3] H.H.H. Homeier - "Scalar Levin-Type Sequence Transformations" arXiv:math/0005209
|
| 497 |
+
|
| 498 |
+
"""
|
| 499 |
+
|
| 500 |
+
def __init__(self, method = "levin", variant = "u"):
|
| 501 |
+
self.variant = variant
|
| 502 |
+
self.n = 0
|
| 503 |
+
self.a0 = 0
|
| 504 |
+
self.theta = 1
|
| 505 |
+
self.A = []
|
| 506 |
+
self.B = []
|
| 507 |
+
self.last = 0
|
| 508 |
+
self.last_s = False
|
| 509 |
+
|
| 510 |
+
if method == "levin":
|
| 511 |
+
self.factor = self.factor_levin
|
| 512 |
+
elif method == "sidi":
|
| 513 |
+
self.factor = self.factor_sidi
|
| 514 |
+
else:
|
| 515 |
+
raise ValueError("levin: unknown method \"%s\"" % method)
|
| 516 |
+
|
| 517 |
+
def factor_levin(self, i):
|
| 518 |
+
# original levin
|
| 519 |
+
# [1] p.50,e.7.5-7 (with n-j replaced by i)
|
| 520 |
+
return (self.theta + i) * (self.theta + self.n - 1) ** (self.n - i - 2) / self.ctx.mpf(self.theta + self.n) ** (self.n - i - 1)
|
| 521 |
+
|
| 522 |
+
def factor_sidi(self, i):
|
| 523 |
+
# sidi analogon to levin (factorial series)
|
| 524 |
+
# [1] p.59,e.8.3-16 (with n-j replaced by i)
|
| 525 |
+
return (self.theta + self.n - 1) * (self.theta + self.n - 2) / self.ctx.mpf((self.theta + 2 * self.n - i - 2) * (self.theta + 2 * self.n - i - 3))
|
| 526 |
+
|
| 527 |
+
def run(self, s, a0, a1 = 0):
|
| 528 |
+
if self.variant=="t":
|
| 529 |
+
# levin t
|
| 530 |
+
w=a0
|
| 531 |
+
elif self.variant=="u":
|
| 532 |
+
# levin u
|
| 533 |
+
w=a0*(self.theta+self.n)
|
| 534 |
+
elif self.variant=="v":
|
| 535 |
+
# levin v
|
| 536 |
+
w=a0*a1/(a0-a1)
|
| 537 |
+
else:
|
| 538 |
+
assert False, "unknown variant"
|
| 539 |
+
|
| 540 |
+
if w==0:
|
| 541 |
+
raise ValueError("levin: zero weight")
|
| 542 |
+
|
| 543 |
+
self.A.append(s/w)
|
| 544 |
+
self.B.append(1/w)
|
| 545 |
+
|
| 546 |
+
for i in range(self.n-1,-1,-1):
|
| 547 |
+
if i==self.n-1:
|
| 548 |
+
f=1
|
| 549 |
+
else:
|
| 550 |
+
f=self.factor(i)
|
| 551 |
+
|
| 552 |
+
self.A[i]=self.A[i+1]-f*self.A[i]
|
| 553 |
+
self.B[i]=self.B[i+1]-f*self.B[i]
|
| 554 |
+
|
| 555 |
+
self.n+=1
|
| 556 |
+
|
| 557 |
+
###########################################################################
|
| 558 |
+
|
| 559 |
+
def update_psum(self,S):
|
| 560 |
+
"""
|
| 561 |
+
This routine applies the convergence acceleration to the list of partial sums.
|
| 562 |
+
|
| 563 |
+
A = sum(a_k, k = 0..infinity)
|
| 564 |
+
s_n = sum(a_k, k = 0..n)
|
| 565 |
+
|
| 566 |
+
v, e = ...update_psum([s_0, s_1,..., s_k])
|
| 567 |
+
|
| 568 |
+
output:
|
| 569 |
+
v current estimate of the series A
|
| 570 |
+
e an error estimate which is simply the difference between the current
|
| 571 |
+
estimate and the last estimate.
|
| 572 |
+
"""
|
| 573 |
+
|
| 574 |
+
if self.variant!="v":
|
| 575 |
+
if self.n==0:
|
| 576 |
+
self.run(S[0],S[0])
|
| 577 |
+
while self.n<len(S):
|
| 578 |
+
self.run(S[self.n],S[self.n]-S[self.n-1])
|
| 579 |
+
else:
|
| 580 |
+
if len(S)==1:
|
| 581 |
+
self.last=0
|
| 582 |
+
return S[0],abs(S[0])
|
| 583 |
+
|
| 584 |
+
if self.n==0:
|
| 585 |
+
self.a1=S[1]-S[0]
|
| 586 |
+
self.run(S[0],S[0],self.a1)
|
| 587 |
+
|
| 588 |
+
while self.n<len(S)-1:
|
| 589 |
+
na1=S[self.n+1]-S[self.n]
|
| 590 |
+
self.run(S[self.n],self.a1,na1)
|
| 591 |
+
self.a1=na1
|
| 592 |
+
|
| 593 |
+
value=self.A[0]/self.B[0]
|
| 594 |
+
err=abs(value-self.last)
|
| 595 |
+
self.last=value
|
| 596 |
+
|
| 597 |
+
return value,err
|
| 598 |
+
|
| 599 |
+
def update(self,X):
|
| 600 |
+
"""
|
| 601 |
+
This routine applies the convergence acceleration to the list of individual terms.
|
| 602 |
+
|
| 603 |
+
A = sum(a_k, k = 0..infinity)
|
| 604 |
+
|
| 605 |
+
v, e = ...update([a_0, a_1,..., a_k])
|
| 606 |
+
|
| 607 |
+
output:
|
| 608 |
+
v current estimate of the series A
|
| 609 |
+
e an error estimate which is simply the difference between the current
|
| 610 |
+
estimate and the last estimate.
|
| 611 |
+
"""
|
| 612 |
+
|
| 613 |
+
if self.variant!="v":
|
| 614 |
+
if self.n==0:
|
| 615 |
+
self.s=X[0]
|
| 616 |
+
self.run(self.s,X[0])
|
| 617 |
+
while self.n<len(X):
|
| 618 |
+
self.s+=X[self.n]
|
| 619 |
+
self.run(self.s,X[self.n])
|
| 620 |
+
else:
|
| 621 |
+
if len(X)==1:
|
| 622 |
+
self.last=0
|
| 623 |
+
return X[0],abs(X[0])
|
| 624 |
+
|
| 625 |
+
if self.n==0:
|
| 626 |
+
self.s=X[0]
|
| 627 |
+
self.run(self.s,X[0],X[1])
|
| 628 |
+
|
| 629 |
+
while self.n<len(X)-1:
|
| 630 |
+
self.s+=X[self.n]
|
| 631 |
+
self.run(self.s,X[self.n],X[self.n+1])
|
| 632 |
+
|
| 633 |
+
value=self.A[0]/self.B[0]
|
| 634 |
+
err=abs(value-self.last)
|
| 635 |
+
self.last=value
|
| 636 |
+
|
| 637 |
+
return value,err
|
| 638 |
+
|
| 639 |
+
###########################################################################
|
| 640 |
+
|
| 641 |
+
def step_psum(self,s):
|
| 642 |
+
"""
|
| 643 |
+
This routine applies the convergence acceleration to the partial sums.
|
| 644 |
+
|
| 645 |
+
A = sum(a_k, k = 0..infinity)
|
| 646 |
+
s_n = sum(a_k, k = 0..n)
|
| 647 |
+
|
| 648 |
+
v, e = ...step_psum(s_k)
|
| 649 |
+
|
| 650 |
+
output:
|
| 651 |
+
v current estimate of the series A
|
| 652 |
+
e an error estimate which is simply the difference between the current
|
| 653 |
+
estimate and the last estimate.
|
| 654 |
+
"""
|
| 655 |
+
|
| 656 |
+
if self.variant!="v":
|
| 657 |
+
if self.n==0:
|
| 658 |
+
self.last_s=s
|
| 659 |
+
self.run(s,s)
|
| 660 |
+
else:
|
| 661 |
+
self.run(s,s-self.last_s)
|
| 662 |
+
self.last_s=s
|
| 663 |
+
else:
|
| 664 |
+
if isinstance(self.last_s,bool):
|
| 665 |
+
self.last_s=s
|
| 666 |
+
self.last_w=s
|
| 667 |
+
self.last=0
|
| 668 |
+
return s,abs(s)
|
| 669 |
+
|
| 670 |
+
na1=s-self.last_s
|
| 671 |
+
self.run(self.last_s,self.last_w,na1)
|
| 672 |
+
self.last_w=na1
|
| 673 |
+
self.last_s=s
|
| 674 |
+
|
| 675 |
+
value=self.A[0]/self.B[0]
|
| 676 |
+
err=abs(value-self.last)
|
| 677 |
+
self.last=value
|
| 678 |
+
|
| 679 |
+
return value,err
|
| 680 |
+
|
| 681 |
+
def step(self,x):
|
| 682 |
+
"""
|
| 683 |
+
This routine applies the convergence acceleration to the individual terms.
|
| 684 |
+
|
| 685 |
+
A = sum(a_k, k = 0..infinity)
|
| 686 |
+
|
| 687 |
+
v, e = ...step(a_k)
|
| 688 |
+
|
| 689 |
+
output:
|
| 690 |
+
v current estimate of the series A
|
| 691 |
+
e an error estimate which is simply the difference between the current
|
| 692 |
+
estimate and the last estimate.
|
| 693 |
+
"""
|
| 694 |
+
|
| 695 |
+
if self.variant!="v":
|
| 696 |
+
if self.n==0:
|
| 697 |
+
self.s=x
|
| 698 |
+
self.run(self.s,x)
|
| 699 |
+
else:
|
| 700 |
+
self.s+=x
|
| 701 |
+
self.run(self.s,x)
|
| 702 |
+
else:
|
| 703 |
+
if isinstance(self.last_s,bool):
|
| 704 |
+
self.last_s=x
|
| 705 |
+
self.s=0
|
| 706 |
+
self.last=0
|
| 707 |
+
return x,abs(x)
|
| 708 |
+
|
| 709 |
+
self.s+=self.last_s
|
| 710 |
+
self.run(self.s,self.last_s,x)
|
| 711 |
+
self.last_s=x
|
| 712 |
+
|
| 713 |
+
value=self.A[0]/self.B[0]
|
| 714 |
+
err=abs(value-self.last)
|
| 715 |
+
self.last=value
|
| 716 |
+
|
| 717 |
+
return value,err
|
| 718 |
+
|
| 719 |
+
def levin(ctx, method = "levin", variant = "u"):
|
| 720 |
+
L = levin_class(method = method, variant = variant)
|
| 721 |
+
L.ctx = ctx
|
| 722 |
+
return L
|
| 723 |
+
|
| 724 |
+
levin.__doc__ = levin_class.__doc__
|
| 725 |
+
defun(levin)
|
| 726 |
+
|
| 727 |
+
|
| 728 |
+
class cohen_alt_class:
    # cohen_alt: Copyright 2013 Timo Hartmann (thartmann15 at gmail.com)
    r"""
    This interface implements the convergence acceleration of alternating series
    as described in H. Cohen, F.R. Villegas, D. Zagier - "Convergence Acceleration
    of Alternating Series". This series transformation works only well if the
    individual terms of the series have an alternating sign. It belongs to the
    class of linear series transformations (in contrast to the Shanks/Wynn-epsilon
    or Levin transform). This series transformation is also able to sum some types
    of divergent series. See the paper under which conditions this resummation is
    mathematical sound.

    Let *A* be the series we want to sum:

    .. math ::

        A = \sum_{k=0}^{\infty} a_k

    Let `s_n` be the partial sums of this series:

    .. math ::

        s_n = \sum_{k=0}^n a_k.


    **Interface**

    Calling ``cohen_alt`` returns an object with the following methods.

    Then ``update(...)`` works with the list of individual terms `a_k` and
    ``update_psum(...)`` works with the list of partial sums `s_k`:

    .. code ::

        v, e = ...update([a_0, a_1,..., a_k])
        v, e = ...update_psum([s_0, s_1,..., s_k])

    *v* is the current estimate for *A*, and *e* is an error estimate which is
    simply the difference between the current estimate and the last estimate.

    **Examples**

    Here we compute the alternating zeta function using ``update_psum``::

        >>> from mpmath import mp
        >>> AC = mp.cohen_alt()
        >>> S, s, n = [], 0, 1
        >>> while 1:
        ...     s += -((-1) ** n) * mp.one / (n * n)
        ...     n += 1
        ...     S.append(s)
        ...     v, e = AC.update_psum(S)
        ...     if e < mp.eps:
        ...         break
        ...     if n > 1000: raise RuntimeError("iteration limit exceeded")
        >>> print(mp.chop(v - mp.pi ** 2 / 12))
        0.0

    Here we compute the product `\prod_{n=1}^{\infty} \Gamma(1+1/(2n-1)) / \Gamma(1+1/(2n))`::

        >>> A = []
        >>> AC = mp.cohen_alt()
        >>> n = 1
        >>> while 1:
        ...     A.append( mp.loggamma(1 + mp.one / (2 * n - 1)))
        ...     A.append(-mp.loggamma(1 + mp.one / (2 * n)))
        ...     n += 1
        ...     v, e = AC.update(A)
        ...     if e < mp.eps:
        ...         break
        ...     if n > 1000: raise RuntimeError("iteration limit exceeded")
        >>> v = mp.exp(v)
        >>> print(mp.chop(v - 1.06215090557106, tol = 1e-12))
        0.0

    ``cohen_alt`` is also accessible through the :func:`~mpmath.nsum` interface::

        >>> v = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "a")
        >>> print(mp.chop(v - mp.log(2)))
        0.0
        >>> v = mp.nsum(lambda n: (-1)**n / (2 * n + 1), [0, mp.inf], method = "a")
        >>> print(mp.chop(v - mp.pi / 4))
        0.0
        >>> v = mp.nsum(lambda n: (-1)**n * mp.log(n) * n, [1, mp.inf], method = "a")
        >>> print(mp.chop(v - mp.diff(lambda s: mp.altzeta(s), -1)))
        0.0

    """

    def __init__(self):
        # Previous accelerated estimate; used only for the error estimate.
        self.last = 0

    def update(self, A):
        """
        This routine applies the convergence acceleration to the list of individual terms.

        A = sum(a_k, k = 0..infinity)

        v, e = ...update([a_0, a_1,..., a_k])

        output:
          v      current estimate of the series A
          e      an error estimate which is simply the difference between the current
                 estimate and the last estimate.
        """
        n = len(A)
        # Normalization d = ((3+sqrt(8))^n + (3+sqrt(8))^-n) / 2, the
        # Chebyshev-polynomial value underlying the CVZ weights.
        d = (3 + self.ctx.sqrt(8)) ** n
        d = (d + 1 / d) / 2
        weight = -self.ctx.one
        coeff = -d
        total = 0

        for k, term in enumerate(A):
            coeff = weight - coeff
            # Fold the alternating sign (-1)^k into the weighted sum.
            total = total + coeff * term if k % 2 == 0 else total - coeff * term
            # Recurrence for the next binomial-type weight.
            weight = 2 * (k + n) * (k - n) * weight / ((2 * k + 1) * (k + self.ctx.one))

        value = total / d

        err = abs(value - self.last)
        self.last = value

        return value, err

    def update_psum(self, S):
        """
        This routine applies the convergence acceleration to the list of partial sums.

        A = sum(a_k, k = 0..infinity)
        s_n = sum(a_k ,k = 0..n)

        v, e = ...update_psum([s_0, s_1,..., s_k])

        output:
          v      current estimate of the series A
          e      an error estimate which is simply the difference between the current
                 estimate and the last estimate.
        """
        n = len(S)
        # Same normalization as in update().
        d = (3 + self.ctx.sqrt(8)) ** n
        d = (d + 1 / d) / 2
        weight = self.ctx.one
        total = 0

        for k, psum in enumerate(S):
            # Weight recurrence comes first here: partial sums use the
            # *updated* coefficient.
            weight = 2 * (n + k) * (n - k) * weight / ((2 * k + 1) * (k + self.ctx.one))
            total += weight * psum

        value = total / d

        err = abs(value - self.last)
        self.last = value

        return value, err
|
| 887 |
+
|
| 888 |
+
def cohen_alt(ctx):
    # Thin factory: create the alternating-series accelerator and bind the
    # calling context to it.
    transform = cohen_alt_class()
    transform.ctx = ctx
    return transform

cohen_alt.__doc__ = cohen_alt_class.__doc__
defun(cohen_alt)
|
| 895 |
+
|
| 896 |
+
|
| 897 |
+
@defun
def sumap(ctx, f, interval, integral=None, error=False):
    r"""
    Evaluates an infinite series of an analytic summand *f* using the
    Abel-Plana formula

    .. math ::

        \sum_{k=0}^{\infty} f(k) = \int_0^{\infty} f(t) dt + \frac{1}{2} f(0) +
            i \int_0^{\infty} \frac{f(it)-f(-it)}{e^{2\pi t}-1} dt.

    Unlike the Euler-Maclaurin formula (see :func:`~mpmath.sumem`),
    the Abel-Plana formula does not require derivatives. However,
    it only works when `|f(it)-f(-it)|` does not
    increase too rapidly with `t`.

    **Examples**

    The Abel-Plana formula is particularly useful when the summand
    decreases like a power of `k`; for example when the sum is a pure
    zeta function::

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> sumap(lambda k: 1/k**2.5, [1,inf])
        1.34148725725091717975677
        >>> zeta(2.5)
        1.34148725725091717975677
        >>> sumap(lambda k: 1/(k+1j)**(2.5+2.5j), [1,inf])
        (-3.385361068546473342286084 - 0.7432082105196321803869551j)
        >>> zeta(2.5+2.5j, 1+1j)
        (-3.385361068546473342286084 - 0.7432082105196321803869551j)

    If the series is alternating, numerical quadrature along the real
    line is likely to give poor results, so it is better to evaluate
    the first term symbolically whenever possible:

        >>> n=3; z=-0.75
        >>> I = expint(n,-log(z))
        >>> chop(sumap(lambda k: z**k / k**n, [1,inf], integral=I))
        -0.6917036036904594510141448
        >>> polylog(n,z)
        -0.6917036036904594510141448

    """
    prec = ctx.prec
    try:
        # Work with a little extra precision; restored in the finally block.
        ctx.prec += 10
        a, b = interval
        if b != ctx.inf:
            raise ValueError("b should be equal to ctx.inf")

        # Shift the summand so the summation index effectively starts at 0.
        def shifted(x):
            return f(x + a)

        # First Abel-Plana piece: the integral of f over [a, inf).  If the
        # caller supplied it symbolically, trust it with zero error.
        if integral is None:
            int1, err1 = ctx.quad(shifted, [0, ctx.inf], error=True)
        else:
            int1, err1 = integral, 0

        j = ctx.j
        two_pi = ctx.pi * 2
        # Second piece: i * int_0^inf (f(it)-f(-it)) / (e^{2 pi t} - 1) dt.
        # When the integrand is real-typed, f(-it) = conj(f(it)) and the
        # difference collapses to -2 Im f(it).
        if ctx._is_real_type(int1):
            kernel = lambda t: -2 * ctx.im(shifted(j*t)) / ctx.expm1(two_pi*t)
        else:
            kernel = lambda t: j*(shifted(j*t)-shifted(-j*t)) / ctx.expm1(two_pi*t)
        int2, err2 = ctx.quad(kernel, [0, ctx.inf], error=True)

        err = err1 + err2
        # Assemble the formula, including the f(0)/2 endpoint term.
        total = int1 + int2 + 0.5 * shifted(ctx.mpf(0))
    finally:
        ctx.prec = prec
    # Unary + rounds the high-precision result back to the caller's precision.
    if error:
        return +total, err
    return +total
|
| 967 |
+
|
| 968 |
+
|
| 969 |
+
@defun
def sumem(ctx, f, interval, tol=None, reject=10, integral=None,
    adiffs=None, bdiffs=None, verbose=False, error=False,
    _fast_abort=False):
    r"""
    Uses the Euler-Maclaurin formula to compute an approximation accurate
    to within ``tol`` (which defaults to the present epsilon) of the sum

    .. math ::

        S = \sum_{k=a}^b f(k)

    where `(a,b)` are given by ``interval`` and `a` or `b` may be
    infinite. The approximation is

    .. math ::

        S \sim \int_a^b f(x) \,dx + \frac{f(a)+f(b)}{2} +
        \sum_{k=1}^{\infty} \frac{B_{2k}}{(2k)!}
        \left(f^{(2k-1)}(b)-f^{(2k-1)}(a)\right).

    The last sum in the Euler-Maclaurin formula is not generally
    convergent (a notable exception is if `f` is a polynomial, in
    which case Euler-Maclaurin actually gives an exact result).

    The summation is stopped as soon as the quotient between two
    consecutive terms falls below *reject*. That is, by default
    (*reject* = 10), the summation is continued as long as each
    term adds at least one decimal.

    Although not convergent, convergence to a given tolerance can
    often be "forced" if `b = \infty` by summing up to `a+N` and then
    applying the Euler-Maclaurin formula to the sum over the range
    `(a+N+1, \ldots, \infty)`. This procedure is implemented by
    :func:`~mpmath.nsum`.

    By default numerical quadrature and differentiation is used.
    If the symbolic values of the integral and endpoint derivatives
    are known, it is more efficient to pass the value of the
    integral explicitly as ``integral`` and the derivatives
    explicitly as ``adiffs`` and ``bdiffs``. The derivatives
    should be given as iterables that yield
    `f(a), f'(a), f''(a), \ldots` (and the equivalent for `b`).

    **Examples**

    Summation of an infinite series, with automatic and symbolic
    integral and derivative values (the second should be much faster)::

        >>> from mpmath import *
        >>> mp.dps = 50; mp.pretty = True
        >>> sumem(lambda n: 1/n**2, [32, inf])
        0.03174336652030209012658168043874142714132886413417
        >>> I = mpf(1)/32
        >>> D = adiffs=((-1)**n*fac(n+1)*32**(-2-n) for n in range(999))
        >>> sumem(lambda n: 1/n**2, [32, inf], integral=I, adiffs=D)
        0.03174336652030209012658168043874142714132886413417

    An exact evaluation of a finite polynomial sum::

        >>> sumem(lambda n: n**5-12*n**2+3*n, [-100000, 200000])
        10500155000624963999742499550000.0
        >>> print(sum(n**5-12*n**2+3*n for n in range(-100000, 200001)))
        10500155000624963999742499550000

    """
    tol = tol or +ctx.eps
    interval = ctx._as_points(interval)
    a = ctx.convert(interval[0])
    b = ctx.convert(interval[-1])
    err = ctx.zero
    prev = 0
    M = 10000
    # At an infinite endpoint all derivatives vanish, so substitute a stream
    # of zeros (M terms is far more than the loop below will ever consume).
    if a == ctx.ninf: adiffs = (0 for n in xrange(M))
    else: adiffs = adiffs or ctx.diffs(f, a)
    if b == ctx.inf: bdiffs = (0 for n in xrange(M))
    else: bdiffs = bdiffs or ctx.diffs(f, b)
    orig = ctx.prec
    #verbose = 1
    try:
        # Guard digits for the correction sum; restored in finally.
        ctx.prec += 10
        s = ctx.zero
        for k, (da, db) in enumerate(izip(adiffs, bdiffs)):
            # Only odd derivative orders contribute (B_{2k} terms); even-k
            # iterations are skipped entirely.
            if k & 1:
                term = (db-da) * ctx.bernoulli(k+1) / ctx.factorial(k+1)
                mag = abs(term)
                if verbose:
                    print("term", k, "magnitude =", ctx.nstr(mag))
                # Converged: the new term is already below the tolerance.
                if k > 4 and mag < tol:
                    s += term
                    break
                # Diverging: terms stopped shrinking by at least a factor
                # *reject* — the asymptotic series has turned around, so stop
                # and account for the truncation in the error estimate.
                elif k > 4 and abs(prev) / mag < reject:
                    err += mag
                    if _fast_abort:
                        # Caller asked for an immediate bail-out on failure.
                        return [s, (s, err)][error]
                    if verbose:
                        print("Failed to converge")
                    break
                else:
                    s += term
                prev = term
        # Endpoint correction
        if a != ctx.ninf: s += f(a)/2
        if b != ctx.inf: s += f(b)/2
        # Tail integral
        if verbose:
            print("Integrating f(x) from x = %s to %s" % (ctx.nstr(a), ctx.nstr(b)))
        if integral:
            s += integral
        else:
            integral, ierr = ctx.quad(f, interval, error=True)
            if verbose:
                print("Integration error:", ierr)
            s += integral
            err += ierr
    finally:
        ctx.prec = orig
    if error:
        return s, err
    else:
        return s
|
| 1090 |
+
|
| 1091 |
+
@defun
def adaptive_extrapolation(ctx, update, emfun, kwargs):
    """
    Driver used by nsum: repeatedly appends batches of partial sums via
    *update*, then tries each enabled acceleration method (direct,
    Richardson, Shanks, Levin/Sidi, Cohen alternating, Euler-Maclaurin via
    *emfun*) until one of them reaches the tolerance or *maxterms* terms
    have been consumed.  *kwargs* is a plain dict of nsum options.
    """
    option = kwargs.get
    # In fixed-precision mode there are no guard digits to spend, so the
    # default tolerance is loosened rather than tightened.
    if ctx._fixed_precision:
        tol = option('tol', ctx.eps*2**10)
    else:
        tol = option('tol', ctx.eps/2**10)
    verbose = option('verbose', False)
    maxterms = option('maxterms', ctx.dps*10)
    method = set(option('method', 'r+s').split('+'))
    skip = option('skip', 0)  # NOTE(review): not referenced below
    steps = iter(option('steps', xrange(10, 10**9, 10)))
    strict = option('strict')
    #steps = (10 for i in xrange(1000))
    # Accelerator objects (Levin/Sidi/Cohen) that consume partial sums.
    summer=[]
    if 'd' in method or 'direct' in method:
        TRY_RICHARDSON = TRY_SHANKS = TRY_EULER_MACLAURIN = False
    else:
        TRY_RICHARDSON = ('r' in method) or ('richardson' in method)
        TRY_SHANKS = ('s' in method) or ('shanks' in method)
        TRY_EULER_MACLAURIN = ('e' in method) or \
            ('euler-maclaurin' in method)

    def init_levin(m):
        # Instantiate one accelerator per requested Levin variant.
        variant = kwargs.get("levin_variant", "u")
        if isinstance(variant, str):
            if variant == "all":
                variant = ["u", "v", "t"]
            else:
                variant = [variant]
        for s in variant:
            L = levin_class(method = m, variant = s)
            L.ctx = ctx
            L.name = m + "(" + s + ")"
            summer.append(L)

    if ('l' in method) or ('levin' in method):
        init_levin("levin")

    if ('sidi' in method):
        init_levin("sidi")

    if ('a' in method) or ('alternating' in method):
        L = cohen_alt_class()
        L.ctx = ctx
        L.name = "alternating"
        summer.append(L)

    last_richardson_value = 0
    shanks_table = []
    index = 0
    step = 10
    partial = []
    best = ctx.zero
    orig = ctx.prec
    try:
        # Choose working precision: explicit override, generous headroom for
        # cancellation-prone transforms, or a modest bump for direct summation.
        if 'workprec' in kwargs:
            ctx.prec = kwargs['workprec']
        elif TRY_RICHARDSON or TRY_SHANKS or len(summer)!=0:
            ctx.prec = (ctx.prec+10) * 4
        else:
            ctx.prec += 30
        while 1:
            if index >= maxterms:
                break

            # Get new batch of terms
            try:
                step = next(steps)
            except StopIteration:
                # Keep reusing the last step size once the iterator runs dry.
                pass
            if verbose:
                print("-"*70)
                print("Adding terms #%i-#%i" % (index, index+step))
            update(partial, xrange(index, index+step))
            index += step

            # Check direct error
            best = partial[-1]
            error = abs(best - partial[-2])
            if verbose:
                print("Direct error: %s" % ctx.nstr(error))
            if error <= tol:
                return best

            # Check each extrapolation method
            if TRY_RICHARDSON:
                value, maxc = ctx.richardson(partial)
                # Convergence
                richardson_error = abs(value - last_richardson_value)
                if verbose:
                    print("Richardson error: %s" % ctx.nstr(richardson_error))
                # Convergence
                if richardson_error <= tol:
                    return value
                last_richardson_value = value
                # Unreliable due to cancellation
                if ctx.eps*maxc > tol:
                    if verbose:
                        print("Ran out of precision for Richardson")
                    TRY_RICHARDSON = False
                if richardson_error < error:
                    error = richardson_error
                    best = value
            if TRY_SHANKS:
                shanks_table = ctx.shanks(partial, shanks_table, randomized=True)
                row = shanks_table[-1]
                if len(row) == 2:
                    # Too few entries for an error estimate yet.
                    est1 = row[-1]
                    shanks_error = 0
                else:
                    est1, maxc, est2 = row[-1], abs(row[-2]), row[-3]
                    shanks_error = abs(est1-est2)
                if verbose:
                    print("Shanks error: %s" % ctx.nstr(shanks_error))
                if shanks_error <= tol:
                    return est1
                # Unreliable due to cancellation (same criterion as Richardson).
                if ctx.eps*maxc > tol:
                    if verbose:
                        print("Ran out of precision for Shanks")
                    TRY_SHANKS = False
                if shanks_error < error:
                    error = shanks_error
                    best = est1
            # Levin/Sidi/Cohen accelerators all consume the partial sums.
            for L in summer:
                est, lerror = L.update_psum(partial)
                if verbose:
                    print("%s error: %s" % (L.name, ctx.nstr(lerror)))
                if lerror <= tol:
                    return est
                if lerror < error:
                    error = lerror
                    best = est
            if TRY_EULER_MACLAURIN:
                # Two consecutive terms of opposite sign indicate an
                # alternating series; quadrature of such an f along the real
                # axis is hopeless, so disable this method permanently.
                if ctx.almosteq(ctx.mpc(ctx.sign(partial[-1]) / ctx.sign(partial[-2])), -1):
                    if verbose:
                        print ("NOT using Euler-Maclaurin: the series appears"
                            " to be alternating, so numerical\n quadrature"
                            " will most likely fail")
                    TRY_EULER_MACLAURIN = False
                else:
                    # emfun approximates the tail sum_{k>=index} f(k).
                    value, em_error = emfun(index, tol)
                    value += partial[-1]
                    if verbose:
                        print("Euler-Maclaurin error: %s" % ctx.nstr(em_error))
                    if em_error <= tol:
                        return value
                    if em_error < error:
                        best = value
    finally:
        ctx.prec = orig
    # Fell out of the loop without meeting tol.
    if strict:
        raise ctx.NoConvergence
    if verbose:
        print("Warning: failed to converge to target accuracy")
    return best
|
| 1247 |
+
|
| 1248 |
+
@defun
|
| 1249 |
+
def nsum(ctx, f, *intervals, **options):
|
| 1250 |
+
r"""
|
| 1251 |
+
Computes the sum
|
| 1252 |
+
|
| 1253 |
+
.. math :: S = \sum_{k=a}^b f(k)
|
| 1254 |
+
|
| 1255 |
+
where `(a, b)` = *interval*, and where `a = -\infty` and/or
|
| 1256 |
+
`b = \infty` are allowed, or more generally
|
| 1257 |
+
|
| 1258 |
+
.. math :: S = \sum_{k_1=a_1}^{b_1} \cdots
|
| 1259 |
+
\sum_{k_n=a_n}^{b_n} f(k_1,\ldots,k_n)
|
| 1260 |
+
|
| 1261 |
+
if multiple intervals are given.
|
| 1262 |
+
|
| 1263 |
+
Two examples of infinite series that can be summed by :func:`~mpmath.nsum`,
|
| 1264 |
+
where the first converges rapidly and the second converges slowly,
|
| 1265 |
+
are::
|
| 1266 |
+
|
| 1267 |
+
>>> from mpmath import *
|
| 1268 |
+
>>> mp.dps = 15; mp.pretty = True
|
| 1269 |
+
>>> nsum(lambda n: 1/fac(n), [0, inf])
|
| 1270 |
+
2.71828182845905
|
| 1271 |
+
>>> nsum(lambda n: 1/n**2, [1, inf])
|
| 1272 |
+
1.64493406684823
|
| 1273 |
+
|
| 1274 |
+
When appropriate, :func:`~mpmath.nsum` applies convergence acceleration to
|
| 1275 |
+
accurately estimate the sums of slowly convergent series. If the series is
|
| 1276 |
+
finite, :func:`~mpmath.nsum` currently does not attempt to perform any
|
| 1277 |
+
extrapolation, and simply calls :func:`~mpmath.fsum`.
|
| 1278 |
+
|
| 1279 |
+
Multidimensional infinite series are reduced to a single-dimensional
|
| 1280 |
+
series over expanding hypercubes; if both infinite and finite dimensions
|
| 1281 |
+
are present, the finite ranges are moved innermost. For more advanced
|
| 1282 |
+
control over the summation order, use nested calls to :func:`~mpmath.nsum`,
|
| 1283 |
+
or manually rewrite the sum as a single-dimensional series.
|
| 1284 |
+
|
| 1285 |
+
**Options**
|
| 1286 |
+
|
| 1287 |
+
*tol*
|
| 1288 |
+
Desired maximum final error. Defaults roughly to the
|
| 1289 |
+
epsilon of the working precision.
|
| 1290 |
+
|
| 1291 |
+
*method*
|
| 1292 |
+
Which summation algorithm to use (described below).
|
| 1293 |
+
Default: ``'richardson+shanks'``.
|
| 1294 |
+
|
| 1295 |
+
*maxterms*
|
| 1296 |
+
Cancel after at most this many terms. Default: 10*dps.
|
| 1297 |
+
|
| 1298 |
+
*steps*
|
| 1299 |
+
An iterable giving the number of terms to add between
|
| 1300 |
+
each extrapolation attempt. The default sequence is
|
| 1301 |
+
[10, 20, 30, 40, ...]. For example, if you know that
|
| 1302 |
+
approximately 100 terms will be required, efficiency might be
|
| 1303 |
+
improved by setting this to [100, 10]. Then the first
|
| 1304 |
+
extrapolation will be performed after 100 terms, the second
|
| 1305 |
+
after 110, etc.
|
| 1306 |
+
|
| 1307 |
+
*verbose*
|
| 1308 |
+
Print details about progress.
|
| 1309 |
+
|
| 1310 |
+
*ignore*
|
| 1311 |
+
If enabled, any term that raises ``ArithmeticError``
|
| 1312 |
+
or ``ValueError`` (e.g. through division by zero) is replaced
|
| 1313 |
+
by a zero. This is convenient for lattice sums with
|
| 1314 |
+
a singular term near the origin.
|
| 1315 |
+
|
| 1316 |
+
**Methods**
|
| 1317 |
+
|
| 1318 |
+
Unfortunately, an algorithm that can efficiently sum any infinite
|
| 1319 |
+
series does not exist. :func:`~mpmath.nsum` implements several different
|
| 1320 |
+
algorithms that each work well in different cases. The *method*
|
| 1321 |
+
keyword argument selects a method.
|
| 1322 |
+
|
| 1323 |
+
The default method is ``'r+s'``, i.e. both Richardson extrapolation
|
| 1324 |
+
and Shanks transformation is attempted. A slower method that
|
| 1325 |
+
handles more cases is ``'r+s+e'``. For very high precision
|
| 1326 |
+
summation, or if the summation needs to be fast (for example if
|
| 1327 |
+
multiple sums need to be evaluated), it is a good idea to
|
| 1328 |
+
investigate which one method works best and only use that.
|
| 1329 |
+
|
| 1330 |
+
``'richardson'`` / ``'r'``:
|
| 1331 |
+
Uses Richardson extrapolation. Provides useful extrapolation
|
| 1332 |
+
when `f(k) \sim P(k)/Q(k)` or when `f(k) \sim (-1)^k P(k)/Q(k)`
|
| 1333 |
+
for polynomials `P` and `Q`. See :func:`~mpmath.richardson` for
|
| 1334 |
+
additional information.
|
| 1335 |
+
|
| 1336 |
+
``'shanks'`` / ``'s'``:
|
| 1337 |
+
Uses Shanks transformation. Typically provides useful
|
| 1338 |
+
extrapolation when `f(k) \sim c^k` or when successive terms
|
| 1339 |
+
alternate signs. Is able to sum some divergent series.
|
| 1340 |
+
See :func:`~mpmath.shanks` for additional information.
|
| 1341 |
+
|
| 1342 |
+
``'levin'`` / ``'l'``:
|
| 1343 |
+
Uses the Levin transformation. It performs better than the Shanks
|
| 1344 |
+
transformation for logarithmic convergent or alternating divergent
|
| 1345 |
+
series. The ``'levin_variant'``-keyword selects the variant. Valid
|
| 1346 |
+
choices are "u", "t", "v" and "all" whereby "all" uses all three
|
| 1347 |
+
u,t and v simultanously (This is good for performance comparison in
|
| 1348 |
+
conjunction with "verbose=True"). Instead of the Levin transform one can
|
| 1349 |
+
also use the Sidi-S transform by selecting the method ``'sidi'``.
|
| 1350 |
+
See :func:`~mpmath.levin` for additional details.
|
| 1351 |
+
|
| 1352 |
+
``'alternating'`` / ``'a'``:
|
| 1353 |
+
This is the convergence acceleration of alternating series developped
|
| 1354 |
+
by Cohen, Villegras and Zagier.
|
| 1355 |
+
See :func:`~mpmath.cohen_alt` for additional details.
|
| 1356 |
+
|
| 1357 |
+
``'euler-maclaurin'`` / ``'e'``:
|
| 1358 |
+
Uses the Euler-Maclaurin summation formula to approximate
|
| 1359 |
+
the remainder sum by an integral. This requires high-order
|
| 1360 |
+
numerical derivatives and numerical integration. The advantage
|
| 1361 |
+
of this algorithm is that it works regardless of the
|
| 1362 |
+
decay rate of `f`, as long as `f` is sufficiently smooth.
|
| 1363 |
+
See :func:`~mpmath.sumem` for additional information.
|
| 1364 |
+
|
| 1365 |
+
``'direct'`` / ``'d'``:
|
| 1366 |
+
Does not perform any extrapolation. This can be used
|
| 1367 |
+
(and should only be used for) rapidly convergent series.
|
| 1368 |
+
The summation automatically stops when the terms
|
| 1369 |
+
decrease below the target tolerance.
|
| 1370 |
+
|
| 1371 |
+
**Basic examples**
|
| 1372 |
+
|
| 1373 |
+
A finite sum::
|
| 1374 |
+
|
| 1375 |
+
>>> nsum(lambda k: 1/k, [1, 6])
|
| 1376 |
+
2.45
|
| 1377 |
+
|
| 1378 |
+
Summation of a series going to negative infinity and a doubly
|
| 1379 |
+
infinite series::
|
| 1380 |
+
|
| 1381 |
+
>>> nsum(lambda k: 1/k**2, [-inf, -1])
|
| 1382 |
+
1.64493406684823
|
| 1383 |
+
>>> nsum(lambda k: 1/(1+k**2), [-inf, inf])
|
| 1384 |
+
3.15334809493716
|
| 1385 |
+
|
| 1386 |
+
:func:`~mpmath.nsum` handles sums of complex numbers::
|
| 1387 |
+
|
| 1388 |
+
>>> nsum(lambda k: (0.5+0.25j)**k, [0, inf])
|
| 1389 |
+
(1.6 + 0.8j)
|
| 1390 |
+
|
| 1391 |
+
The following sum converges very rapidly, so it is most
|
| 1392 |
+
efficient to sum it by disabling convergence acceleration::
|
| 1393 |
+
|
| 1394 |
+
>>> mp.dps = 1000
|
| 1395 |
+
>>> a = nsum(lambda k: -(-1)**k * k**2 / fac(2*k), [1, inf],
|
| 1396 |
+
... method='direct')
|
| 1397 |
+
>>> b = (cos(1)+sin(1))/4
|
| 1398 |
+
>>> abs(a-b) < mpf('1e-998')
|
| 1399 |
+
True
|
| 1400 |
+
|
| 1401 |
+
**Examples with Richardson extrapolation**
|
| 1402 |
+
|
| 1403 |
+
Richardson extrapolation works well for sums over rational
|
| 1404 |
+
functions, as well as their alternating counterparts::
|
| 1405 |
+
|
| 1406 |
+
>>> mp.dps = 50
|
| 1407 |
+
>>> nsum(lambda k: 1 / k**3, [1, inf],
|
| 1408 |
+
... method='richardson')
|
| 1409 |
+
1.2020569031595942853997381615114499907649862923405
|
| 1410 |
+
>>> zeta(3)
|
| 1411 |
+
1.2020569031595942853997381615114499907649862923405
|
| 1412 |
+
|
| 1413 |
+
>>> nsum(lambda n: (n + 3)/(n**3 + n**2), [1, inf],
|
| 1414 |
+
... method='richardson')
|
| 1415 |
+
2.9348022005446793094172454999380755676568497036204
|
| 1416 |
+
>>> pi**2/2-2
|
| 1417 |
+
2.9348022005446793094172454999380755676568497036204
|
| 1418 |
+
|
| 1419 |
+
>>> nsum(lambda k: (-1)**k / k**3, [1, inf],
|
| 1420 |
+
... method='richardson')
|
| 1421 |
+
-0.90154267736969571404980362113358749307373971925537
|
| 1422 |
+
>>> -3*zeta(3)/4
|
| 1423 |
+
-0.90154267736969571404980362113358749307373971925538
|
| 1424 |
+
|
| 1425 |
+
**Examples with Shanks transformation**
|
| 1426 |
+
|
| 1427 |
+
The Shanks transformation works well for geometric series
|
| 1428 |
+
and typically provides excellent acceleration for Taylor
|
| 1429 |
+
series near the border of their disk of convergence.
|
| 1430 |
+
Here we apply it to a series for `\log(2)`, which can be
|
| 1431 |
+
seen as the Taylor series for `\log(1+x)` with `x = 1`::
|
| 1432 |
+
|
| 1433 |
+
>>> nsum(lambda k: -(-1)**k/k, [1, inf],
|
| 1434 |
+
... method='shanks')
|
| 1435 |
+
0.69314718055994530941723212145817656807550013436025
|
| 1436 |
+
>>> log(2)
|
| 1437 |
+
0.69314718055994530941723212145817656807550013436025
|
| 1438 |
+
|
| 1439 |
+
Here we apply it to a slowly convergent geometric series::
|
| 1440 |
+
|
| 1441 |
+
>>> nsum(lambda k: mpf('0.995')**k, [0, inf],
|
| 1442 |
+
... method='shanks')
|
| 1443 |
+
200.0
|
| 1444 |
+
|
| 1445 |
+
Finally, Shanks' method works very well for alternating series
|
| 1446 |
+
where `f(k) = (-1)^k g(k)`, and often does so regardless of
|
| 1447 |
+
the exact decay rate of `g(k)`::
|
| 1448 |
+
|
| 1449 |
+
>>> mp.dps = 15
|
| 1450 |
+
>>> nsum(lambda k: (-1)**(k+1) / k**1.5, [1, inf],
|
| 1451 |
+
... method='shanks')
|
| 1452 |
+
0.765147024625408
|
| 1453 |
+
>>> (2-sqrt(2))*zeta(1.5)/2
|
| 1454 |
+
0.765147024625408
|
| 1455 |
+
|
| 1456 |
+
The following slowly convergent alternating series has no known
|
| 1457 |
+
closed-form value. Evaluating the sum a second time at higher
|
| 1458 |
+
precision indicates that the value is probably correct::
|
| 1459 |
+
|
| 1460 |
+
>>> nsum(lambda k: (-1)**k / log(k), [2, inf],
|
| 1461 |
+
... method='shanks')
|
| 1462 |
+
0.924299897222939
|
| 1463 |
+
>>> mp.dps = 30
|
| 1464 |
+
>>> nsum(lambda k: (-1)**k / log(k), [2, inf],
|
| 1465 |
+
... method='shanks')
|
| 1466 |
+
0.92429989722293885595957018136
|
| 1467 |
+
|
| 1468 |
+
**Examples with Levin transformation**
|
| 1469 |
+
|
| 1470 |
+
The following example calculates Euler's constant as the constant term in
|
| 1471 |
+
the Laurent expansion of zeta(s) at s=1. This sum converges extremly slow
|
| 1472 |
+
because of the logarithmic convergence behaviour of the Dirichlet series
|
| 1473 |
+
for zeta.
|
| 1474 |
+
|
| 1475 |
+
>>> mp.dps = 30
|
| 1476 |
+
>>> z = mp.mpf(10) ** (-10)
|
| 1477 |
+
>>> a = mp.nsum(lambda n: n**(-(1+z)), [1, mp.inf], method = "levin") - 1 / z
|
| 1478 |
+
>>> print(mp.chop(a - mp.euler, tol = 1e-10))
|
| 1479 |
+
0.0
|
| 1480 |
+
|
| 1481 |
+
Now we sum the zeta function outside its range of convergence
|
| 1482 |
+
(attention: This does not work at the negative integers!):
|
| 1483 |
+
|
| 1484 |
+
>>> mp.dps = 15
|
| 1485 |
+
>>> w = mp.nsum(lambda n: n ** (2 + 3j), [1, mp.inf], method = "levin", levin_variant = "v")
|
| 1486 |
+
>>> print(mp.chop(w - mp.zeta(-2-3j)))
|
| 1487 |
+
0.0
|
| 1488 |
+
|
| 1489 |
+
The next example resummates an asymptotic series expansion of an integral
|
| 1490 |
+
related to the exponential integral.
|
| 1491 |
+
|
| 1492 |
+
>>> mp.dps = 15
|
| 1493 |
+
>>> z = mp.mpf(10)
|
| 1494 |
+
>>> # exact = mp.quad(lambda x: mp.exp(-x)/(1+x/z),[0,mp.inf])
|
| 1495 |
+
>>> exact = z * mp.exp(z) * mp.expint(1,z) # this is the symbolic expression for the integral
|
| 1496 |
+
>>> w = mp.nsum(lambda n: (-1) ** n * mp.fac(n) * z ** (-n), [0, mp.inf], method = "sidi", levin_variant = "t")
|
| 1497 |
+
>>> print(mp.chop(w - exact))
|
| 1498 |
+
0.0
|
| 1499 |
+
|
| 1500 |
+
Following highly divergent asymptotic expansion needs some care. Firstly we
|
| 1501 |
+
need copious amount of working precision. Secondly the stepsize must not be
|
| 1502 |
+
chosen to large, otherwise nsum may miss the point where the Levin transform
|
| 1503 |
+
converges and reach the point where only numerical garbage is produced due to
|
| 1504 |
+
numerical cancellation.
|
| 1505 |
+
|
| 1506 |
+
>>> mp.dps = 15
|
| 1507 |
+
>>> z = mp.mpf(2)
|
| 1508 |
+
>>> # exact = mp.quad(lambda x: mp.exp( -x * x / 2 - z * x ** 4), [0,mp.inf]) * 2 / mp.sqrt(2 * mp.pi)
|
| 1509 |
+
>>> exact = mp.exp(mp.one / (32 * z)) * mp.besselk(mp.one / 4, mp.one / (32 * z)) / (4 * mp.sqrt(z * mp.pi)) # this is the symbolic expression for the integral
|
| 1510 |
+
>>> w = mp.nsum(lambda n: (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)),
|
| 1511 |
+
... [0, mp.inf], method = "levin", levin_variant = "t", workprec = 8*mp.prec, steps = [2] + [1 for x in xrange(1000)])
|
| 1512 |
+
>>> print(mp.chop(w - exact))
|
| 1513 |
+
0.0
|
| 1514 |
+
|
| 1515 |
+
The hypergeoemtric function can also be summed outside its range of convergence:
|
| 1516 |
+
|
| 1517 |
+
>>> mp.dps = 15
|
| 1518 |
+
>>> z = 2 + 1j
|
| 1519 |
+
>>> exact = mp.hyp2f1(2 / mp.mpf(3), 4 / mp.mpf(3), 1 / mp.mpf(3), z)
|
| 1520 |
+
>>> f = lambda n: mp.rf(2 / mp.mpf(3), n) * mp.rf(4 / mp.mpf(3), n) * z**n / (mp.rf(1 / mp.mpf(3), n) * mp.fac(n))
|
| 1521 |
+
>>> v = mp.nsum(f, [0, mp.inf], method = "levin", steps = [10 for x in xrange(1000)])
|
| 1522 |
+
>>> print(mp.chop(exact-v))
|
| 1523 |
+
0.0
|
| 1524 |
+
|
| 1525 |
+
**Examples with Cohen's alternating series resummation**
|
| 1526 |
+
|
| 1527 |
+
The next example sums the alternating zeta function:
|
| 1528 |
+
|
| 1529 |
+
>>> v = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "a")
|
| 1530 |
+
>>> print(mp.chop(v - mp.log(2)))
|
| 1531 |
+
0.0
|
| 1532 |
+
|
| 1533 |
+
The derivate of the alternating zeta function outside its range of
|
| 1534 |
+
convergence:
|
| 1535 |
+
|
| 1536 |
+
>>> v = mp.nsum(lambda n: (-1)**n * mp.log(n) * n, [1, mp.inf], method = "a")
|
| 1537 |
+
>>> print(mp.chop(v - mp.diff(lambda s: mp.altzeta(s), -1)))
|
| 1538 |
+
0.0
|
| 1539 |
+
|
| 1540 |
+
**Examples with Euler-Maclaurin summation**
|
| 1541 |
+
|
| 1542 |
+
The sum in the following example has the wrong rate of convergence
|
| 1543 |
+
for either Richardson or Shanks to be effective.
|
| 1544 |
+
|
| 1545 |
+
>>> f = lambda k: log(k)/k**2.5
|
| 1546 |
+
>>> mp.dps = 15
|
| 1547 |
+
>>> nsum(f, [1, inf], method='euler-maclaurin')
|
| 1548 |
+
0.38734195032621
|
| 1549 |
+
>>> -diff(zeta, 2.5)
|
| 1550 |
+
0.38734195032621
|
| 1551 |
+
|
| 1552 |
+
Increasing ``steps`` improves speed at higher precision::
|
| 1553 |
+
|
| 1554 |
+
>>> mp.dps = 50
|
| 1555 |
+
>>> nsum(f, [1, inf], method='euler-maclaurin', steps=[250])
|
| 1556 |
+
0.38734195032620997271199237593105101319948228874688
|
| 1557 |
+
>>> -diff(zeta, 2.5)
|
| 1558 |
+
0.38734195032620997271199237593105101319948228874688
|
| 1559 |
+
|
| 1560 |
+
**Divergent series**
|
| 1561 |
+
|
| 1562 |
+
The Shanks transformation is able to sum some *divergent*
|
| 1563 |
+
series. In particular, it is often able to sum Taylor series
|
| 1564 |
+
beyond their radius of convergence (this is due to a relation
|
| 1565 |
+
between the Shanks transformation and Pade approximations;
|
| 1566 |
+
see :func:`~mpmath.pade` for an alternative way to evaluate divergent
|
| 1567 |
+
Taylor series). Furthermore the Levin-transform examples above
|
| 1568 |
+
contain some divergent series resummation.
|
| 1569 |
+
|
| 1570 |
+
Here we apply it to `\log(1+x)` far outside the region of
|
| 1571 |
+
convergence::
|
| 1572 |
+
|
| 1573 |
+
>>> mp.dps = 50
|
| 1574 |
+
>>> nsum(lambda k: -(-9)**k/k, [1, inf],
|
| 1575 |
+
... method='shanks')
|
| 1576 |
+
2.3025850929940456840179914546843642076011014886288
|
| 1577 |
+
>>> log(10)
|
| 1578 |
+
2.3025850929940456840179914546843642076011014886288
|
| 1579 |
+
|
| 1580 |
+
A particular type of divergent series that can be summed
|
| 1581 |
+
using the Shanks transformation is geometric series.
|
| 1582 |
+
The result is the same as using the closed-form formula
|
| 1583 |
+
for an infinite geometric series::
|
| 1584 |
+
|
| 1585 |
+
>>> mp.dps = 15
|
| 1586 |
+
>>> for n in range(-8, 8):
|
| 1587 |
+
... if n == 1:
|
| 1588 |
+
... continue
|
| 1589 |
+
... print("%s %s %s" % (mpf(n), mpf(1)/(1-n),
|
| 1590 |
+
... nsum(lambda k: n**k, [0, inf], method='shanks')))
|
| 1591 |
+
...
|
| 1592 |
+
-8.0 0.111111111111111 0.111111111111111
|
| 1593 |
+
-7.0 0.125 0.125
|
| 1594 |
+
-6.0 0.142857142857143 0.142857142857143
|
| 1595 |
+
-5.0 0.166666666666667 0.166666666666667
|
| 1596 |
+
-4.0 0.2 0.2
|
| 1597 |
+
-3.0 0.25 0.25
|
| 1598 |
+
-2.0 0.333333333333333 0.333333333333333
|
| 1599 |
+
-1.0 0.5 0.5
|
| 1600 |
+
0.0 1.0 1.0
|
| 1601 |
+
2.0 -1.0 -1.0
|
| 1602 |
+
3.0 -0.5 -0.5
|
| 1603 |
+
4.0 -0.333333333333333 -0.333333333333333
|
| 1604 |
+
5.0 -0.25 -0.25
|
| 1605 |
+
6.0 -0.2 -0.2
|
| 1606 |
+
7.0 -0.166666666666667 -0.166666666666667
|
| 1607 |
+
|
| 1608 |
+
**Multidimensional sums**
|
| 1609 |
+
|
| 1610 |
+
Any combination of finite and infinite ranges is allowed for the
|
| 1611 |
+
summation indices::
|
| 1612 |
+
|
| 1613 |
+
>>> mp.dps = 15
|
| 1614 |
+
>>> nsum(lambda x,y: x+y, [2,3], [4,5])
|
| 1615 |
+
28.0
|
| 1616 |
+
>>> nsum(lambda x,y: x/2**y, [1,3], [1,inf])
|
| 1617 |
+
6.0
|
| 1618 |
+
>>> nsum(lambda x,y: y/2**x, [1,inf], [1,3])
|
| 1619 |
+
6.0
|
| 1620 |
+
>>> nsum(lambda x,y,z: z/(2**x*2**y), [1,inf], [1,inf], [3,4])
|
| 1621 |
+
7.0
|
| 1622 |
+
>>> nsum(lambda x,y,z: y/(2**x*2**z), [1,inf], [3,4], [1,inf])
|
| 1623 |
+
7.0
|
| 1624 |
+
>>> nsum(lambda x,y,z: x/(2**z*2**y), [3,4], [1,inf], [1,inf])
|
| 1625 |
+
7.0
|
| 1626 |
+
|
| 1627 |
+
Some nice examples of double series with analytic solutions or
|
| 1628 |
+
reductions to single-dimensional series (see [1])::
|
| 1629 |
+
|
| 1630 |
+
>>> nsum(lambda m, n: 1/2**(m*n), [1,inf], [1,inf])
|
| 1631 |
+
1.60669515241529
|
| 1632 |
+
>>> nsum(lambda n: 1/(2**n-1), [1,inf])
|
| 1633 |
+
1.60669515241529
|
| 1634 |
+
|
| 1635 |
+
>>> nsum(lambda i,j: (-1)**(i+j)/(i**2+j**2), [1,inf], [1,inf])
|
| 1636 |
+
0.278070510848213
|
| 1637 |
+
>>> pi*(pi-3*ln2)/12
|
| 1638 |
+
0.278070510848213
|
| 1639 |
+
|
| 1640 |
+
>>> nsum(lambda i,j: (-1)**(i+j)/(i+j)**2, [1,inf], [1,inf])
|
| 1641 |
+
0.129319852864168
|
| 1642 |
+
>>> altzeta(2) - altzeta(1)
|
| 1643 |
+
0.129319852864168
|
| 1644 |
+
|
| 1645 |
+
>>> nsum(lambda i,j: (-1)**(i+j)/(i+j)**3, [1,inf], [1,inf])
|
| 1646 |
+
0.0790756439455825
|
| 1647 |
+
>>> altzeta(3) - altzeta(2)
|
| 1648 |
+
0.0790756439455825
|
| 1649 |
+
|
| 1650 |
+
>>> nsum(lambda m,n: m**2*n/(3**m*(n*3**m+m*3**n)),
|
| 1651 |
+
... [1,inf], [1,inf])
|
| 1652 |
+
0.28125
|
| 1653 |
+
>>> mpf(9)/32
|
| 1654 |
+
0.28125
|
| 1655 |
+
|
| 1656 |
+
>>> nsum(lambda i,j: fac(i-1)*fac(j-1)/fac(i+j),
|
| 1657 |
+
... [1,inf], [1,inf], workprec=400)
|
| 1658 |
+
1.64493406684823
|
| 1659 |
+
>>> zeta(2)
|
| 1660 |
+
1.64493406684823
|
| 1661 |
+
|
| 1662 |
+
A hard example of a multidimensional sum is the Madelung constant
|
| 1663 |
+
in three dimensions (see [2]). The defining sum converges very
|
| 1664 |
+
slowly and only conditionally, so :func:`~mpmath.nsum` is lucky to
|
| 1665 |
+
obtain an accurate value through convergence acceleration. The
|
| 1666 |
+
second evaluation below uses a much more efficient, rapidly
|
| 1667 |
+
convergent 2D sum::
|
| 1668 |
+
|
| 1669 |
+
>>> nsum(lambda x,y,z: (-1)**(x+y+z)/(x*x+y*y+z*z)**0.5,
|
| 1670 |
+
... [-inf,inf], [-inf,inf], [-inf,inf], ignore=True)
|
| 1671 |
+
-1.74756459463318
|
| 1672 |
+
>>> nsum(lambda x,y: -12*pi*sech(0.5*pi * \
|
| 1673 |
+
... sqrt((2*x+1)**2+(2*y+1)**2))**2, [0,inf], [0,inf])
|
| 1674 |
+
-1.74756459463318
|
| 1675 |
+
|
| 1676 |
+
Another example of a lattice sum in 2D::
|
| 1677 |
+
|
| 1678 |
+
>>> nsum(lambda x,y: (-1)**(x+y) / (x**2+y**2), [-inf,inf],
|
| 1679 |
+
... [-inf,inf], ignore=True)
|
| 1680 |
+
-2.1775860903036
|
| 1681 |
+
>>> -pi*ln2
|
| 1682 |
+
-2.1775860903036
|
| 1683 |
+
|
| 1684 |
+
An example of an Eisenstein series::
|
| 1685 |
+
|
| 1686 |
+
>>> nsum(lambda m,n: (m+n*1j)**(-4), [-inf,inf], [-inf,inf],
|
| 1687 |
+
... ignore=True)
|
| 1688 |
+
(3.1512120021539 + 0.0j)
|
| 1689 |
+
|
| 1690 |
+
**References**
|
| 1691 |
+
|
| 1692 |
+
1. [Weisstein]_ http://mathworld.wolfram.com/DoubleSeries.html,
|
| 1693 |
+
2. [Weisstein]_ http://mathworld.wolfram.com/MadelungConstants.html
|
| 1694 |
+
|
| 1695 |
+
"""
|
| 1696 |
+
infinite, g = standardize(ctx, f, intervals, options)
|
| 1697 |
+
if not infinite:
|
| 1698 |
+
return +g()
|
| 1699 |
+
|
| 1700 |
+
def update(partial_sums, indices):
|
| 1701 |
+
if partial_sums:
|
| 1702 |
+
psum = partial_sums[-1]
|
| 1703 |
+
else:
|
| 1704 |
+
psum = ctx.zero
|
| 1705 |
+
for k in indices:
|
| 1706 |
+
psum = psum + g(ctx.mpf(k))
|
| 1707 |
+
partial_sums.append(psum)
|
| 1708 |
+
|
| 1709 |
+
prec = ctx.prec
|
| 1710 |
+
|
| 1711 |
+
def emfun(point, tol):
|
| 1712 |
+
workprec = ctx.prec
|
| 1713 |
+
ctx.prec = prec + 10
|
| 1714 |
+
v = ctx.sumem(g, [point, ctx.inf], tol, error=1)
|
| 1715 |
+
ctx.prec = workprec
|
| 1716 |
+
return v
|
| 1717 |
+
|
| 1718 |
+
return +ctx.adaptive_extrapolation(update, emfun, options)
|
| 1719 |
+
|
| 1720 |
+
|
| 1721 |
+
def wrapsafe(f):
    """Wrap *f* so that numeric failures evaluate to 0 instead of raising.

    ArithmeticError and ValueError raised by *f* are swallowed and the
    call yields 0; any other exception propagates unchanged.  Used by
    ``standardize`` when the caller passes ``ignore=True``.
    """
    def guarded(*call_args):
        try:
            result = f(*call_args)
        except (ArithmeticError, ValueError):
            result = 0
        return result
    return guarded
|
| 1728 |
+
|
| 1729 |
+
def standardize(ctx, f, intervals, options):
    """Normalize a multidimensional summation problem.

    Returns ``(infinite, g)``: if ``infinite`` is False, ``g()`` directly
    gives the (finite) sum; otherwise ``g(k)`` is a one-variable term
    function over ``k = 0, 1, 2, ...`` whose sum equals the requested
    multidimensional sum.
    """
    if options.get("ignore"):
        # Caller asked to ignore singular terms: evaluate errors as 0.
        f = wrapsafe(f)
    finite = []
    infinite = []
    for k, points in enumerate(intervals):
        a, b = ctx._as_points(points)
        if b < a:
            # An empty range in any dimension makes the whole sum zero.
            return False, (lambda: ctx.zero)
        if a == ctx.ninf or b == ctx.inf:
            infinite.append((k, (a,b)))
        else:
            finite.append((k, (int(a), int(b))))
    if finite:
        # Sum out the finite dimensions immediately.
        f = fold_finite(ctx, f, finite)
        if not infinite:
            # Purely finite problem: f now ignores its arguments.
            return False, lambda: f(*([0]*len(intervals)))
    if infinite:
        # Map each infinite interval onto [0, inf], then collapse all
        # infinite dimensions down to a single one.
        f = standardize_infinite(ctx, f, infinite)
        f = fold_infinite(ctx, f, infinite)
        args = [0] * len(intervals)
        d = infinite[0][0]
        def g(k):
            # NOTE: args is reused across calls; only slot d varies.
            args[d] = k
            return f(*args)
        return True, g
|
| 1755 |
+
|
| 1756 |
+
# backwards compatible itertools.product
def cartesian_product(args):
    """Yield tuples forming the Cartesian product of the given iterables,
    in the same (row-major) order as ``itertools.product``."""
    combos = [()]
    for choices in args:
        choices = tuple(choices)
        combos = [partial + (item,) for partial in combos for item in choices]
    for combo in combos:
        yield combo
|
| 1764 |
+
|
| 1765 |
+
def fold_finite(ctx, f, intervals):
    """Return a function equal to *f* summed over its finite dimensions.

    *intervals* is a list of ``(dimension_index, (a, b))`` pairs with
    integer endpoints.  The returned function takes the same arguments
    as *f* but ignores the folded positions, substituting every lattice
    point of the finite box and summing the results.
    """
    if not intervals:
        return f
    indices = [v[0] for v in intervals]
    points = [v[1] for v in intervals]
    ranges = [xrange(a, b+1) for (a,b) in points]
    def g(*args):
        args = list(args)
        s = ctx.zero
        # Sum f over the full Cartesian grid of the finite ranges.
        for xs in cartesian_product(ranges):
            for dim, x in zip(indices, xs):
                args[dim] = ctx.mpf(x)
            s += f(*args)
        return s
    #print "Folded finite", indices
    return g
|
| 1781 |
+
|
| 1782 |
+
# Standardize each interval to [0,inf]
def standardize_infinite(ctx, f, intervals):
    """Recursively substitute arguments so that every infinite interval
    listed in *intervals* becomes [0, inf]:

    * ``[-inf, inf]`` -> terms ``f(k) + f(-k)`` (the k = 0 term once)
    * ``[-inf, b]``   -> argument mapped as ``k -> b - k``
    * ``[a, inf]``    -> argument shifted as ``k -> k + a``
    """
    if not intervals:
        return f
    dim, [a,b] = intervals[-1]
    if a == ctx.ninf:
        if b == ctx.inf:
            # Doubly infinite: pair up the positive and negative terms.
            def g(*args):
                args = list(args)
                k = args[dim]
                if k:
                    s = f(*args)
                    args[dim] = -k
                    s += f(*args)
                    return s
                else:
                    # k == 0 belongs to both halves; count it once.
                    return f(*args)
        else:
            # [-inf, b]: reflect so the sum runs upward from b.
            def g(*args):
                args = list(args)
                args[dim] = b - args[dim]
                return f(*args)
    else:
        # [a, inf]: shift the origin to a.
        def g(*args):
            args = list(args)
            args[dim] += a
            return f(*args)
    #print "Standardized infinity along dimension", dim, a, b
    return standardize_infinite(ctx, g, intervals[:-1])
|
| 1811 |
+
|
| 1812 |
+
def fold_infinite(ctx, f, intervals):
    """Collapse the last two infinite dimensions into one.

    Term n of the returned function sums f over the L-shaped "shell" of
    index pairs whose maximum coordinate is n, so summing over n
    enumerates the whole quadrant exactly once.  Recurses until at most
    one infinite dimension remains.
    """
    if len(intervals) < 2:
        return f
    dim1 = intervals[-2][0]
    dim2 = intervals[-1][0]
    # Assume intervals are [0,inf] x [0,inf] x ...
    def g(*args):
        args = list(args)
        #args.insert(dim2, None)
        n = int(args[dim1])
        s = ctx.zero
        #y = ctx.mpf(n)
        args[dim2] = ctx.mpf(n) #y
        # Edge of the shell where dim2 == n: x runs 0..n.
        for x in xrange(n+1):
            args[dim1] = ctx.mpf(x)
            s += f(*args)
        args[dim1] = ctx.mpf(n) #ctx.mpf(n)
        # Edge where dim1 == n: y runs 0..n-1 (corner counted above).
        for y in xrange(n):
            args[dim2] = ctx.mpf(y)
            s += f(*args)
        return s
    #print "Folded infinite from", len(intervals), "to", (len(intervals)-1)
    return fold_infinite(ctx, g, intervals[:-1])
|
| 1835 |
+
|
| 1836 |
+
@defun
def nprod(ctx, f, interval, nsum=False, **kwargs):
    r"""
    Computes the product

    .. math ::

        P = \prod_{k=a}^b f(k)

    where `(a, b)` = *interval*, and where `a = -\infty` and/or
    `b = \infty` are allowed.

    By default, :func:`~mpmath.nprod` uses the same extrapolation methods as
    :func:`~mpmath.nsum`, except applied to the partial products rather than
    partial sums, and the same keyword options as for :func:`~mpmath.nsum`
    are supported. If ``nsum=True``, the product is instead computed via
    :func:`~mpmath.nsum` as

    .. math ::

        P = \exp\left( \sum_{k=a}^b \log(f(k)) \right).

    This is slower, but can sometimes yield better results. It is
    also required (and used automatically) when Euler-Maclaurin
    summation is requested.

    **Examples**

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> nprod(lambda k: k, [1, 4])
        24.0
        >>> 2*nprod(lambda k: (4*k**2)/(4*k**2-1), [1, inf])
        3.141592653589793238462643
        >>> nprod(lambda k: 1-k**(-2), [-inf,-2])
        0.5
        >>> nprod(lambda k: exp(1/(1+k**2)), [-inf, inf]); exp(pi/tanh(pi))
        23.41432688231864337420035
        23.41432688231864337420035

    **References**

    1. [Weisstein]_ http://mathworld.wolfram.com/InfiniteProduct.html

    """
    # Euler-Maclaurin (and explicit nsum=True) must work in log space.
    if nsum or ('e' in kwargs.get('method', '')):
        orig = ctx.prec
        try:
            # TODO: we are evaluating log(1+eps) -> eps, which is
            # inaccurate. This currently works because nsum greatly
            # increases the working precision. But we should be
            # more intelligent and handle the precision here.
            ctx.prec += 10
            v = ctx.nsum(lambda n: ctx.ln(f(n)), interval, **kwargs)
        finally:
            ctx.prec = orig
        return +ctx.exp(v)

    a, b = ctx._as_points(interval)
    if a == ctx.ninf:
        if b == ctx.inf:
            # Doubly infinite: pair f(-k)*f(k), with f(0) counted once.
            return f(0) * ctx.nprod(lambda k: f(-k) * f(k), [1, ctx.inf], **kwargs)
        # BUG FIX: reflecting [-inf, b] onto [-b, inf] must negate the
        # argument; passing f unchanged evaluated the factors at the
        # mirrored (positive) indices, which is wrong unless f is even.
        return ctx.nprod(lambda k: f(-k), [-b, ctx.inf], **kwargs)
    elif b != ctx.inf:
        # Finite product: multiply the terms directly.
        return ctx.fprod(f(ctx.mpf(k)) for k in xrange(int(a), int(b)+1))

    a = int(a)

    def update(partial_products, indices):
        # Extend the partial-product sequence for the extrapolator.
        if partial_products:
            pprod = partial_products[-1]
        else:
            pprod = ctx.one
        for k in indices:
            pprod = pprod * f(a + ctx.mpf(k))
        partial_products.append(pprod)

    return +ctx.adaptive_extrapolation(update, None, kwargs)
|
| 1999 |
+
|
| 2000 |
+
|
| 2001 |
+
@defun
def limit(ctx, f, x, direction=1, exp=False, **kwargs):
    r"""
    Computes an estimate of the limit

    .. math ::

        \lim_{t \to x} f(t)

    where `x` may be finite or infinite.  For finite `x`, `f` is
    sampled at `x + d/n` for `n = 1, 2, 3, \ldots` where the approach
    direction `d` is given by the *direction* keyword; for infinite
    `x`, at `\mathrm{sign}(x) \cdot n`.  If convergence is too slow,
    the sequence is accelerated using Richardson extrapolation or the
    Shanks transformation; the options *tol*, *method*, *maxterms*,
    *steps* and *verbose* have the same meaning as for
    :func:`~mpmath.nsum`.

    If *exp=True*, `f` is instead sampled at exponentially spaced
    points `n = 2^1, 2^2, 2^3, \ldots`, which can greatly improve the
    convergence rate — provided `f` supports fast, accurate evaluation
    at arguments extremely close to the limit point (or, if infinite,
    very large arguments).

    **Examples**

        >>> from mpmath import *
        >>> mp.dps = 30; mp.pretty = True
        >>> limit(lambda x: (x-sin(x))/x**3, 0)
        0.166666666666666666666666666667
        >>> limit(lambda n: (1+3/n)**n, inf)
        20.0855369231876677409285296546
        >>> limit(lambda n: fac(n) / (sqrt(n)*(n/e)**n), inf); sqrt(2*pi)
        2.50662827463100050241576528481
        2.50662827463100050241576528481

    """
    if ctx.isinf(x):
        sgn = ctx.sign(x)
        def sample(k):
            return f(ctx.mpf(k+1)*sgn)
    else:
        step = direction * ctx.one
        def sample(k):
            return f(x + step/(k+1))
    if exp:
        linear = sample
        def sample(k):
            return linear(2**k)

    def update(values, indices):
        for k in indices:
            values.append(sample(k+1))

    # XXX: steps used by nsum don't work well
    if 'steps' not in kwargs:
        kwargs['steps'] = [10]

    return +ctx.adaptive_extrapolation(update, None, kwargs)
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/calculus/odes.py
ADDED
|
@@ -0,0 +1,288 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from bisect import bisect
|
| 2 |
+
from ..libmp.backend import xrange
|
| 3 |
+
|
| 4 |
+
class ODEMethods(object):
    """Mix-in namespace for ODE solvers; ``odefun`` is attached to it at
    the bottom of this module."""
    pass
|
| 6 |
+
|
| 7 |
+
def ode_taylor(ctx, derivs, x0, y0, tol_prec, n):
    """Generate a degree-*n* Taylor expansion of the ODE solution at *x0*.

    ``derivs(x, y)`` evaluates the right-hand side F of y' = F(x, y) for
    a vector y.  Returns ``(ser, xb)`` where ``ser[d]`` holds the Taylor
    coefficients of solution component d around x0, and ``xb`` is an
    estimate of how far the expansion stays accurate to roughly
    ``2**-tol_prec``.
    """
    # The Euler step size doubles as the tolerance: h = tol = 2**-tol_prec.
    h = tol = ctx.ldexp(1, -tol_prec)
    dim = len(y0)
    xs = [x0]
    ys = [y0]
    x = x0
    y = y0
    orig = ctx.prec
    try:
        # Differencing with tiny h cancels many leading bits per order,
        # so work at greatly increased precision.
        ctx.prec = orig*(1+n)
        # Use n steps with Euler's method to get
        # evaluation points for derivatives
        for i in range(n):
            fxy = derivs(x, y)
            y = [y[i]+h*fxy[i] for i in xrange(len(y))]
            x += h
            xs.append(x)
            ys.append(y)
        # Compute derivatives
        ser = [[] for d in range(dim)]
        for j in range(n+1):
            # j-th forward difference: sum_i (-1)^(j-i) C(j,i) ys[i].
            # b walks through the alternating binomial coefficients.
            s = [0]*dim
            b = (-1) ** (j & 1)
            k = 1
            for i in range(j+1):
                for d in range(dim):
                    s[d] += b * ys[i][d]
                b = (b * (j-k+1)) // (-k)
                k += 1
            # Taylor coefficient estimate: j-th difference / (h^j * j!).
            scale = h**(-j) / ctx.fac(j)
            for d in range(dim):
                s[d] = s[d] * scale
                ser[d].append(s[d])
    finally:
        ctx.prec = orig
    # Estimate radius for which we can get full accuracy.
    # XXX: do this right for zeros
    radius = ctx.one
    for ts in ser:
        if ts[-1]:
            # Keep the highest-order term below tol within the radius.
            radius = min(radius, ctx.nthroot(tol/abs(ts[-1]), n))
    radius /= 2 # XXX
    return ser, x0+radius
|
| 50 |
+
|
| 51 |
+
def odefun(ctx, F, x0, y0, tol=None, degree=None, method='taylor', verbose=False):
    r"""
    Returns a function `y(x)` that numerically solves the first-order
    ODE system `y'(x) = F(x, y(x))` with initial value `y(x_0) = y_0`,
    for any `x \ge x_0`.

    The system may be vector-valued: *y0* may be a sequence
    `[y_0, \ldots, y_n]` and *F* must then return a sequence of the same
    length.  For convenience, a scalar *y0* is also accepted; *F* then
    takes and returns scalars, and the solution function returns
    scalars.  A high-order ODE can be solved by rewriting it in
    first-order vector form.

    By default a high-order Taylor series method is used, accurate to
    roughly the working precision for well-behaved problems; note that
    *F* must be evaluable at very high precision for the series
    generation to work.  *tol* sets a looser target accuracy (e.g.
    ``tol=0.01`` for plotting); *degree* controls the degree of the
    local Taylor expansions, trading fewer steps against more work per
    step.  Computed expansions are cached, so re-evaluating within an
    already-covered interval is fast, and continuing past it is
    incremental.

    **Examples**

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> f = odefun(lambda x, y: y, 0, 1)
        >>> for x in [0, 1, 2.5]:
        ...     print((f(x), exp(x)))
        ...
        (1.0, 1.0)
        (2.71828182845905, 2.71828182845905)
        (12.1824939607035, 12.1824939607035)
        >>> f = odefun(lambda x, y: [-y[1], y[0]], 0, [1, 0])
        >>> nprint(f(1), 15)
        [0.54030230586814, 0.841470984807897]

    """
    # Translate the tolerance into a binary precision for the solver.
    if tol:
        tol_prec = int(-ctx.log(tol, 2))+10
    else:
        tol_prec = ctx.prec+10
    degree = degree or (3 + int(3*ctx.dps/2.))
    workprec = ctx.prec + 40
    # Accept scalar problems by wrapping them as 1-dimensional vectors.
    try:
        len(y0)
        return_vector = True
    except TypeError:
        scalar_F = F
        F = lambda x, y: [scalar_F(x, y[0])]
        y0 = [y0]
        return_vector = False
    ser, xb = ode_taylor(ctx, F, x0, y0, tol_prec, degree)
    # Cached expansions: segment_data[i] covers the interval between
    # segment_boundaries[i] and segment_boundaries[i+1].
    segment_boundaries = [x0, xb]
    segment_data = [(ser, x0, xb)]

    def eval_series(coeffs, a):
        # Evaluate each component's Taylor polynomial at offset a.
        return [ctx.polyval(c[::-1], a) for c in coeffs]

    def locate_segment(x):
        # Return the cached expansion covering x, generating and caching
        # further expansions as far as necessary.
        if x < x0:
            raise ValueError
        pos = bisect(segment_boundaries, x)
        if pos < len(segment_boundaries):
            return segment_data[pos-1]
        while True:
            coeffs, xa, xb = segment_data[-1]
            if verbose:
                print("Computing Taylor series for [%f, %f]" % (xa, xb))
            # Step to the end of the last segment and expand from there.
            y = eval_series(coeffs, xb-xa)
            xa = xb
            coeffs, xb = ode_taylor(ctx, F, xb, y, tol_prec, degree)
            segment_boundaries.append(xb)
            segment_data.append((coeffs, xa, xb))
            if x <= xb:
                return segment_data[-1]

    def solution(x):
        # Evaluate the piecewise-Taylor solution at x with guard bits.
        x = ctx.convert(x)
        saved_prec = ctx.prec
        try:
            ctx.prec = workprec
            coeffs, xa, xb = locate_segment(x)
            y = eval_series(coeffs, x-xa)
        finally:
            ctx.prec = saved_prec
        if return_vector:
            return [+component for component in y]
        return +y[0]

    return solution
|
| 283 |
+
|
| 284 |
+
ODEMethods.odefun = odefun
|
| 285 |
+
|
| 286 |
+
if __name__ == "__main__":
|
| 287 |
+
import doctest
|
| 288 |
+
doctest.testmod()
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/calculus/optimization.py
ADDED
|
@@ -0,0 +1,1102 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import print_function
|
| 2 |
+
|
| 3 |
+
from copy import copy
|
| 4 |
+
|
| 5 |
+
from ..libmp.backend import xrange
|
| 6 |
+
|
| 7 |
+
class OptimizationMethods(object):
    # Mixin class that the root-finding API (findroot etc.) is attached to;
    # methods are bound onto it at module level elsewhere in this file.
    # NOTE(review): the first parameter is named ``ctx`` rather than ``self``
    # (context-mixin convention in this codebase); the initializer is a no-op.
    def __init__(ctx):
        pass
|
| 10 |
+
|
| 11 |
+
##############
|
| 12 |
+
# 1D-SOLVERS #
|
| 13 |
+
##############
|
| 14 |
+
|
| 15 |
+
class Newton:
    """
    1d-solver generating pairs of approximative root and error.

    Requires a single starting point x0 near the root. The first derivative
    may be supplied with the ``df`` keyword; otherwise it is computed
    numerically through the context's ``diff``.

    Pro:

    * fast convergence
    * sometimes more robust than secant with a bad second starting point

    Contra:

    * slow convergence for multiple roots
    * needs the first derivative
    * 2 function evaluations per iteration
    """
    # Iteration budget used by findroot.
    maxsteps = 20

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        if len(x0) != 1:
            raise ValueError('expected 1 starting point, got %i' % len(x0))
        self.x0 = x0[0]
        self.f = f
        if 'df' in kwargs:
            df = kwargs['df']
        else:
            # Fall back to numerical differentiation via the context.
            def df(x):
                return self.ctx.diff(f, x)
        self.df = df

    def __iter__(self):
        # Infinite generator of (approximation, error); the caller stops it.
        f, df = self.f, self.df
        current = self.x0
        while True:
            nxt = current - f(current) / df(current)
            step = abs(nxt - current)
            current = nxt
            yield (nxt, step)
|
| 57 |
+
|
| 58 |
+
class Secant:
    """
    1d-solver generating pairs of approximative root and error.

    Requires one or two starting points near the root; when only x0 is
    given, x1 defaults to x0 + 0.25.

    Pro:

    * fast convergence

    Contra:

    * slow convergence for multiple roots
    """
    # Iteration budget used by findroot.
    maxsteps = 30

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        npts = len(x0)
        if npts == 1:
            self.x0 = x0[0]
            self.x1 = self.x0 + 0.25
        elif npts == 2:
            self.x0, self.x1 = x0[0], x0[1]
        else:
            raise ValueError('expected 1 or 2 starting points, got %i' % npts)
        self.f = f

    def __iter__(self):
        # Stops (by ending the generator) when the step or the secant slope
        # degenerates to zero; otherwise yields forever.
        f = self.f
        a, b = self.x0, self.x1
        fa = f(a)
        while True:
            fb = f(b)
            step = b - a
            if not step:
                break
            slope = (fb - fa) / step
            if not slope:
                break
            a, b = b, b - fb / slope
            fa = fb
            yield b, abs(step)
|
| 103 |
+
|
| 104 |
+
class MNewton:
    """
    1d-solver generating pairs of approximative root and error.

    Needs starting point x0 close to the root.
    Uses modified Newton's method that converges fast regardless of the
    multiplicity of the root.

    Keyword arguments ``df`` and ``d2f`` supply the first and second
    derivative; missing ones are computed numerically via the context.

    Pro:

    * converges fast for multiple roots

    Contra:

    * needs first and second derivative of f
    * 3 function evaluations per iteration
    """
    # Iteration budget used by findroot.
    maxsteps = 20

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        if not len(x0) == 1:
            raise ValueError('expected 1 starting point, got %i' % len(x0))
        self.x0 = x0[0]
        self.f = f
        if not 'df' in kwargs:
            def df(x):
                return self.ctx.diff(f, x)
        else:
            df = kwargs['df']
        self.df = df
        if not 'd2f' in kwargs:
            def d2f(x):
                return self.ctx.diff(df, x)
        else:
            # BUG FIX: this previously read kwargs['df'], silently using the
            # first derivative whenever the user supplied a second one.
            d2f = kwargs['d2f']
        self.d2f = d2f

    def __iter__(self):
        # Applies Newton to F(x) = f(x)/f'(x), whose roots are always simple,
        # so convergence stays quadratic for multiple roots of f.
        x = self.x0
        f = self.f
        df = self.df
        d2f = self.d2f
        while True:
            prevx = x
            fx = f(x)
            if fx == 0:
                # Exact root reached; end the generator.
                break
            dfx = df(x)
            d2fx = d2f(x)
            # x = x - F(x)/F'(x) with F(x) = f(x)/f'(x)
            x -= fx / (dfx - fx * d2fx / dfx)
            error = abs(x - prevx)
            yield x, error
|
| 158 |
+
|
| 159 |
+
class Halley:
    """
    1d-solver generating pairs of approximative root and error.

    Needs a starting point x0 close to the root.
    Uses Halley's method with cubic convergence rate.

    Keyword arguments ``df`` and ``d2f`` supply the first and second
    derivative; missing ones are computed numerically via the context.

    Pro:

    * converges even faster than Newton's method
    * useful when computing with *many* digits

    Contra:

    * needs first and second derivative of f
    * 3 function evaluations per iteration
    * converges slowly for multiple roots
    """

    # Iteration budget used by findroot.
    maxsteps = 20

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        if not len(x0) == 1:
            raise ValueError('expected 1 starting point, got %i' % len(x0))
        self.x0 = x0[0]
        self.f = f
        if not 'df' in kwargs:
            def df(x):
                return self.ctx.diff(f, x)
        else:
            df = kwargs['df']
        self.df = df
        if not 'd2f' in kwargs:
            def d2f(x):
                return self.ctx.diff(df, x)
        else:
            # BUG FIX: this previously read kwargs['df'], silently using the
            # first derivative whenever the user supplied a second one.
            d2f = kwargs['d2f']
        self.d2f = d2f

    def __iter__(self):
        x = self.x0
        f = self.f
        df = self.df
        d2f = self.d2f
        while True:
            prevx = x
            fx = f(x)
            dfx = df(x)
            d2fx = d2f(x)
            # Halley's iteration: x_{n+1} = x_n - 2 f f' / (2 f'^2 - f f'')
            x -= 2*fx*dfx / (2*dfx**2 - fx*d2fx)
            error = abs(x - prevx)
            yield x, error
|
| 212 |
+
|
| 213 |
+
class Muller:
    """
    1d-solver generating pairs of approximative root and error.

    Requires one, two or three starting points near the root; missing
    points default to the previous one plus 0.25. Implements Muller's
    method, which fits a parabola through the last three iterates and
    can therefore converge towards complex roots.

    Pro:

    * converges fast (somewhat faster than secant)
    * can find complex roots

    Contra:

    * converges slowly for multiple roots
    * may have complex values for real starting points and real roots

    http://en.wikipedia.org/wiki/Muller's_method
    """
    # Iteration budget used by findroot.
    maxsteps = 30

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        points = list(x0)
        if len(points) == 1:
            points.append(points[0] + 0.25)
        if len(points) == 2:
            points.append(points[1] + 0.25)
        if len(points) != 3:
            raise ValueError('expected 1, 2 or 3 starting points, got %i'
                             % len(x0))
        self.x0, self.x1, self.x2 = points
        self.f = f
        self.verbose = kwargs['verbose']

    def __iter__(self):
        f = self.f
        x0, x1, x2 = self.x0, self.x1, self.x2
        fx0, fx1, fx2 = f(x0), f(x1), f(x2)
        while True:
            # TODO: maybe refactoring with function for divided differences
            # calculate divided differences
            fx2x1 = (fx1 - fx2) / (x1 - x2)
            fx2x0 = (fx0 - fx2) / (x0 - x2)
            fx1x0 = (fx0 - fx1) / (x0 - x1)
            w = fx2x1 + fx2x0 - fx1x0
            fx2x1x0 = (fx1x0 - fx2x1) / (x0 - x2)
            if w == 0 and fx2x1x0 == 0:
                if self.verbose:
                    print('canceled with')
                    print('x0 =', x0, ', x1 =', x1, 'and x2 =', x2)
                break
            # Shift the window of iterates.
            x0, fx0 = x1, fx1
            x1, fx1 = x2, fx2
            # denominator should be as large as possible => choose sign
            r = self.ctx.sqrt(w**2 - 4*fx2*fx2x1x0)
            if abs(w - r) > abs(w + r):
                r = -r
            x2 = x2 - 2*fx2 / (w + r)
            fx2 = f(x2)
            yield x2, abs(x2 - x1)
|
| 288 |
+
|
| 289 |
+
# TODO: consider raising a ValueError when there's no sign change in a and b
|
| 290 |
+
# TODO: consider raising a ValueError when there's no sign change in a and b
class Bisection:
    """
    1d-solver generating pairs of approximative root and error.

    Bisects the interval [a, b] until the bracket collapses onto a root.
    Might fail for multiple roots (needs sign change).

    Pro:

    * robust and reliable

    Contra:

    * converges slowly
    * needs sign change
    """
    # Iteration budget used by findroot.
    maxsteps = 100

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        if len(x0) != 2:
            raise ValueError('expected interval of 2 points, got %i' % len(x0))
        self.f = f
        self.a = x0[0]
        self.b = x0[1]

    def __iter__(self):
        f = self.f
        lo, hi = self.a, self.b
        width = hi - lo
        fhi = f(hi)
        while True:
            mid = self.ctx.ldexp(lo + hi, -1)
            fmid = f(mid)
            product = fmid * fhi
            if product < 0:
                # Opposite signs at mid and hi: root lies in [mid, hi].
                lo = mid
            elif product > 0:
                # Same sign at mid and hi: root lies in [lo, mid].
                hi = mid
                fhi = fmid
            else:
                # Exact hit: report it with zero error, then keep iterating.
                yield mid, self.ctx.zero
            width /= 2
            yield (lo + hi)/2, abs(width)
|
| 335 |
+
|
| 336 |
+
def _getm(method):
|
| 337 |
+
"""
|
| 338 |
+
Return a function to calculate m for Illinois-like methods.
|
| 339 |
+
"""
|
| 340 |
+
if method == 'illinois':
|
| 341 |
+
def getm(fz, fb):
|
| 342 |
+
return 0.5
|
| 343 |
+
elif method == 'pegasus':
|
| 344 |
+
def getm(fz, fb):
|
| 345 |
+
return fb/(fb + fz)
|
| 346 |
+
elif method == 'anderson':
|
| 347 |
+
def getm(fz, fb):
|
| 348 |
+
m = 1 - fz/fb
|
| 349 |
+
if m > 0:
|
| 350 |
+
return m
|
| 351 |
+
else:
|
| 352 |
+
return 0.5
|
| 353 |
+
else:
|
| 354 |
+
raise ValueError("method '%s' not recognized" % method)
|
| 355 |
+
return getm
|
| 356 |
+
|
| 357 |
+
class Illinois:
|
| 358 |
+
"""
|
| 359 |
+
1d-solver generating pairs of approximative root and error.
|
| 360 |
+
|
| 361 |
+
Uses Illinois method or similar to find a root of f in [a, b].
|
| 362 |
+
Might fail for multiple roots (needs sign change).
|
| 363 |
+
Combines bisect with secant (improved regula falsi).
|
| 364 |
+
|
| 365 |
+
The only difference between the methods is the scaling factor m, which is
|
| 366 |
+
used to ensure convergence (you can choose one using the 'method' keyword):
|
| 367 |
+
|
| 368 |
+
Illinois method ('illinois'):
|
| 369 |
+
m = 0.5
|
| 370 |
+
|
| 371 |
+
Pegasus method ('pegasus'):
|
| 372 |
+
m = fb/(fb + fz)
|
| 373 |
+
|
| 374 |
+
Anderson-Bjoerk method ('anderson'):
|
| 375 |
+
m = 1 - fz/fb if positive else 0.5
|
| 376 |
+
|
| 377 |
+
Pro:
|
| 378 |
+
|
| 379 |
+
* converges very fast
|
| 380 |
+
|
| 381 |
+
Contra:
|
| 382 |
+
|
| 383 |
+
* has problems with multiple roots
|
| 384 |
+
* needs sign change
|
| 385 |
+
"""
|
| 386 |
+
maxsteps = 30
|
| 387 |
+
|
| 388 |
+
def __init__(self, ctx, f, x0, **kwargs):
|
| 389 |
+
self.ctx = ctx
|
| 390 |
+
if len(x0) != 2:
|
| 391 |
+
raise ValueError('expected interval of 2 points, got %i' % len(x0))
|
| 392 |
+
self.a = x0[0]
|
| 393 |
+
self.b = x0[1]
|
| 394 |
+
self.f = f
|
| 395 |
+
self.tol = kwargs['tol']
|
| 396 |
+
self.verbose = kwargs['verbose']
|
| 397 |
+
self.method = kwargs.get('method', 'illinois')
|
| 398 |
+
self.getm = _getm(self.method)
|
| 399 |
+
if self.verbose:
|
| 400 |
+
print('using %s method' % self.method)
|
| 401 |
+
|
| 402 |
+
def __iter__(self):
|
| 403 |
+
method = self.method
|
| 404 |
+
f = self.f
|
| 405 |
+
a = self.a
|
| 406 |
+
b = self.b
|
| 407 |
+
fa = f(a)
|
| 408 |
+
fb = f(b)
|
| 409 |
+
m = None
|
| 410 |
+
while True:
|
| 411 |
+
l = b - a
|
| 412 |
+
if l == 0:
|
| 413 |
+
break
|
| 414 |
+
s = (fb - fa) / l
|
| 415 |
+
z = a - fa/s
|
| 416 |
+
fz = f(z)
|
| 417 |
+
if abs(fz) < self.tol:
|
| 418 |
+
# TODO: better condition (when f is very flat)
|
| 419 |
+
if self.verbose:
|
| 420 |
+
print('canceled with z =', z)
|
| 421 |
+
yield z, l
|
| 422 |
+
break
|
| 423 |
+
if fz * fb < 0: # root in [z, b]
|
| 424 |
+
a = b
|
| 425 |
+
fa = fb
|
| 426 |
+
b = z
|
| 427 |
+
fb = fz
|
| 428 |
+
else: # root in [a, z]
|
| 429 |
+
m = self.getm(fz, fb)
|
| 430 |
+
b = z
|
| 431 |
+
fb = fz
|
| 432 |
+
fa = m*fa # scale down to ensure convergence
|
| 433 |
+
if self.verbose and m and not method == 'illinois':
|
| 434 |
+
print('m:', m)
|
| 435 |
+
yield (a + b)/2, abs(l)
|
| 436 |
+
|
| 437 |
+
def Pegasus(*args, **kwargs):
    """
    1d-solver generating pairs of approximative root and error.

    Uses the Pegasus method to find a root of f in [a, b].
    Thin wrapper constructing an Illinois solver with method='pegasus'.
    """
    return Illinois(*args, **dict(kwargs, method='pegasus'))
|
| 446 |
+
|
| 447 |
+
def Anderson(*args, **kwargs):
    """
    1d-solver generating pairs of approximative root and error.

    Uses Anderson-Bjoerk method to find a root of f in [a, b].
    Wrapper for illinois to use method='anderson'.
    """
    # Docstring previously claimed method='pegasus' (copy-paste error);
    # the implementation has always selected 'anderson'.
    kwargs['method'] = 'anderson'
    return Illinois(*args, **kwargs)
|
| 456 |
+
|
| 457 |
+
# TODO: check whether it's possible to combine it with Illinois stuff
|
| 458 |
+
# TODO: check whether it's possible to combine it with Illinois stuff
class Ridder:
    """
    1d-solver generating pairs of approximative root and error.

    Ridders' method for a root of f in [a, b]: an exponentially-weighted
    false-position step applied to the bisection midpoint. Said to perform
    as well as Brent's method while being simpler.

    Pro:

    * very fast
    * simpler than Brent's method

    Contra:

    * two function evaluations per step
    * has problems with multiple roots
    * needs sign change

    http://en.wikipedia.org/wiki/Ridders'_method
    """
    # Iteration budget used by findroot.
    maxsteps = 30

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        self.f = f
        if len(x0) != 2:
            raise ValueError('expected interval of 2 points, got %i' % len(x0))
        self.x1 = x0[0]
        self.x2 = x0[1]
        self.verbose = kwargs['verbose']
        self.tol = kwargs['tol']

    def __iter__(self):
        ctx = self.ctx
        f = self.f
        lo, hi = self.x1, self.x2
        flo, fhi = f(lo), f(hi)
        while True:
            mid = 0.5*(lo + hi)
            fmid = f(mid)
            # Ridders' update: exponential correction of the midpoint.
            cand = mid + (mid - lo) * ctx.sign(flo - fhi) * fmid / ctx.sqrt(fmid**2 - flo*fhi)
            fcand = f(cand)
            if abs(fcand) < self.tol:
                # TODO: better condition (when f is very flat)
                if self.verbose:
                    print('canceled with f(x4) =', fcand)
                yield cand, abs(lo - hi)
                break
            if fcand * fhi < 0:  # root in [x4, x2]
                lo, flo = cand, fcand
            else:  # root in [x1, x4]
                hi, fhi = cand, fcand
            yield (lo + hi)/2, abs(lo - hi)
|
| 516 |
+
|
| 517 |
+
class ANewton:
    """
    EXPERIMENTAL 1d-solver generating pairs of approximative root and error.

    Uses Newton's method modified to use Steffensens method when convergence is
    slow. (I.e. for multiple roots.)
    """
    # Iteration budget used by findroot.
    maxsteps = 20

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        if not len(x0) == 1:
            raise ValueError('expected 1 starting point, got %i' % len(x0))
        self.x0 = x0[0]
        self.f = f
        if not 'df' in kwargs:
            # No derivative supplied: differentiate numerically via the context.
            def df(x):
                return self.ctx.diff(f, x)
        else:
            df = kwargs['df']
        self.df = df
        # Fixed-point form of the Newton step; __iter__ may later wrap this
        # with steffensen() to accelerate convergence.
        def phi(x):
            return x - f(x) / df(x)
        self.phi = phi
        self.verbose = kwargs['verbose']

    def __iter__(self):
        # Yields (approximation, error) pairs indefinitely; the caller
        # decides when to stop (findroot enforces maxsteps).
        x0 = self.x0
        f = self.f
        df = self.df
        phi = self.phi
        error = 0
        counter = 0
        while True:
            prevx = x0
            try:
                x0 = phi(x0)
            except ZeroDivisionError:
                # df(x0) == 0: cannot take another Newton step.
                if self.verbose:
                    print('ZeroDivisionError: canceled with x =', x0)
                break
            preverror = error
            error = abs(prevx - x0)
            # TODO: decide not to use convergence acceleration
            # Slow-convergence heuristic: true when the error failed to shrink
            # to less than half of the previous error (|err - preverr| < err).
            if error and abs(error - preverror) / error < 1:
                if self.verbose:
                    print('converging slowly')
                counter += 1
            if counter >= 3:
                # accelerate convergence
                # NOTE(review): steffensen() is defined elsewhere in this
                # module; each application squares the order of phi.
                phi = steffensen(phi)
                counter = 0
                if self.verbose:
                    print('accelerating convergence')
            yield x0, error
|
| 572 |
+
|
| 573 |
+
# TODO: add Brent
|
| 574 |
+
|
| 575 |
+
############################
|
| 576 |
+
# MULTIDIMENSIONAL SOLVERS #
|
| 577 |
+
############################
|
| 578 |
+
|
| 579 |
+
def jacobian(ctx, f, x):
    """
    Calculate the Jacobian matrix of a function at the point x0.

    This is the first derivative of a vectorial function:

        f : R^m -> R^n with m >= n

    Uses one-sided (forward) differences with step size sqrt(eps) of the
    context's working precision.
    """
    point = ctx.matrix(x)
    step = ctx.sqrt(ctx.eps)
    base = ctx.matrix(f(*point))
    rows = len(base)
    cols = len(point)
    jac = ctx.matrix(rows, cols)
    for col in range(cols):
        shifted = point.copy()
        shifted[col] += step
        # Forward-difference approximation of the col-th column.
        column = (ctx.matrix(f(*shifted)) - base) / step
        for row in range(rows):
            jac[row, col] = column[row]
    return jac
|
| 600 |
+
|
| 601 |
+
# TODO: test with user-specified jacobian matrix
|
| 602 |
+
# TODO: test with user-specified jacobian matrix
class MDNewton:
    """
    Find the root of a vector function numerically using Newton's method.

    f is a vector function representing a nonlinear equation system.

    x0 is the starting point close to the root.

    J is a function returning the Jacobian matrix for a point.

    Supports overdetermined systems.

    Use the 'norm' keyword to specify which norm to use. Defaults to max-norm.
    The function to calculate the Jacobian matrix can be given using the
    keyword 'J'. Otherwise it will be calculated numerically.

    Please note that this method converges only locally. Especially for high-
    dimensional systems it is not trivial to find a good starting point being
    close enough to the root.

    It is recommended to use a faster, low-precision solver from SciPy [1] or
    OpenOpt [2] to get an initial guess. Afterwards you can use this method for
    root-polishing to any precision.

    [1] http://scipy.org

    [2] http://openopt.org/Welcome
    """
    # Iteration budget used by findroot.
    maxsteps = 10

    def __init__(self, ctx, f, x0, **kwargs):
        self.ctx = ctx
        self.f = f
        # Accept plain sequences by converting them to a context matrix.
        if isinstance(x0, (tuple, list)):
            x0 = ctx.matrix(x0)
        assert x0.cols == 1, 'need a vector'
        self.x0 = x0
        if 'J' in kwargs:
            self.J = kwargs['J']
        else:
            # Fall back to a numerically computed Jacobian.
            def J(*x):
                return ctx.jacobian(f, x)
            self.J = J
        self.norm = kwargs['norm']
        self.verbose = kwargs['verbose']

    def __iter__(self):
        # Damped Newton iteration: solve J s = -f for the step, then halve
        # the step length until the residual norm actually decreases.
        f = self.f
        x0 = self.x0
        norm = self.norm
        J = self.J
        fx = self.ctx.matrix(f(*x0))
        fxnorm = norm(fx)
        cancel = False
        while not cancel:
            # get direction of descent
            fxn = -fx
            Jx = J(*x0)
            s = self.ctx.lu_solve(Jx, fxn)
            if self.verbose:
                print('Jx:')
                print(Jx)
                print('s:', s)
            # damping step size TODO: better strategy (hard task)
            l = self.ctx.one
            x1 = x0 + s
            while True:
                if x1 == x0:
                    # Step underflowed to zero at working precision: stop.
                    if self.verbose:
                        print("canceled, won't get more excact")
                    cancel = True
                    break
                fx = self.ctx.matrix(f(*x1))
                newnorm = norm(fx)
                if newnorm < fxnorm:
                    # new x accepted
                    fxnorm = newnorm
                    x0 = x1
                    break
                # Residual grew: halve the damping factor and retry.
                l /= 2
                x1 = x0 + l*s
            yield (x0, fxnorm)
|
| 684 |
+
|
| 685 |
+
#############
|
| 686 |
+
# UTILITIES #
|
| 687 |
+
#############
|
| 688 |
+
|
| 689 |
+
# Map of the string aliases accepted by findroot's ``solver`` argument to
# the solver classes defined above.
str2solver = {'newton':Newton, 'secant':Secant, 'mnewton':MNewton,
              'halley':Halley, 'muller':Muller, 'bisect':Bisection,
              'illinois':Illinois, 'pegasus':Pegasus, 'anderson':Anderson,
              'ridder':Ridder, 'anewton':ANewton, 'mdnewton':MDNewton}
|
| 693 |
+
|
| 694 |
+
def findroot(ctx, f, x0, solver='secant', tol=None, verbose=False, verify=True, **kwargs):
|
| 695 |
+
r"""
|
| 696 |
+
Find an approximate solution to `f(x) = 0`, using *x0* as starting point or
|
| 697 |
+
interval for *x*.
|
| 698 |
+
|
| 699 |
+
Multidimensional overdetermined systems are supported.
|
| 700 |
+
You can specify them using a function or a list of functions.
|
| 701 |
+
|
| 702 |
+
Mathematically speaking, this function returns `x` such that
|
| 703 |
+
`|f(x)|^2 \leq \mathrm{tol}` is true within the current working precision.
|
| 704 |
+
If the computed value does not meet this criterion, an exception is raised.
|
| 705 |
+
This exception can be disabled with *verify=False*.
|
| 706 |
+
|
| 707 |
+
For interval arithmetic (``iv.findroot()``), please note that
|
| 708 |
+
the returned interval ``x`` is not guaranteed to contain `f(x)=0`!
|
| 709 |
+
It is only some `x` for which `|f(x)|^2 \leq \mathrm{tol}` certainly holds
|
| 710 |
+
regardless of numerical error. This may be improved in the future.
|
| 711 |
+
|
| 712 |
+
**Arguments**
|
| 713 |
+
|
| 714 |
+
*f*
|
| 715 |
+
one dimensional function
|
| 716 |
+
*x0*
|
| 717 |
+
starting point, several starting points or interval (depends on solver)
|
| 718 |
+
*tol*
|
| 719 |
+
the returned solution has an error smaller than this
|
| 720 |
+
*verbose*
|
| 721 |
+
print additional information for each iteration if true
|
| 722 |
+
*verify*
|
| 723 |
+
verify the solution and raise a ValueError if `|f(x)|^2 > \mathrm{tol}`
|
| 724 |
+
*solver*
|
| 725 |
+
a generator for *f* and *x0* returning approximative solution and error
|
| 726 |
+
*maxsteps*
|
| 727 |
+
after how many steps the solver will cancel
|
| 728 |
+
*df*
|
| 729 |
+
first derivative of *f* (used by some solvers)
|
| 730 |
+
*d2f*
|
| 731 |
+
second derivative of *f* (used by some solvers)
|
| 732 |
+
*multidimensional*
|
| 733 |
+
force multidimensional solving
|
| 734 |
+
*J*
|
| 735 |
+
Jacobian matrix of *f* (used by multidimensional solvers)
|
| 736 |
+
*norm*
|
| 737 |
+
used vector norm (used by multidimensional solvers)
|
| 738 |
+
|
| 739 |
+
solver has to be callable with ``(f, x0, **kwargs)`` and return an generator
|
| 740 |
+
yielding pairs of approximative solution and estimated error (which is
|
| 741 |
+
expected to be positive).
|
| 742 |
+
You can use the following string aliases:
|
| 743 |
+
'secant', 'mnewton', 'halley', 'muller', 'illinois', 'pegasus', 'anderson',
|
| 744 |
+
'ridder', 'anewton', 'bisect'
|
| 745 |
+
|
| 746 |
+
See mpmath.calculus.optimization for their documentation.
|
| 747 |
+
|
| 748 |
+
**Examples**
|
| 749 |
+
|
| 750 |
+
The function :func:`~mpmath.findroot` locates a root of a given function using the
|
| 751 |
+
secant method by default. A simple example use of the secant method is to
|
| 752 |
+
compute `\pi` as the root of `\sin x` closest to `x_0 = 3`::
|
| 753 |
+
|
| 754 |
+
>>> from mpmath import *
|
| 755 |
+
>>> mp.dps = 30; mp.pretty = True
|
| 756 |
+
>>> findroot(sin, 3)
|
| 757 |
+
3.14159265358979323846264338328
|
| 758 |
+
|
| 759 |
+
The secant method can be used to find complex roots of analytic functions,
|
| 760 |
+
although it must in that case generally be given a nonreal starting value
|
| 761 |
+
(or else it will never leave the real line)::
|
| 762 |
+
|
| 763 |
+
>>> mp.dps = 15
|
| 764 |
+
>>> findroot(lambda x: x**3 + 2*x + 1, j)
|
| 765 |
+
(0.226698825758202 + 1.46771150871022j)
|
| 766 |
+
|
| 767 |
+
A nice application is to compute nontrivial roots of the Riemann zeta
|
| 768 |
+
function with many digits (good initial values are needed for convergence)::
|
| 769 |
+
|
| 770 |
+
>>> mp.dps = 30
|
| 771 |
+
>>> findroot(zeta, 0.5+14j)
|
| 772 |
+
(0.5 + 14.1347251417346937904572519836j)
|
| 773 |
+
|
| 774 |
+
The secant method can also be used as an optimization algorithm, by passing
|
| 775 |
+
it a derivative of a function. The following example locates the positive
|
| 776 |
+
minimum of the gamma function::
|
| 777 |
+
|
| 778 |
+
>>> mp.dps = 20
|
| 779 |
+
>>> findroot(lambda x: diff(gamma, x), 1)
|
| 780 |
+
1.4616321449683623413
|
| 781 |
+
|
| 782 |
+
Finally, a useful application is to compute inverse functions, such as the
|
| 783 |
+
Lambert W function which is the inverse of `w e^w`, given the first
|
| 784 |
+
term of the solution's asymptotic expansion as the initial value. In basic
|
| 785 |
+
cases, this gives identical results to mpmath's built-in ``lambertw``
|
| 786 |
+
function::
|
| 787 |
+
|
| 788 |
+
>>> def lambert(x):
|
| 789 |
+
... return findroot(lambda w: w*exp(w) - x, log(1+x))
|
| 790 |
+
...
|
| 791 |
+
>>> mp.dps = 15
|
| 792 |
+
>>> lambert(1); lambertw(1)
|
| 793 |
+
0.567143290409784
|
| 794 |
+
0.567143290409784
|
| 795 |
+
>>> lambert(1000); lambert(1000)
|
| 796 |
+
5.2496028524016
|
| 797 |
+
5.2496028524016
|
| 798 |
+
|
| 799 |
+
Multidimensional functions are also supported::
|
| 800 |
+
|
| 801 |
+
>>> f = [lambda x1, x2: x1**2 + x2,
|
| 802 |
+
... lambda x1, x2: 5*x1**2 - 3*x1 + 2*x2 - 3]
|
| 803 |
+
>>> findroot(f, (0, 0))
|
| 804 |
+
[-0.618033988749895]
|
| 805 |
+
[-0.381966011250105]
|
| 806 |
+
>>> findroot(f, (10, 10))
|
| 807 |
+
[ 1.61803398874989]
|
| 808 |
+
[-2.61803398874989]
|
| 809 |
+
|
| 810 |
+
You can verify this by solving the system manually.
|
| 811 |
+
|
| 812 |
+
Please note that the following (more general) syntax also works::
|
| 813 |
+
|
| 814 |
+
>>> def f(x1, x2):
|
| 815 |
+
... return x1**2 + x2, 5*x1**2 - 3*x1 + 2*x2 - 3
|
| 816 |
+
...
|
| 817 |
+
>>> findroot(f, (0, 0))
|
| 818 |
+
[-0.618033988749895]
|
| 819 |
+
[-0.381966011250105]
|
| 820 |
+
|
| 821 |
+
|
| 822 |
+
**Multiple roots**
|
| 823 |
+
|
| 824 |
+
For multiple roots all methods of the Newtonian family (including secant)
|
| 825 |
+
converge slowly. Consider this example::
|
| 826 |
+
|
| 827 |
+
>>> f = lambda x: (x - 1)**99
|
| 828 |
+
>>> findroot(f, 0.9, verify=False)
|
| 829 |
+
0.918073542444929
|
| 830 |
+
|
| 831 |
+
Even for a very close starting point the secant method converges very
|
| 832 |
+
slowly. Use ``verbose=True`` to illustrate this.
|
| 833 |
+
|
| 834 |
+
It is possible to modify Newton's method to make it converge regardless of
|
| 835 |
+
the root's multiplicity::
|
| 836 |
+
|
| 837 |
+
>>> findroot(f, -10, solver='mnewton')
|
| 838 |
+
1.0
|
| 839 |
+
|
| 840 |
+
This variant uses the first and second derivative of the function, which is
|
| 841 |
+
not very efficient.
|
| 842 |
+
|
| 843 |
+
Alternatively you can use an experimental Newtonian solver that keeps track
|
| 844 |
+
of the speed of convergence and accelerates it using Steffensen's method if
|
| 845 |
+
necessary::
|
| 846 |
+
|
| 847 |
+
>>> findroot(f, -10, solver='anewton', verbose=True)
|
| 848 |
+
x: -9.88888888888888888889
|
| 849 |
+
error: 0.111111111111111111111
|
| 850 |
+
converging slowly
|
| 851 |
+
x: -9.77890011223344556678
|
| 852 |
+
error: 0.10998877665544332211
|
| 853 |
+
converging slowly
|
| 854 |
+
x: -9.67002233332199662166
|
| 855 |
+
error: 0.108877778911448945119
|
| 856 |
+
converging slowly
|
| 857 |
+
accelerating convergence
|
| 858 |
+
x: -9.5622443299551077669
|
| 859 |
+
error: 0.107778003366888854764
|
| 860 |
+
converging slowly
|
| 861 |
+
x: 0.99999999999999999214
|
| 862 |
+
error: 10.562244329955107759
|
| 863 |
+
x: 1.0
|
| 864 |
+
error: 7.8598304758094664213e-18
|
| 865 |
+
ZeroDivisionError: canceled with x = 1.0
|
| 866 |
+
1.0
|
| 867 |
+
|
| 868 |
+
**Complex roots**
|
| 869 |
+
|
| 870 |
+
For complex roots it's recommended to use Muller's method as it converges
|
| 871 |
+
even for real starting points very fast::
|
| 872 |
+
|
| 873 |
+
>>> findroot(lambda x: x**4 + x + 1, (0, 1, 2), solver='muller')
|
| 874 |
+
(0.727136084491197 + 0.934099289460529j)
|
| 875 |
+
|
| 876 |
+
|
| 877 |
+
**Intersection methods**
|
| 878 |
+
|
| 879 |
+
When you need to find a root in a known interval, it's highly recommended to
|
| 880 |
+
use an intersection-based solver like ``'anderson'`` or ``'ridder'``.
|
| 881 |
+
Usually they converge faster and more reliable. They have however problems
|
| 882 |
+
with multiple roots and usually need a sign change to find a root::
|
| 883 |
+
|
| 884 |
+
>>> findroot(lambda x: x**3, (-1, 1), solver='anderson')
|
| 885 |
+
0.0
|
| 886 |
+
|
| 887 |
+
Be careful with symmetric functions::
|
| 888 |
+
|
| 889 |
+
>>> findroot(lambda x: x**2, (-1, 1), solver='anderson') #doctest:+ELLIPSIS
|
| 890 |
+
Traceback (most recent call last):
|
| 891 |
+
...
|
| 892 |
+
ZeroDivisionError
|
| 893 |
+
|
| 894 |
+
It fails even for better starting points, because there is no sign change::
|
| 895 |
+
|
| 896 |
+
>>> findroot(lambda x: x**2, (-1, .5), solver='anderson')
|
| 897 |
+
Traceback (most recent call last):
|
| 898 |
+
...
|
| 899 |
+
ValueError: Could not find root within given tolerance. (1.0 > 2.16840434497100886801e-19)
|
| 900 |
+
Try another starting point or tweak arguments.
|
| 901 |
+
|
| 902 |
+
"""
|
| 903 |
+
prec = ctx.prec
|
| 904 |
+
try:
|
| 905 |
+
ctx.prec += 20
|
| 906 |
+
|
| 907 |
+
# initialize arguments
|
| 908 |
+
if tol is None:
|
| 909 |
+
tol = ctx.eps * 2**10
|
| 910 |
+
|
| 911 |
+
kwargs['verbose'] = kwargs.get('verbose', verbose)
|
| 912 |
+
|
| 913 |
+
if 'd1f' in kwargs:
|
| 914 |
+
kwargs['df'] = kwargs['d1f']
|
| 915 |
+
|
| 916 |
+
kwargs['tol'] = tol
|
| 917 |
+
if isinstance(x0, (list, tuple)):
|
| 918 |
+
x0 = [ctx.convert(x) for x in x0]
|
| 919 |
+
else:
|
| 920 |
+
x0 = [ctx.convert(x0)]
|
| 921 |
+
|
| 922 |
+
if isinstance(solver, str):
|
| 923 |
+
try:
|
| 924 |
+
solver = str2solver[solver]
|
| 925 |
+
except KeyError:
|
| 926 |
+
raise ValueError('could not recognize solver')
|
| 927 |
+
|
| 928 |
+
# accept list of functions
|
| 929 |
+
if isinstance(f, (list, tuple)):
|
| 930 |
+
f2 = copy(f)
|
| 931 |
+
def tmp(*args):
|
| 932 |
+
return [fn(*args) for fn in f2]
|
| 933 |
+
f = tmp
|
| 934 |
+
|
| 935 |
+
# detect multidimensional functions
|
| 936 |
+
try:
|
| 937 |
+
fx = f(*x0)
|
| 938 |
+
multidimensional = isinstance(fx, (list, tuple, ctx.matrix))
|
| 939 |
+
except TypeError:
|
| 940 |
+
fx = f(x0[0])
|
| 941 |
+
multidimensional = False
|
| 942 |
+
if 'multidimensional' in kwargs:
|
| 943 |
+
multidimensional = kwargs['multidimensional']
|
| 944 |
+
if multidimensional:
|
| 945 |
+
# only one multidimensional solver available at the moment
|
| 946 |
+
solver = MDNewton
|
| 947 |
+
if not 'norm' in kwargs:
|
| 948 |
+
norm = lambda x: ctx.norm(x, 'inf')
|
| 949 |
+
kwargs['norm'] = norm
|
| 950 |
+
else:
|
| 951 |
+
norm = kwargs['norm']
|
| 952 |
+
else:
|
| 953 |
+
norm = abs
|
| 954 |
+
|
| 955 |
+
# happily return starting point if it's a root
|
| 956 |
+
if norm(fx) == 0:
|
| 957 |
+
if multidimensional:
|
| 958 |
+
return ctx.matrix(x0)
|
| 959 |
+
else:
|
| 960 |
+
return x0[0]
|
| 961 |
+
|
| 962 |
+
# use solver
|
| 963 |
+
iterations = solver(ctx, f, x0, **kwargs)
|
| 964 |
+
if 'maxsteps' in kwargs:
|
| 965 |
+
maxsteps = kwargs['maxsteps']
|
| 966 |
+
else:
|
| 967 |
+
maxsteps = iterations.maxsteps
|
| 968 |
+
i = 0
|
| 969 |
+
for x, error in iterations:
|
| 970 |
+
if verbose:
|
| 971 |
+
print('x: ', x)
|
| 972 |
+
print('error:', error)
|
| 973 |
+
i += 1
|
| 974 |
+
if error < tol * max(1, norm(x)) or i >= maxsteps:
|
| 975 |
+
break
|
| 976 |
+
else:
|
| 977 |
+
if not i:
|
| 978 |
+
raise ValueError('Could not find root using the given solver.\n'
|
| 979 |
+
'Try another starting point or tweak arguments.')
|
| 980 |
+
if not isinstance(x, (list, tuple, ctx.matrix)):
|
| 981 |
+
xl = [x]
|
| 982 |
+
else:
|
| 983 |
+
xl = x
|
| 984 |
+
if verify and norm(f(*xl))**2 > tol: # TODO: better condition?
|
| 985 |
+
raise ValueError('Could not find root within given tolerance. '
|
| 986 |
+
'(%s > %s)\n'
|
| 987 |
+
'Try another starting point or tweak arguments.'
|
| 988 |
+
% (norm(f(*xl))**2, tol))
|
| 989 |
+
return x
|
| 990 |
+
finally:
|
| 991 |
+
ctx.prec = prec
|
| 992 |
+
|
| 993 |
+
|
| 994 |
+
def multiplicity(ctx, f, root, tol=None, maxsteps=10, **kwargs):
    """
    Return the multiplicity of a given root of f.

    Internally, numerical derivatives are used. This might be inefficient for
    higher order derivatives. Due to this, ``multiplicity`` cancels after
    evaluating 10 derivatives by default. You can specify the n-th derivative
    using the dnf keyword.

    >>> from mpmath import *
    >>> multiplicity(lambda x: sin(x) - 1, pi/2)
    2

    """
    if tol is None:
        # Default tolerance derived from the working precision; slightly
        # looser than eps to absorb numerical-differentiation noise.
        tol = ctx.eps ** 0.8
    # Treat f itself as the 0th derivative so the loop below is uniform.
    kwargs['d0f'] = f
    # The multiplicity is the order of the first derivative that does not
    # vanish at the root.
    for i in range(maxsteps):
        dfstr = 'd' + str(i) + 'f'
        if dfstr in kwargs:
            # A user-supplied exact derivative takes precedence.
            df = kwargs[dfstr]
        else:
            # Fall back to numerical differentiation of order i.
            # (The lambda binds the current i, but it is called immediately
            # below, so late binding is not an issue here.)
            df = lambda x: ctx.diff(f, x, i)
        if not abs(df(root)) < tol:
            break
    return i
|
| 1020 |
+
|
| 1021 |
+
def steffensen(f):
    """
    linear convergent function -> quadratic convergent function

    Steffensen's method for quadratic convergence of a linear converging
    sequence.
    Do not use it for higher rates of convergence.
    It may even work for divergent sequences.

    Definition:
    F(x) = (x*f(f(x)) - f(x)**2) / (f(f(x)) - 2*f(x) + x)

    Example
    .......

    You can use Steffensen's method to accelerate a fixpoint iteration of linear
    (or less) convergence.

    x* is a fixpoint of the iteration x_{k+1} = phi(x_k) if x* = phi(x*). For
    phi(x) = x**2 there are two fixpoints: 0 and 1.

    Let's try Steffensen's method:

    >>> f = lambda x: x**2
    >>> from mpmath.calculus.optimization import steffensen
    >>> F = steffensen(f)
    >>> for x in [0.5, 0.9, 2.0]:
    ...     fx = Fx = x
    ...     for i in xrange(9):
    ...         try:
    ...             fx = f(fx)
    ...         except OverflowError:
    ...             pass
    ...         try:
    ...             Fx = F(Fx)
    ...         except ZeroDivisionError:
    ...             pass
    ...         print('%20g %20g' % (fx, Fx))
    0.25                 -0.5
    0.0625                  0.1
    0.00390625           -0.0011236
    1.52588e-05          1.41691e-09
    2.32831e-10         -2.84465e-27
    5.42101e-20          2.30189e-80
    2.93874e-39         -1.2197e-239
    8.63617e-78                    0
    7.45834e-155                    0
    0.81              1.02676
    0.6561              1.00134
    0.430467                    1
    0.185302                    1
    0.0343368                    1
    0.00117902                    1
    1.39008e-06                    1
    1.93233e-12                    1
    3.73392e-24                    1
    4                  1.6
    16               1.2962
    256              1.10194
    65536              1.01659
    4.29497e+09              1.00053
    1.84467e+19                    1
    3.40282e+38                    1
    1.15792e+77                    1
    1.34078e+154                    1

    Unmodified, the iteration converges only towards 0. Modified it converges
    not only much faster, it converges even to the repelling fixpoint 1.
    """
    def F(x):
        # Aitken's delta-squared extrapolation applied to the fixed-point
        # map f; raises ZeroDivisionError when the denominator vanishes
        # (i.e. at an exact fixpoint), which callers are expected to handle.
        fx = f(x)
        ffx = f(fx)
        return (x*ffx - fx**2) / (ffx - 2*fx + x)
    return F
|
| 1095 |
+
|
| 1096 |
+
# Attach the root-finding helpers as methods of the OptimizationMethods
# mixin, so they become available on mpmath context objects (mp, fp, ...).
OptimizationMethods.jacobian = jacobian
OptimizationMethods.findroot = findroot
OptimizationMethods.multiplicity = multiplicity

if __name__ == '__main__':
    # Run the doctests embedded in this module's docstrings.
    import doctest
    doctest.testmod()
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/calculus/polynomials.py
ADDED
|
@@ -0,0 +1,213 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from ..libmp.backend import xrange
|
| 2 |
+
from .calculus import defun
|
| 3 |
+
|
| 4 |
+
#----------------------------------------------------------------------------#
|
| 5 |
+
# Polynomials #
|
| 6 |
+
#----------------------------------------------------------------------------#
|
| 7 |
+
|
| 8 |
+
# XXX: extra precision
|
| 9 |
+
@defun
def polyval(ctx, coeffs, x, derivative=False):
    r"""
    Given coefficients `[c_n, \ldots, c_2, c_1, c_0]` and a number `x`,
    :func:`~mpmath.polyval` evaluates the polynomial

    .. math ::

        P(x) = c_n x^n + \ldots + c_2 x^2 + c_1 x + c_0.

    If *derivative=True* is set, :func:`~mpmath.polyval` simultaneously
    evaluates `P(x)` with the derivative, `P'(x)`, and returns the
    tuple `(P(x), P'(x))`.

        >>> from mpmath import *
        >>> mp.pretty = True
        >>> polyval([3, 0, 2], 0.5)
        2.75
        >>> polyval([3, 0, 2], 0.5, derivative=True)
        (2.75, 3.0)

    The coefficients and the evaluation point may be any combination
    of real or complex numbers.
    """
    if not coeffs:
        # Empty coefficient list: the zero polynomial. Keep the return
        # shape consistent with the *derivative* flag (previously a bare
        # scalar was returned even when a (P, P') pair was requested).
        if derivative:
            return ctx.zero, ctx.zero
        return ctx.zero
    # Horner's scheme: p accumulates P(x); q simultaneously accumulates
    # P'(x) via the recurrence q <- p + x*q.
    p = ctx.convert(coeffs[0])
    q = ctx.zero
    for c in coeffs[1:]:
        if derivative:
            q = p + x*q
        p = c + x*p
    if derivative:
        return p, q
    else:
        return p
|
| 45 |
+
|
| 46 |
+
@defun
def polyroots(ctx, coeffs, maxsteps=50, cleanup=True, extraprec=10,
        error=False, roots_init=None):
    """
    Computes all roots (real or complex) of a given polynomial.

    The roots are returned as a sorted list, where real roots appear first
    followed by complex conjugate roots as adjacent elements. The polynomial
    should be given as a list of coefficients, in the format used by
    :func:`~mpmath.polyval`. The leading coefficient must be nonzero.

    With *error=True*, :func:`~mpmath.polyroots` returns a tuple *(roots, err)*
    where *err* is an estimate of the maximum error among the computed roots.

    **Examples**

    Finding the three real roots of `x^3 - x^2 - 14x + 24`::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> nprint(polyroots([1,-1,-14,24]), 4)
        [-4.0, 2.0, 3.0]

    Finding the two complex conjugate roots of `4x^2 + 3x + 2`, with an
    error estimate::

        >>> roots, err = polyroots([4,3,2], error=True)
        >>> for r in roots:
        ...     print(r)
        ...
        (-0.375 + 0.59947894041409j)
        (-0.375 - 0.59947894041409j)
        >>>
        >>> err
        2.22044604925031e-16
        >>>
        >>> polyval([4,3,2], roots[0])
        (2.22044604925031e-16 + 0.0j)
        >>> polyval([4,3,2], roots[1])
        (2.22044604925031e-16 + 0.0j)

    The following example computes all the 5th roots of unity; that is,
    the roots of `x^5 - 1`::

        >>> mp.dps = 20
        >>> for r in polyroots([1, 0, 0, 0, 0, -1]):
        ...     print(r)
        ...
        1.0
        (-0.8090169943749474241 + 0.58778525229247312917j)
        (-0.8090169943749474241 - 0.58778525229247312917j)
        (0.3090169943749474241 + 0.95105651629515357212j)
        (0.3090169943749474241 - 0.95105651629515357212j)

    **Precision and conditioning**

    The roots are computed to the current working precision accuracy. If this
    accuracy cannot be achieved in ``maxsteps`` steps, then a
    ``NoConvergence`` exception is raised. The algorithm internally is using
    the current working precision extended by ``extraprec``. If
    ``NoConvergence`` was raised, that is caused either by not having enough
    extra precision to achieve convergence (in which case increasing
    ``extraprec`` should fix the problem) or too low ``maxsteps`` (in which
    case increasing ``maxsteps`` should fix the problem), or a combination of
    both.

    The user should always do a convergence study with regards to
    ``extraprec`` to ensure accurate results. It is possible to get
    convergence to a wrong answer with too low ``extraprec``.

    Provided there are no repeated roots, :func:`~mpmath.polyroots` can
    typically compute all roots of an arbitrary polynomial to high precision::

        >>> mp.dps = 60
        >>> for r in polyroots([1, 0, -10, 0, 1]):
        ...     print(r)
        ...
        -3.14626436994197234232913506571557044551247712918732870123249
        -0.317837245195782244725757617296174288373133378433432554879127
        0.317837245195782244725757617296174288373133378433432554879127
        3.14626436994197234232913506571557044551247712918732870123249
        >>>
        >>> sqrt(3) + sqrt(2)
        3.14626436994197234232913506571557044551247712918732870123249
        >>> sqrt(3) - sqrt(2)
        0.317837245195782244725757617296174288373133378433432554879127

    **Algorithm**

    :func:`~mpmath.polyroots` implements the Durand-Kerner method [1], which
    uses complex arithmetic to locate all roots simultaneously.
    The Durand-Kerner method can be viewed as approximately performing
    simultaneous Newton iteration for all the roots. In particular,
    the convergence to simple roots is quadratic, just like Newton's
    method.

    Although all roots are internally calculated using complex arithmetic, any
    root found to have an imaginary part smaller than the estimated numerical
    error is truncated to a real number (small real parts are also chopped).
    Real roots are placed first in the returned list, sorted by value. The
    remaining complex roots are sorted by their real parts so that conjugate
    roots end up next to each other.

    **References**

    1. http://en.wikipedia.org/wiki/Durand-Kerner_method

    """
    if len(coeffs) <= 1:
        if not coeffs or not coeffs[0]:
            raise ValueError("Input to polyroots must not be the zero polynomial")
        # Constant polynomial with no roots
        return []

    orig = ctx.prec
    # Convergence target relative to the caller's precision (note: +ctx.eps
    # snapshots eps before the precision is temporarily raised below).
    tol = +ctx.eps
    with ctx.extraprec(extraprec):
        deg = len(coeffs) - 1
        # Must be monic
        lead = ctx.convert(coeffs[0])
        if lead == 1:
            coeffs = [ctx.convert(c) for c in coeffs]
        else:
            coeffs = [c/lead for c in coeffs]
        f = lambda x: ctx.polyval(coeffs, x)
        if roots_init is None:
            # Standard Durand-Kerner starting values: powers of a complex
            # number that is neither real nor on the unit circle.
            roots = [ctx.mpc((0.4+0.9j)**n) for n in xrange(deg)]
        else:
            # Use the caller's initial guesses, padding with the standard
            # starting values if too few were supplied.
            roots = [None]*deg;
            deg_init = min(deg, len(roots_init))
            roots[:deg_init] = list(roots_init[:deg_init])
            roots[deg_init:] = [ctx.mpc((0.4+0.9j)**n) for n
                                in xrange(deg_init,deg)]
        # Per-root error estimates (size of the last correction step).
        err = [ctx.one for n in xrange(deg)]
        # Durand-Kerner iteration until convergence
        for step in xrange(maxsteps):
            if abs(max(err)) < tol:
                break
            for i in xrange(deg):
                p = roots[i]
                x = f(p)
                # Weierstrass correction: divide by the product of
                # differences with all other current root estimates.
                for j in range(deg):
                    if i != j:
                        try:
                            x /= (p-roots[j])
                        except ZeroDivisionError:
                            # Two estimates coincided; skip this factor
                            # rather than aborting the whole iteration.
                            continue
                roots[i] = p - x
                err[i] = abs(x)
        if abs(max(err)) >= tol:
            raise ctx.NoConvergence("Didn't converge in maxsteps=%d steps." \
                    % maxsteps)
        # Remove small real or imaginary parts
        if cleanup:
            for i in xrange(deg):
                if abs(roots[i]) < tol:
                    roots[i] = ctx.zero
                elif abs(ctx._im(roots[i])) < tol:
                    roots[i] = roots[i].real
                elif abs(ctx._re(roots[i])) < tol:
                    roots[i] = roots[i].imag * 1j
        # Real roots first (im == 0), then complex roots ordered by real
        # part so that conjugate pairs end up adjacent.
        roots.sort(key=lambda x: (abs(ctx._im(x)), ctx._re(x)))
    if error:
        err = max(err)
        # Never report an error smaller than one ulp at the caller's
        # original precision.
        err = max(err, ctx.ldexp(1, -orig+1))
        return [+r for r in roots], +err
    else:
        return [+r for r in roots]
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/calculus/quadrature.py
ADDED
|
@@ -0,0 +1,1115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import math
|
| 2 |
+
|
| 3 |
+
from ..libmp.backend import xrange
|
| 4 |
+
|
| 5 |
+
class QuadratureRule(object):
|
| 6 |
+
"""
|
| 7 |
+
Quadrature rules are implemented using this class, in order to
|
| 8 |
+
simplify the code and provide a common infrastructure
|
| 9 |
+
for tasks such as error estimation and node caching.
|
| 10 |
+
|
| 11 |
+
You can implement a custom quadrature rule by subclassing
|
| 12 |
+
:class:`QuadratureRule` and implementing the appropriate
|
| 13 |
+
methods. The subclass can then be used by :func:`~mpmath.quad` by
|
| 14 |
+
passing it as the *method* argument.
|
| 15 |
+
|
| 16 |
+
:class:`QuadratureRule` instances are supposed to be singletons.
|
| 17 |
+
:class:`QuadratureRule` therefore implements instance caching
|
| 18 |
+
in :func:`~mpmath.__new__`.
|
| 19 |
+
"""
|
| 20 |
+
|
| 21 |
+
    def __init__(self, ctx):
        # Context (mp, fp, ...) whose precision and number types are used
        # for node computation.
        self.ctx = ctx
        # Cache of nodes for the standard interval [-1, 1],
        # keyed by (degree, prec).
        self.standard_cache = {}
        # Cache of nodes transformed to specific intervals,
        # keyed by (a, b, degree, prec).
        self.transformed_cache = {}
        # Records intervals requested once; transformed nodes are only
        # cached on a repeat request (see get_nodes).
        self.interval_count = {}
|
| 26 |
+
|
| 27 |
+
    def clear(self):
        """
        Delete cached node data.
        """
        # Rebind fresh dicts (rather than clearing in place) so any
        # outstanding references to the old caches are left untouched.
        self.standard_cache = {}
        self.transformed_cache = {}
        self.interval_count = {}
|
| 34 |
+
|
| 35 |
+
    def calc_nodes(self, degree, prec, verbose=False):
        r"""
        Compute nodes for the standard interval `[-1, 1]`. Subclasses
        should probably implement only this method, and use
        :func:`~mpmath.get_nodes` method to retrieve the nodes.
        """
        # Abstract hook: concrete quadrature rules (e.g. tanh-sinh,
        # Gauss-Legendre) must override this.
        raise NotImplementedError
|
| 42 |
+
|
| 43 |
+
    def get_nodes(self, a, b, degree, prec, verbose=False):
        """
        Return nodes for given interval, degree and precision. The
        nodes are retrieved from a cache if already computed;
        otherwise they are computed by calling :func:`~mpmath.calc_nodes`
        and are then cached.

        Subclasses should probably not implement this method,
        but just implement :func:`~mpmath.calc_nodes` for the actual
        node computation.
        """
        key = (a, b, degree, prec)
        if key in self.transformed_cache:
            return self.transformed_cache[key]
        orig = self.ctx.prec
        try:
            # Extra guard digits for the node computation itself.
            self.ctx.prec = prec+20
            # Get nodes on standard interval
            if (degree, prec) in self.standard_cache:
                nodes = self.standard_cache[degree, prec]
            else:
                nodes = self.calc_nodes(degree, prec, verbose)
                self.standard_cache[degree, prec] = nodes
            # Transform to general interval
            nodes = self.transform_nodes(nodes, a, b, verbose)
            if key in self.interval_count:
                # Second request for this interval: now worth caching the
                # transformed nodes too.
                self.transformed_cache[key] = nodes
            else:
                # First request: only record that the interval was seen,
                # to avoid caching nodes for one-off intervals.
                self.interval_count[key] = True
        finally:
            # Always restore the caller's precision.
            self.ctx.prec = orig
        return nodes
|
| 75 |
+
|
| 76 |
+
    def transform_nodes(self, nodes, a, b, verbose=False):
        r"""
        Rescale standardized nodes (for `[-1, 1]`) to a general
        interval `[a, b]`. For a finite interval, a simple linear
        change of variables is used. Otherwise, the following
        transformations are used:

        .. math ::

            \lbrack a, \infty \rbrack : t = \frac{1}{x} + (a-1)

            \lbrack -\infty, b \rbrack : t = (b+1) - \frac{1}{x}

            \lbrack -\infty, \infty \rbrack : t = \frac{x}{\sqrt{1-x^2}}

        """
        ctx = self.ctx
        a = ctx.convert(a)
        b = ctx.convert(b)
        one = ctx.one
        # Already the standard interval: nothing to do.
        if (a, b) == (-one, one):
            return nodes
        half = ctx.mpf(0.5)
        new_nodes = []
        if ctx.isinf(a) or ctx.isinf(b):
            if (a, b) == (ctx.ninf, ctx.inf):
                # Doubly infinite interval: t = x/sqrt(1-x^2); the weight
                # is multiplied by the Jacobian (1-x^2)^(-3/2), computed
                # here as spx1/px1 = (1-x^2)^(-1/2) / (1-x^2).
                p05 = -half
                for x, w in nodes:
                    x2 = x*x
                    px1 = one-x2
                    spx1 = px1**p05
                    x = x*spx1
                    w *= spx1/px1
                    new_nodes.append((x, w))
            elif a == ctx.ninf:
                # (-inf, b]: t = (b+1) - 2/(x+1); Jacobian is u^2/2
                # with u = 2/(x+1).
                b1 = b+1
                for x, w in nodes:
                    u = 2/(x+one)
                    x = b1-u
                    w *= half*u**2
                    new_nodes.append((x, w))
            elif b == ctx.inf:
                # [a, inf): mirror image of the previous case,
                # t = (a-1) + 2/(x+1).
                a1 = a-1
                for x, w in nodes:
                    u = 2/(x+one)
                    x = a1+u
                    w *= half*u**2
                    new_nodes.append((x, w))
            elif a == ctx.inf or b == ctx.ninf:
                # Reversed orientation: transform the swapped interval
                # and negate all weights.
                return [(x,-w) for (x,w) in self.transform_nodes(nodes, b, a, verbose)]
            else:
                # Defensive: should be unreachable given the branches
                # above cover every infinite-endpoint combination.
                raise NotImplementedError
        else:
            # Simple linear change of variables
            C = (b-a)/2
            D = (b+a)/2
            for x, w in nodes:
                new_nodes.append((D+C*x, C*w))
        return new_nodes
|
| 135 |
+
|
| 136 |
+
def guess_degree(self, prec):
|
| 137 |
+
"""
|
| 138 |
+
Given a desired precision `p` in bits, estimate the degree `m`
|
| 139 |
+
of the quadrature required to accomplish full accuracy for
|
| 140 |
+
typical integrals. By default, :func:`~mpmath.quad` will perform up
|
| 141 |
+
to `m` iterations. The value of `m` should be a slight
|
| 142 |
+
overestimate, so that "slightly bad" integrals can be dealt
|
| 143 |
+
with automatically using a few extra iterations. On the
|
| 144 |
+
other hand, it should not be too big, so :func:`~mpmath.quad` can
|
| 145 |
+
quit within a reasonable amount of time when it is given
|
| 146 |
+
an "unsolvable" integral.
|
| 147 |
+
|
| 148 |
+
The default formula used by :func:`~mpmath.guess_degree` is tuned
|
| 149 |
+
for both :class:`TanhSinh` and :class:`GaussLegendre`.
|
| 150 |
+
The output is roughly as follows:
|
| 151 |
+
|
| 152 |
+
+---------+---------+
|
| 153 |
+
| `p` | `m` |
|
| 154 |
+
+=========+=========+
|
| 155 |
+
| 50 | 6 |
|
| 156 |
+
+---------+---------+
|
| 157 |
+
| 100 | 7 |
|
| 158 |
+
+---------+---------+
|
| 159 |
+
| 500 | 10 |
|
| 160 |
+
+---------+---------+
|
| 161 |
+
| 3000 | 12 |
|
| 162 |
+
+---------+---------+
|
| 163 |
+
|
| 164 |
+
This formula is based purely on a limited amount of
|
| 165 |
+
experimentation and will sometimes be wrong.
|
| 166 |
+
"""
|
| 167 |
+
# Expected degree
|
| 168 |
+
# XXX: use mag
|
| 169 |
+
g = int(4 + max(0, self.ctx.log(prec/30.0, 2)))
|
| 170 |
+
# Reasonable "worst case"
|
| 171 |
+
g += 2
|
| 172 |
+
return g
|
| 173 |
+
|
| 174 |
+
def estimate_error(self, results, prec, epsilon):
|
| 175 |
+
r"""
|
| 176 |
+
Given results from integrations `[I_1, I_2, \ldots, I_k]` done
|
| 177 |
+
with a quadrature of rule of degree `1, 2, \ldots, k`, estimate
|
| 178 |
+
the error of `I_k`.
|
| 179 |
+
|
| 180 |
+
For `k = 2`, we estimate `|I_{\infty}-I_2|` as `|I_2-I_1|`.
|
| 181 |
+
|
| 182 |
+
For `k > 2`, we extrapolate `|I_{\infty}-I_k| \approx |I_{k+1}-I_k|`
|
| 183 |
+
from `|I_k-I_{k-1}|` and `|I_k-I_{k-2}|` under the assumption
|
| 184 |
+
that each degree increment roughly doubles the accuracy of
|
| 185 |
+
the quadrature rule (this is true for both :class:`TanhSinh`
|
| 186 |
+
and :class:`GaussLegendre`). The extrapolation formula is given
|
| 187 |
+
by Borwein, Bailey & Girgensohn. Although not very conservative,
|
| 188 |
+
this method seems to be very robust in practice.
|
| 189 |
+
"""
|
| 190 |
+
if len(results) == 2:
|
| 191 |
+
return abs(results[0]-results[1])
|
| 192 |
+
try:
|
| 193 |
+
if results[-1] == results[-2] == results[-3]:
|
| 194 |
+
return self.ctx.zero
|
| 195 |
+
D1 = self.ctx.log(abs(results[-1]-results[-2]), 10)
|
| 196 |
+
D2 = self.ctx.log(abs(results[-1]-results[-3]), 10)
|
| 197 |
+
except ValueError:
|
| 198 |
+
return epsilon
|
| 199 |
+
D3 = -prec
|
| 200 |
+
D4 = min(0, max(D1**2/D2, 2*D1, D3))
|
| 201 |
+
return self.ctx.mpf(10) ** int(D4)
|
| 202 |
+
|
| 203 |
+
    def summation(self, f, points, prec, epsilon, max_degree, verbose=False):
        """
        Main integration function. Computes the 1D integral over
        the interval specified by *points*. For each subinterval,
        performs quadrature of degree from 1 up to *max_degree*
        until :func:`~mpmath.estimate_error` signals convergence.

        :func:`~mpmath.summation` transforms each subintegration to
        the standard interval and then calls :func:`~mpmath.sum_next`.

        Returns a pair ``(I, total_err)``: the accumulated integral over
        all subintervals and the sum of the per-subinterval error
        estimates.
        """
        ctx = self.ctx
        I = total_err = ctx.zero
        # Integrate each consecutive pair of points as a subinterval.
        for i in xrange(len(points)-1):
            a, b = points[i], points[i+1]
            if a == b:
                # Empty subinterval contributes nothing.
                continue
            # XXX: we could use a single variable transformation,
            # but this is not good in practice. We get better accuracy
            # by having 0 as an endpoint.
            if (a, b) == (ctx.ninf, ctx.inf):
                # Fold the doubly infinite integral onto [0, inf) by
                # integrating f(-x) + f(x).
                # NOTE(review): rebinding f here means any *later*
                # subintervals in the same points list would integrate the
                # folded function rather than the original -- this assumes
                # (-inf, inf) only appears as the sole interval; confirm.
                _f = f
                f = lambda x: _f(-x) + _f(x)
                a, b = (ctx.zero, ctx.inf)
            results = []
            err = ctx.zero
            # Raise the quadrature degree until the error estimate
            # drops below epsilon (or max_degree is exhausted).
            for degree in xrange(1, max_degree+1):
                nodes = self.get_nodes(a, b, degree, prec, verbose)
                if verbose:
                    print("Integrating from %s to %s (degree %s of %s)" % \
                        (ctx.nstr(a), ctx.nstr(b), degree, max_degree))
                # sum_next may reuse the lower-degree results in `results`.
                result = self.sum_next(f, nodes, degree, prec, results, verbose)
                results.append(result)
                if degree > 1:
                    err = self.estimate_error(results, prec, epsilon)
                    if verbose:
                        print("Estimated error:", ctx.nstr(err), " epsilon:", ctx.nstr(epsilon), " result: ", ctx.nstr(result))
                    if err <= epsilon:
                        break
            I += results[-1]
            total_err += err
        if total_err > epsilon:
            if verbose:
                print("Failed to reach full accuracy. Estimated error:", ctx.nstr(total_err))
        return I, total_err
|
| 247 |
+
|
| 248 |
+
def sum_next(self, f, nodes, degree, prec, previous, verbose=False):
|
| 249 |
+
r"""
|
| 250 |
+
Evaluates the step sum `\sum w_k f(x_k)` where the *nodes* list
|
| 251 |
+
contains the `(w_k, x_k)` pairs.
|
| 252 |
+
|
| 253 |
+
:func:`~mpmath.summation` will supply the list *results* of
|
| 254 |
+
values computed by :func:`~mpmath.sum_next` at previous degrees, in
|
| 255 |
+
case the quadrature rule is able to reuse them.
|
| 256 |
+
"""
|
| 257 |
+
return self.ctx.fdot((w, f(x)) for (x,w) in nodes)
|
| 258 |
+
|
| 259 |
+
|
| 260 |
+
class TanhSinh(QuadratureRule):
    r"""
    This class implements "tanh-sinh" or "doubly exponential"
    quadrature. This quadrature rule is based on the Euler-Maclaurin
    integral formula. By performing a change of variables involving
    nested exponentials / hyperbolic functions (hence the name), the
    derivatives at the endpoints vanish rapidly. Since the error term
    in the Euler-Maclaurin formula depends on the derivatives at the
    endpoints, a simple step sum becomes extremely accurate. In
    practice, this means that doubling the number of evaluation
    points roughly doubles the number of accurate digits.

    Comparison to Gauss-Legendre:

    * Initial computation of nodes is usually faster
    * Handles endpoint singularities better
    * Handles infinite integration intervals better
    * Is slower for smooth integrands once nodes have been computed

    The implementation of the tanh-sinh algorithm is based on the
    description given in Borwein, Bailey & Girgensohn, "Experimentation
    in Mathematics - Computational Paths to Discovery", A K Peters,
    2003, pages 312-313. In the present implementation, a few
    improvements have been made:

    * A more efficient scheme is used to compute nodes (exploiting
      recurrence for the exponential function)
    * The nodes are computed successively instead of all at once

    **References**

    * [Bailey]_
    * http://users.cs.dal.ca/~jborwein/tanh-sinh.pdf

    """

    def sum_next(self, f, nodes, degree, prec, previous, verbose=False):
        """
        Step sum for tanh-sinh quadrature of degree `m`. We exploit the
        fact that half of the abscissas at degree `m` are precisely the
        abscissas from degree `m-1`. Thus reusing the result from
        the previous level allows a 2x speedup.
        """
        # Step length h = 2^(-m); this degree's nodes fill in the
        # midpoints between the nodes of degree m-1.
        h = self.ctx.mpf(2)**(-degree)
        # Abscissas overlap, so reusing saves half of the time
        if previous:
            # previous[-1] == h_prev * S_prev with h_prev = 2*h, so
            # dividing by 2*h recovers the bare sum S_prev.
            S = previous[-1]/(h*2)
        else:
            S = self.ctx.zero
        S += self.ctx.fdot((w,f(x)) for (x,w) in nodes)
        return h*S

    def calc_nodes(self, degree, prec, verbose=False):
        r"""
        The abscissas and weights for tanh-sinh quadrature of degree
        `m` are given by

        .. math::

            x_k = \tanh(\pi/2 \sinh(t_k))

            w_k = \pi/2 \cosh(t_k) / \cosh(\pi/2 \sinh(t_k))^2

        where `t_k = t_0 + hk` for a step length `h \sim 2^{-m}`. The
        list of nodes is actually infinite, but the weights die off so
        rapidly that only a few are needed.
        """
        ctx = self.ctx
        nodes = []

        # Guard bits while generating the nodes; removed again at the end.
        extra = 20
        ctx.prec += extra
        # Stop generating nodes once |x - 1| falls below this tolerance
        # (the abscissas approach 1 doubly exponentially).
        tol = ctx.ldexp(1, -prec-10)
        pi4 = ctx.pi/4

        # For simplicity, we work in steps h = 1/2^n, with the first point
        # offset so that we can reuse the sum from the previous degree

        # We define degree 1 to include the "degree 0" steps, including
        # the point x = 0. (It doesn't work well otherwise; not sure why.)
        t0 = ctx.ldexp(1, -degree)
        if degree == 1:
            # The central node x = 0 carries weight pi/2.
            nodes.append((ctx.zero, ctx.pi/2))
            h = t0
        else:
            h = t0*2

        # Since h is fixed, we can compute the next exponential
        # by simply multiplying by exp(h)
        expt0 = ctx.exp(t0)
        a = pi4 * expt0
        b = pi4 / expt0
        udelta = ctx.exp(h)
        urdelta = 1/udelta

        for k in xrange(0, 20*2**degree+1):
            # Reference implementation:
            # t = t0 + k*h
            # x = tanh(pi/2 * sinh(t))
            # w = pi/2 * cosh(t) / cosh(pi/2 * sinh(t))**2

            # Fast implementation. Note that c = exp(pi/2 * sinh(t))
            c = ctx.exp(a-b)
            d = 1/c
            co = (c+d)/2
            si = (c-d)/2
            x = si / co
            w = (a+b) / co**2
            diff = abs(x-1)
            if diff <= tol:
                # Remaining nodes are indistinguishable from the
                # endpoint at this precision.
                break

            # Nodes come in symmetric +/- pairs with equal weights.
            nodes.append((x, w))
            nodes.append((-x, w))

            # Advance a = pi/4*exp(t), b = pi/4*exp(-t) by one step h.
            a *= udelta
            b *= urdelta

            if verbose and k % 300 == 150:
                # Note: the number displayed is rather arbitrary. Should
                # figure out how to print something that looks more like a
                # percentage
                print("Calculating nodes:", ctx.nstr(-ctx.log(diff, 10) / prec))

        ctx.prec -= extra
        return nodes
|
| 387 |
+
|
| 388 |
+
|
| 389 |
+
class GaussLegendre(QuadratureRule):
    r"""
    This class implements Gauss-Legendre quadrature, which is
    exceptionally efficient for polynomials and polynomial-like (i.e.
    very smooth) integrands.

    The abscissas and weights are given by roots and values of
    Legendre polynomials, which are the orthogonal polynomials
    on `[-1, 1]` with respect to the unit weight
    (see :func:`~mpmath.legendre`).

    In this implementation, we take the "degree" `m` of the quadrature
    to denote a Gauss-Legendre rule of degree `3 \cdot 2^m` (following
    Borwein, Bailey & Girgensohn). This way we get quadratic, rather
    than linear, convergence as the degree is incremented.

    Comparison to tanh-sinh quadrature:

    * Is faster for smooth integrands once nodes have been computed
    * Initial computation of nodes is usually slower
    * Handles endpoint singularities worse
    * Handles infinite integration intervals worse

    """

    def calc_nodes(self, degree, prec, verbose=False):
        r"""
        Calculates the abscissas and weights for Gauss-Legendre
        quadrature of degree of given degree (actually `3 \cdot 2^m`).
        """
        ctx = self.ctx
        # It is important that the epsilon is set lower than the
        # "real" epsilon
        epsilon = ctx.ldexp(1, -prec-8)
        # Fairly high precision might be required for accurate
        # evaluation of the roots
        orig = ctx.prec
        ctx.prec = int(prec*1.5)
        if degree == 1:
            # Degree 1 is the classical 3-point rule, whose nodes and
            # weights have simple closed forms.
            x = ctx.sqrt(ctx.mpf(3)/5)
            w = ctx.mpf(5)/9
            nodes = [(-x,w),(ctx.zero,ctx.mpf(8)/9),(x,w)]
            ctx.prec = orig
            return nodes
        nodes = []
        n = 3*2**(degree-1)
        # Only compute roots in (0, 1); the others follow by symmetry.
        upto = n//2 + 1
        for j in xrange(1, upto):
            # Asymptotic formula for the roots
            r = ctx.mpf(math.cos(math.pi*(j-0.25)/(n+0.5)))
            # Newton iteration
            while 1:
                t1, t2 = 1, 0
                # Evaluates the Legendre polynomial using its defining
                # recurrence relation
                for j1 in xrange(1,n+1):
                    t3, t2, t1 = t2, t1, ((2*j1-1)*r*t1 - (j1-1)*t2)/j1
                # t4 = derivative of P_n at r (standard identity in
                # terms of P_n and P_{n-1}).
                t4 = n*(r*t1-t2)/(r**2-1)
                a = t1/t4
                r = r - a
                if abs(a) < epsilon:
                    break
            x = r
            # Weight from the derivative at the root.
            w = 2/((1-r**2)*t4**2)
            if verbose and j % 30 == 15:
                print("Computing nodes (%i of %i)" % (j, upto))
            # Symmetric +/- pair with equal weights.
            nodes.append((x, w))
            nodes.append((-x, w))
        ctx.prec = orig
        return nodes
|
| 458 |
+
|
| 459 |
+
class QuadratureMethods(object):
|
| 460 |
+
|
| 461 |
+
    def __init__(ctx, *args, **kwargs):
        # Build one instance of each quadrature rule up front; the rules
        # cache their computed nodes, so sharing instances across calls
        # makes repeated integrations cheap.
        ctx._gauss_legendre = GaussLegendre(ctx)
        ctx._tanh_sinh = TanhSinh(ctx)
|
| 464 |
+
|
| 465 |
+
    def quad(ctx, f, *points, **kwargs):
        r"""
        Computes a single, double or triple integral over a given
        1D interval, 2D rectangle, or 3D cuboid. A basic example::

            >>> from mpmath import *
            >>> mp.dps = 15; mp.pretty = True
            >>> quad(sin, [0, pi])
            2.0

        A basic 2D integral::

            >>> f = lambda x, y: cos(x+y/2)
            >>> quad(f, [-pi/2, pi/2], [0, pi])
            4.0

        **Interval format**

        The integration range for each dimension may be specified
        using a list or tuple. Arguments are interpreted as follows:

        ``quad(f, [x1, x2])`` -- calculates
        `\int_{x_1}^{x_2} f(x) \, dx`

        ``quad(f, [x1, x2], [y1, y2])`` -- calculates
        `\int_{x_1}^{x_2} \int_{y_1}^{y_2} f(x,y) \, dy \, dx`

        ``quad(f, [x1, x2], [y1, y2], [z1, z2])`` -- calculates
        `\int_{x_1}^{x_2} \int_{y_1}^{y_2} \int_{z_1}^{z_2} f(x,y,z)
        \, dz \, dy \, dx`

        Endpoints may be finite or infinite. An interval descriptor
        may also contain more than two points. In this
        case, the integration is split into subintervals, between
        each pair of consecutive points. This is useful for
        dealing with mid-interval discontinuities, or integrating
        over large intervals where the function is irregular or
        oscillates.

        **Options**

        :func:`~mpmath.quad` recognizes the following keyword arguments:

        *method*
            Chooses integration algorithm (described below).
        *error*
            If set to true, :func:`~mpmath.quad` returns `(v, e)` where `v` is the
            integral and `e` is the estimated error.
        *maxdegree*
            Maximum degree of the quadrature rule to try before
            quitting.
        *verbose*
            Print details about progress.

        **Algorithms**

        Mpmath presently implements two integration algorithms: tanh-sinh
        quadrature and Gauss-Legendre quadrature. These can be selected
        using *method='tanh-sinh'* or *method='gauss-legendre'* or by
        passing the classes *method=TanhSinh*, *method=GaussLegendre*.
        The functions :func:`~mpmath.quadts` and :func:`~mpmath.quadgl` are also available
        as shortcuts.

        Both algorithms have the property that doubling the number of
        evaluation points roughly doubles the accuracy, so both are ideal
        for high precision quadrature (hundreds or thousands of digits).

        At high precision, computing the nodes and weights for the
        integration can be expensive (more expensive than computing the
        function values). To make repeated integrations fast, nodes
        are automatically cached.

        The advantages of the tanh-sinh algorithm are that it tends to
        handle endpoint singularities well, and that the nodes are cheap
        to compute on the first run. For these reasons, it is used by
        :func:`~mpmath.quad` as the default algorithm.

        Gauss-Legendre quadrature often requires fewer function
        evaluations, and is therefore often faster for repeated use, but
        the algorithm does not handle endpoint singularities as well and
        the nodes are more expensive to compute. Gauss-Legendre quadrature
        can be a better choice if the integrand is smooth and repeated
        integrations are required (e.g. for multiple integrals).

        See the documentation for :class:`TanhSinh` and
        :class:`GaussLegendre` for additional details.

        **Examples of 1D integrals**

        Intervals may be infinite or half-infinite. The following two
        examples evaluate the limits of the inverse tangent function
        (`\int 1/(1+x^2) = \tan^{-1} x`), and the Gaussian integral
        `\int_{\infty}^{\infty} \exp(-x^2)\,dx = \sqrt{\pi}`::

            >>> mp.dps = 15
            >>> quad(lambda x: 2/(x**2+1), [0, inf])
            3.14159265358979
            >>> quad(lambda x: exp(-x**2), [-inf, inf])**2
            3.14159265358979

        Integrals can typically be resolved to high precision.
        The following computes 50 digits of `\pi` by integrating the
        area of the half-circle defined by `x^2 + y^2 \le 1`,
        `-1 \le x \le 1`, `y \ge 0`::

            >>> mp.dps = 50
            >>> 2*quad(lambda x: sqrt(1-x**2), [-1, 1])
            3.1415926535897932384626433832795028841971693993751

        One can just as well compute 1000 digits (output truncated)::

            >>> mp.dps = 1000
            >>> 2*quad(lambda x: sqrt(1-x**2), [-1, 1]) #doctest:+ELLIPSIS
            3.141592653589793238462643383279502884...216420199

        Complex integrals are supported. The following computes
        a residue at `z = 0` by integrating counterclockwise along the
        diamond-shaped path from `1` to `+i` to `-1` to `-i` to `1`::

            >>> mp.dps = 15
            >>> chop(quad(lambda z: 1/z, [1,j,-1,-j,1]))
            (0.0 + 6.28318530717959j)

        **Examples of 2D and 3D integrals**

        Here are several nice examples of analytically solvable
        2D integrals (taken from MathWorld [1]) that can be evaluated
        to high precision fairly rapidly by :func:`~mpmath.quad`::

            >>> mp.dps = 30
            >>> f = lambda x, y: (x-1)/((1-x*y)*log(x*y))
            >>> quad(f, [0, 1], [0, 1])
            0.577215664901532860606512090082
            >>> +euler
            0.577215664901532860606512090082

            >>> f = lambda x, y: 1/sqrt(1+x**2+y**2)
            >>> quad(f, [-1, 1], [-1, 1])
            3.17343648530607134219175646705
            >>> 4*log(2+sqrt(3))-2*pi/3
            3.17343648530607134219175646705

            >>> f = lambda x, y: 1/(1-x**2 * y**2)
            >>> quad(f, [0, 1], [0, 1])
            1.23370055013616982735431137498
            >>> pi**2 / 8
            1.23370055013616982735431137498

            >>> quad(lambda x, y: 1/(1-x*y), [0, 1], [0, 1])
            1.64493406684822643647241516665
            >>> pi**2 / 6
            1.64493406684822643647241516665

        Multiple integrals may be done over infinite ranges::

            >>> mp.dps = 15
            >>> print(quad(lambda x,y: exp(-x-y), [0, inf], [1, inf]))
            0.367879441171442
            >>> print(1/e)
            0.367879441171442

        For nonrectangular areas, one can call :func:`~mpmath.quad` recursively.
        For example, we can replicate the earlier example of calculating
        `\pi` by integrating over the unit-circle, and actually use double
        quadrature to actually measure the area circle::

            >>> f = lambda x: quad(lambda y: 1, [-sqrt(1-x**2), sqrt(1-x**2)])
            >>> quad(f, [-1, 1])
            3.14159265358979

        Here is a simple triple integral::

            >>> mp.dps = 15
            >>> f = lambda x,y,z: x*y/(1+z)
            >>> quad(f, [0,1], [0,1], [1,2], method='gauss-legendre')
            0.101366277027041
            >>> (log(3)-log(2))/4
            0.101366277027041

        **Singularities**

        Both tanh-sinh and Gauss-Legendre quadrature are designed to
        integrate smooth (infinitely differentiable) functions. Neither
        algorithm copes well with mid-interval singularities (such as
        mid-interval discontinuities in `f(x)` or `f'(x)`).
        The best solution is to split the integral into parts::

            >>> mp.dps = 15
            >>> quad(lambda x: abs(sin(x)), [0, 2*pi])   # Bad
            3.99900894176779
            >>> quad(lambda x: abs(sin(x)), [0, pi, 2*pi])   # Good
            4.0

        The tanh-sinh rule often works well for integrands having a
        singularity at one or both endpoints::

            >>> mp.dps = 15
            >>> quad(log, [0, 1], method='tanh-sinh')   # Good
            -1.0
            >>> quad(log, [0, 1], method='gauss-legendre')   # Bad
            -0.999932197413801

        However, the result may still be inaccurate for some functions::

            >>> quad(lambda x: 1/sqrt(x), [0, 1], method='tanh-sinh')
            1.99999999946942

        This problem is not due to the quadrature rule per se, but to
        numerical amplification of errors in the nodes. The problem can be
        circumvented by temporarily increasing the precision::

            >>> mp.dps = 30
            >>> a = quad(lambda x: 1/sqrt(x), [0, 1], method='tanh-sinh')
            >>> mp.dps = 15
            >>> +a
            2.0

        **Highly variable functions**

        For functions that are smooth (in the sense of being infinitely
        differentiable) but contain sharp mid-interval peaks or many
        "bumps", :func:`~mpmath.quad` may fail to provide full accuracy. For
        example, with default settings, :func:`~mpmath.quad` is able to integrate
        `\sin(x)` accurately over an interval of length 100 but not over
        length 1000::

            >>> quad(sin, [0, 100]); 1-cos(100)   # Good
            0.137681127712316
            0.137681127712316
            >>> quad(sin, [0, 1000]); 1-cos(1000)   # Bad
            -37.8587612408485
            0.437620923709297

        One solution is to break the integration into 10 intervals of
        length 100::

            >>> quad(sin, linspace(0, 1000, 10))   # Good
            0.437620923709297

        Another is to increase the degree of the quadrature::

            >>> quad(sin, [0, 1000], maxdegree=10)   # Also good
            0.437620923709297

        Whether splitting the interval or increasing the degree is
        more efficient differs from case to case. Another example is the
        function `1/(1+x^2)`, which has a sharp peak centered around
        `x = 0`::

            >>> f = lambda x: 1/(1+x**2)
            >>> quad(f, [-100, 100])   # Bad
            3.64804647105268
            >>> quad(f, [-100, 100], maxdegree=10)   # Good
            3.12159332021646
            >>> quad(f, [-100, 0, 100])   # Also good
            3.12159332021646

        **References**

        1. http://mathworld.wolfram.com/DoubleIntegral.html

        """
        # Resolve the rule: a string names one of the two prebuilt shared
        # instances; anything else is assumed to be a rule class and is
        # instantiated with this context.
        rule = kwargs.get('method', 'tanh-sinh')
        if type(rule) is str:
            if rule == 'tanh-sinh':
                rule = ctx._tanh_sinh
            elif rule == 'gauss-legendre':
                rule = ctx._gauss_legendre
            else:
                raise ValueError("unknown quadrature rule: %s" % rule)
        else:
            rule = rule(ctx)
        verbose = kwargs.get('verbose')
        dim = len(points)
        orig = prec = ctx.prec
        # Target error a bit below working epsilon.
        epsilon = ctx.eps/8
        m = kwargs.get('maxdegree') or rule.guess_degree(prec)
        points = [ctx._as_points(p) for p in points]
        try:
            # Guard digits while summing; restored in the finally block.
            ctx.prec += 20
            if dim == 1:
                v, err = rule.summation(f, points[0], prec, epsilon, m, verbose)
            elif dim == 2:
                # Nested 1D quadrature: inner summation returns (value, err);
                # only the value [0] feeds the outer integrand.
                v, err = rule.summation(lambda x: \
                        rule.summation(lambda y: f(x,y), \
                        points[1], prec, epsilon, m)[0],
                    points[0], prec, epsilon, m, verbose)
            elif dim == 3:
                v, err = rule.summation(lambda x: \
                        rule.summation(lambda y: \
                            rule.summation(lambda z: f(x,y,z), \
                            points[2], prec, epsilon, m)[0],
                        points[1], prec, epsilon, m)[0],
                    points[0], prec, epsilon, m, verbose)
            else:
                raise NotImplementedError("quadrature must have dim 1, 2 or 3")
        finally:
            ctx.prec = orig
        if kwargs.get("error"):
            return +v, err
        # Unary + rounds the result to the restored working precision.
        return +v
|
| 766 |
+
|
| 767 |
+
def quadts(ctx, *args, **kwargs):
|
| 768 |
+
"""
|
| 769 |
+
Performs tanh-sinh quadrature. The call
|
| 770 |
+
|
| 771 |
+
quadts(func, *points, ...)
|
| 772 |
+
|
| 773 |
+
is simply a shortcut for:
|
| 774 |
+
|
| 775 |
+
quad(func, *points, ..., method=TanhSinh)
|
| 776 |
+
|
| 777 |
+
For example, a single integral and a double integral:
|
| 778 |
+
|
| 779 |
+
quadts(lambda x: exp(cos(x)), [0, 1])
|
| 780 |
+
quadts(lambda x, y: exp(cos(x+y)), [0, 1], [0, 1])
|
| 781 |
+
|
| 782 |
+
See the documentation for quad for information about how points
|
| 783 |
+
arguments and keyword arguments are parsed.
|
| 784 |
+
|
| 785 |
+
See documentation for TanhSinh for algorithmic information about
|
| 786 |
+
tanh-sinh quadrature.
|
| 787 |
+
"""
|
| 788 |
+
kwargs['method'] = 'tanh-sinh'
|
| 789 |
+
return ctx.quad(*args, **kwargs)
|
| 790 |
+
|
| 791 |
+
def quadgl(ctx, *args, **kwargs):
|
| 792 |
+
"""
|
| 793 |
+
Performs Gauss-Legendre quadrature. The call
|
| 794 |
+
|
| 795 |
+
quadgl(func, *points, ...)
|
| 796 |
+
|
| 797 |
+
is simply a shortcut for:
|
| 798 |
+
|
| 799 |
+
quad(func, *points, ..., method=GaussLegendre)
|
| 800 |
+
|
| 801 |
+
For example, a single integral and a double integral:
|
| 802 |
+
|
| 803 |
+
quadgl(lambda x: exp(cos(x)), [0, 1])
|
| 804 |
+
quadgl(lambda x, y: exp(cos(x+y)), [0, 1], [0, 1])
|
| 805 |
+
|
| 806 |
+
See the documentation for quad for information about how points
|
| 807 |
+
arguments and keyword arguments are parsed.
|
| 808 |
+
|
| 809 |
+
See documentation for TanhSinh for algorithmic information about
|
| 810 |
+
tanh-sinh quadrature.
|
| 811 |
+
"""
|
| 812 |
+
kwargs['method'] = 'gauss-legendre'
|
| 813 |
+
return ctx.quad(*args, **kwargs)
|
| 814 |
+
|
| 815 |
+
def quadosc(ctx, f, interval, omega=None, period=None, zeros=None):
    r"""
    Calculates

    .. math ::

        I = \int_a^b f(x) dx

    where at least one of `a` and `b` is infinite and where
    `f(x) = g(x) \cos(\omega x + \phi)` for some slowly
    decreasing function `g(x)`. With proper input, :func:`~mpmath.quadosc`
    can also handle oscillatory integrals where the oscillation
    rate is different from a pure sine or cosine wave.

    In the standard case when `|a| < \infty, b = \infty`,
    :func:`~mpmath.quadosc` works by evaluating the infinite series

    .. math ::

        I = \int_a^{x_1} f(x) dx +
        \sum_{k=1}^{\infty} \int_{x_k}^{x_{k+1}} f(x) dx

    where `x_k` are consecutive zeros (alternatively
    some other periodic reference point) of `f(x)`.
    Accordingly, :func:`~mpmath.quadosc` requires information about the
    zeros of `f(x)`. For a periodic function, you can specify
    the zeros by either providing the angular frequency `\omega`
    (*omega*) or the *period* `2 \pi/\omega`. In general, you can
    specify the `n`-th zero by providing the *zeros* arguments.
    Below is an example of each::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> f = lambda x: sin(3*x)/(x**2+1)
        >>> quadosc(f, [0,inf], omega=3)
        0.37833007080198
        >>> quadosc(f, [0,inf], period=2*pi/3)
        0.37833007080198
        >>> quadosc(f, [0,inf], zeros=lambda n: pi*n/3)
        0.37833007080198
        >>> (ei(3)*exp(-3)-exp(3)*ei(-3))/2 # Computed by Mathematica
        0.37833007080198

    Note that *zeros* was specified to multiply `n` by the
    *half-period*, not the full period. In theory, it does not matter
    whether each partial integral is done over a half period or a full
    period. However, if done over half-periods, the infinite series
    passed to :func:`~mpmath.nsum` becomes an *alternating series* and this
    typically makes the extrapolation much more efficient.

    Here is an example of an integration over the entire real line,
    and a half-infinite integration starting at `-\infty`::

        >>> quadosc(lambda x: cos(x)/(1+x**2), [-inf, inf], omega=1)
        1.15572734979092
        >>> pi/e
        1.15572734979092
        >>> quadosc(lambda x: cos(x)/x**2, [-inf, -1], period=2*pi)
        -0.0844109505595739
        >>> cos(1)+si(1)-pi/2
        -0.0844109505595738

    Of course, the integrand may contain a complex exponential just as
    well as a real sine or cosine::

        >>> quadosc(lambda x: exp(3*j*x)/(1+x**2), [-inf,inf], omega=3)
        (0.156410688228254 + 0.0j)
        >>> pi/e**3
        0.156410688228254
        >>> quadosc(lambda x: exp(3*j*x)/(2+x+x**2), [-inf,inf], omega=3)
        (0.00317486988463794 - 0.0447701735209082j)
        >>> 2*pi/sqrt(7)/exp(3*(j+sqrt(7))/2)
        (0.00317486988463794 - 0.0447701735209082j)

    **Non-periodic functions**

    If `f(x) = g(x) h(x)` for some function `h(x)` that is not
    strictly periodic, *omega* or *period* might not work, and it might
    be necessary to use *zeros*.

    A notable exception can be made for Bessel functions which, though not
    periodic, are "asymptotically periodic" in a sufficiently strong sense
    that the sum extrapolation will work out::

        >>> quadosc(j0, [0, inf], period=2*pi)
        1.0
        >>> quadosc(j1, [0, inf], period=2*pi)
        1.0

    More properly, one should provide the exact Bessel function zeros::

        >>> j0zero = lambda n: findroot(j0, pi*(n-0.25))
        >>> quadosc(j0, [0, inf], zeros=j0zero)
        1.0

    For an example where *zeros* becomes necessary, consider the
    complete Fresnel integrals

    .. math ::

        \int_0^{\infty} \cos x^2\,dx = \int_0^{\infty} \sin x^2\,dx
        = \sqrt{\frac{\pi}{8}}.

    Although the integrands do not decrease in magnitude as
    `x \to \infty`, the integrals are convergent since the oscillation
    rate increases (causing consecutive periods to asymptotically
    cancel out). These integrals are virtually impossible to calculate
    to any kind of accuracy using standard quadrature rules. However,
    if one provides the correct asymptotic distribution of zeros
    (`x_n \sim \sqrt{n}`), :func:`~mpmath.quadosc` works::

        >>> mp.dps = 30
        >>> f = lambda x: cos(x**2)
        >>> quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n))
        0.626657068657750125603941321203
        >>> f = lambda x: sin(x**2)
        >>> quadosc(f, [0,inf], zeros=lambda n:sqrt(pi*n))
        0.626657068657750125603941321203
        >>> sqrt(pi/8)
        0.626657068657750125603941321203

    (Interestingly, these integrals can still be evaluated if one
    places some other constant than `\pi` in the square root sign.)

    In general, if `f(x) \sim g(x) \cos(h(x))`, the zeros follow
    the inverse-function distribution `h^{-1}(x)`::

        >>> mp.dps = 15
        >>> f = lambda x: sin(exp(x))
        >>> quadosc(f, [1,inf], zeros=lambda n: log(n))
        -0.25024394235267
        >>> pi/2-si(e)
        -0.250243942352671

    **Non-alternating functions**

    If the integrand oscillates around a positive value, without
    alternating signs, the extrapolation might fail. A simple trick
    that sometimes works is to multiply or divide the frequency by 2::

        >>> f = lambda x: 1/x**2+sin(x)/x**4
        >>> quadosc(f, [1,inf], omega=1) # Bad
        1.28642190869861
        >>> quadosc(f, [1,inf], omega=0.5) # Perfect
        1.28652953559617
        >>> 1+(cos(1)+ci(1)+sin(1))/6
        1.28652953559617

    **Fast decay**

    :func:`~mpmath.quadosc` is primarily useful for slowly decaying
    integrands. If the integrand decreases exponentially or faster,
    :func:`~mpmath.quad` will likely handle it without trouble (and generally be
    much faster than :func:`~mpmath.quadosc`)::

        >>> quadosc(lambda x: cos(x)/exp(x), [0, inf], omega=1)
        0.5
        >>> quad(lambda x: cos(x)/exp(x), [0, inf])
        0.5

    """
    a, b = ctx._as_points(interval)
    a = ctx.convert(a)
    b = ctx.convert(b)
    if [omega, period, zeros].count(None) != 2:
        raise ValueError(
            "must specify exactly one of omega, period, zeros")
    if a == ctx.ninf and b == ctx.inf:
        # Doubly infinite interval: split at 0 and sum both halves.
        s1 = ctx.quadosc(f, [a, 0], omega=omega, zeros=zeros, period=period)
        s2 = ctx.quadosc(f, [0, b], omega=omega, zeros=zeros, period=period)
        return s1 + s2
    if a == ctx.ninf:
        # Reflect [-inf, b] onto [-b, inf]. The reflected zeros function
        # must be passed by keyword: passing it positionally (as the
        # original code did) would bind it to *omega* and make the
        # recursive call fail when it computes 2*pi/omega.
        if zeros:
            return ctx.quadosc(lambda x:f(-x), [-b,-a], zeros=lambda n: zeros(-n))
        else:
            return ctx.quadosc(lambda x:f(-x), [-b,-a], omega=omega, period=period)
    if b != ctx.inf:
        raise ValueError("quadosc requires an infinite integration interval")
    if not zeros:
        if omega:
            period = 2*ctx.pi/omega
        # Half-period spacing makes the tail series alternating, which
        # typically helps the nsum() extrapolation (see docstring).
        zeros = lambda n: n*period/2
    #for n in range(1,10):
    #    p = zeros(n)
    #    if p > a:
    #        break
    #if n >= 9:
    #    raise ValueError("zeros do not appear to be correctly indexed")
    n = 1
    # Head integral from a to the first reference zero, then the
    # extrapolated series of per-interval Gauss-Legendre integrals.
    s = ctx.quadgl(f, [a, zeros(n)])
    def term(k):
        return ctx.quadgl(f, [zeros(k), zeros(k+1)])
    s += ctx.nsum(term, [n, ctx.inf])
    return s
|
| 1009 |
+
|
| 1010 |
+
def quadsubdiv(ctx, f, interval, tol=None, maxintervals=None, **kwargs):
    """
    Computes the integral of *f* over the interval or path specified
    by *interval*, using :func:`~mpmath.quad` together with adaptive
    subdivision of the interval.

    This function gives an accurate answer for some integrals where
    :func:`~mpmath.quad` fails::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> quad(lambda x: abs(sin(x)), [0, 2*pi])
        3.99900894176779
        >>> quadsubdiv(lambda x: abs(sin(x)), [0, 2*pi])
        4.0
        >>> quadsubdiv(sin, [0, 1000])
        0.437620923709297
        >>> quadsubdiv(lambda x: 1/(1+x**2), [-100, 100])
        3.12159332021646
        >>> quadsubdiv(lambda x: ceil(x), [0, 100])
        5050.0
        >>> quadsubdiv(lambda x: sin(x+exp(x)), [0,8])
        0.347400172657248

    The argument *maxintervals* can be set to limit the permissible
    subdivision::

        >>> quadsubdiv(lambda x: sin(x**2), [0,100], maxintervals=5, error=True)
        (-5.40487904307774, 5.011)
        >>> quadsubdiv(lambda x: sin(x**2), [0,100], maxintervals=100, error=True)
        (0.631417921866934, 1.10101120134116e-17)

    Subdivision does not guarantee a correct answer since, the error
    estimate on subintervals may be inaccurate::

        >>> quadsubdiv(lambda x: sech(10*x-2)**2 + sech(100*x-40)**4 + sech(1000*x-600)**6, [0,1], error=True)
        (0.210802735500549, 1.0001111101e-17)
        >>> mp.dps = 20
        >>> quadsubdiv(lambda x: sech(10*x-2)**2 + sech(100*x-40)**4 + sech(1000*x-600)**6, [0,1], error=True)
        (0.21080273550054927738, 2.200000001e-24)

    The second answer is correct. We can get an accurate result at lower
    precision by forcing a finer initial subdivision::

        >>> mp.dps = 15
        >>> quadsubdiv(lambda x: sech(10*x-2)**2 + sech(100*x-40)**4 + sech(1000*x-600)**6, linspace(0,1,5))
        0.210802735500549

    The following integral is too oscillatory for convergence, but we can get a
    reasonable estimate::

        >>> v, err = fp.quadsubdiv(lambda x: fp.sin(1/x), [0,1], error=True)
        >>> round(v, 6), round(err, 6)
        (0.504067, 1e-06)
        >>> sin(1) - ci(1)
        0.504067061906928

    """
    # Seed the work queue with each consecutive pair of points, so a
    # caller-supplied path like linspace(0,1,5) becomes the initial
    # subdivision.
    queue = []
    for i in range(len(interval)-1):
        queue.append((interval[i], interval[i+1]))
    total = ctx.zero
    total_error = ctx.zero
    if maxintervals is None:
        # Default subdivision budget scales with the working precision.
        maxintervals = 10 * ctx.prec
    count = 0
    # Always ask quad() for an error estimate, and silence its own
    # verbose output; this function prints its own progress messages.
    quad_args = kwargs.copy()
    quad_args["verbose"] = False
    quad_args["error"] = True
    if tol is None:
        tol = +ctx.eps
    orig = ctx.prec
    try:
        # Work at slightly elevated precision so accumulating the
        # partial sums does not dominate the reported error.
        ctx.prec += 5
        while queue:
            a, b = queue.pop()
            s, err = ctx.quad(f, [a, b], **quad_args)
            if kwargs.get("verbose"):
                print("subinterval", count, a, b, err)
            if err < tol or count > maxintervals:
                # Accept this subinterval: either it converged, or the
                # subdivision budget is exhausted (keep the best available
                # estimate and let total_error reflect the shortfall).
                total += s
                total_error += err
            else:
                # Split the subinterval and requeue both halves.
                count += 1
                if count == maxintervals and kwargs.get("verbose"):
                    print("warning: number of intervals exceeded maxintervals")
                # Infinite endpoints are split at a finite pivot chosen to
                # move geometrically towards/away from the finite endpoint.
                if a == -ctx.inf and b == ctx.inf:
                    m = 0
                elif a == -ctx.inf:
                    m = min(b-1, 2*b)
                elif b == ctx.inf:
                    m = max(a+1, 2*a)
                else:
                    m = a + (b - a) / 2
                queue.append((a, m))
                queue.append((m, b))
    finally:
        # Restore the caller's precision even if quad() raises.
        ctx.prec = orig
    if kwargs.get("error"):
        return +total, +total_error
    else:
        return +total
|
| 1112 |
+
|
| 1113 |
+
if __name__ == '__main__':
    # Run the doctests embedded in the docstrings above when this module
    # is executed directly.
    import doctest
    doctest.testmod()
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/function_docs.py
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/theta.cpython-311.pyc
ADDED
|
Binary file (53.5 kB). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/functions/elliptic.py
ADDED
|
@@ -0,0 +1,1431 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
r"""
|
| 2 |
+
Elliptic functions historically comprise the elliptic integrals
|
| 3 |
+
and their inverses, and originate from the problem of computing the
|
| 4 |
+
arc length of an ellipse. From a more modern point of view,
|
| 5 |
+
an elliptic function is defined as a doubly periodic function, i.e.
|
| 6 |
+
a function which satisfies
|
| 7 |
+
|
| 8 |
+
.. math ::
|
| 9 |
+
|
| 10 |
+
f(z + 2 \omega_1) = f(z + 2 \omega_2) = f(z)
|
| 11 |
+
|
| 12 |
+
for some half-periods `\omega_1, \omega_2` with
|
| 13 |
+
`\mathrm{Im}[\omega_1 / \omega_2] > 0`. The canonical elliptic
|
| 14 |
+
functions are the Jacobi elliptic functions. More broadly, this section
|
| 15 |
+
includes quasi-doubly periodic functions (such as the Jacobi theta
|
| 16 |
+
functions) and other functions useful in the study of elliptic functions.
|
| 17 |
+
|
| 18 |
+
Many different conventions for the arguments of
|
| 19 |
+
elliptic functions are in use. It is even standard to use
|
| 20 |
+
different parameterizations for different functions in the same
|
| 21 |
+
text or software (and mpmath is no exception).
|
| 22 |
+
The usual parameters are the elliptic nome `q`, which usually
|
| 23 |
+
must satisfy `|q| < 1`; the elliptic parameter `m` (an arbitrary
|
| 24 |
+
complex number); the elliptic modulus `k` (an arbitrary complex
|
| 25 |
+
number); and the half-period ratio `\tau`, which usually must
|
| 26 |
+
satisfy `\mathrm{Im}[\tau] > 0`.
|
| 27 |
+
These quantities can be expressed in terms of each other
|
| 28 |
+
using the following relations:
|
| 29 |
+
|
| 30 |
+
.. math ::
|
| 31 |
+
|
| 32 |
+
m = k^2
|
| 33 |
+
|
| 34 |
+
.. math ::
|
| 35 |
+
|
| 36 |
+
\tau = i \frac{K(1-m)}{K(m)}
|
| 37 |
+
|
| 38 |
+
.. math ::
|
| 39 |
+
|
| 40 |
+
q = e^{i \pi \tau}
|
| 41 |
+
|
| 42 |
+
.. math ::
|
| 43 |
+
|
| 44 |
+
k = \frac{\vartheta_2^2(q)}{\vartheta_3^2(q)}
|
| 45 |
+
|
| 46 |
+
In addition, an alternative definition is used for the nome in
|
| 47 |
+
number theory, which we here denote by q-bar:
|
| 48 |
+
|
| 49 |
+
.. math ::
|
| 50 |
+
|
| 51 |
+
\bar{q} = q^2 = e^{2 i \pi \tau}
|
| 52 |
+
|
| 53 |
+
For convenience, mpmath provides functions to convert
|
| 54 |
+
between the various parameters (:func:`~mpmath.qfrom`, :func:`~mpmath.mfrom`,
|
| 55 |
+
:func:`~mpmath.kfrom`, :func:`~mpmath.taufrom`, :func:`~mpmath.qbarfrom`).
|
| 56 |
+
|
| 57 |
+
**References**
|
| 58 |
+
|
| 59 |
+
1. [AbramowitzStegun]_
|
| 60 |
+
|
| 61 |
+
2. [WhittakerWatson]_
|
| 62 |
+
|
| 63 |
+
"""
|
| 64 |
+
|
| 65 |
+
from .functions import defun, defun_wrapped
|
| 66 |
+
|
| 67 |
+
@defun_wrapped
def eta(ctx, tau):
    r"""
    Returns the Dedekind eta function of tau in the upper half-plane.

    >>> from mpmath import *
    >>> mp.dps = 25; mp.pretty = True
    >>> eta(1j); gamma(0.25) / (2*pi**0.75)
    (0.7682254223260566590025942 + 0.0j)
    0.7682254223260566590025942
    >>> tau = sqrt(2) + sqrt(5)*1j
    >>> eta(-1/tau); sqrt(-1j*tau) * eta(tau)
    (0.9022859908439376463573294 + 0.07985093673948098408048575j)
    (0.9022859908439376463573295 + 0.07985093673948098408048575j)
    >>> eta(tau+1); exp(pi*1j/12) * eta(tau)
    (0.4493066139717553786223114 + 0.3290014793877986663915939j)
    (0.4493066139717553786223114 + 0.3290014793877986663915939j)
    >>> f = lambda z: diff(eta, z) / eta(z)
    >>> chop(36*diff(f,tau)**2 - 24*diff(f,tau,2)*f(tau) + diff(f,tau,3))
    0.0

    """
    # eta is only defined for Im(tau) > 0; reject the closed lower
    # half-plane explicitly rather than returning garbage.
    if ctx.im(tau) <= 0.0:
        raise ValueError("eta is only defined in the upper half-plane")
    # Euler product form: eta(tau) = q^(1/24) * prod_{n>=1} (1 - q^n)
    # with q = exp(2*pi*i*tau). Here expjpi(tau/12) = exp(pi*i*tau/12)
    # is q^(1/24), and ctx.qp evaluates the q-Pochhammer (Euler) product
    # at q = (q^(1/24))**24.
    q = ctx.expjpi(tau/12)
    return q * ctx.qp(q**24)
|
| 93 |
+
|
| 94 |
+
def nome(ctx, m):
    """
    Internal helper: compute the elliptic nome q from the parameter m.

    Uses q = exp(-pi*K(1-m)/K(m)), with K the complete elliptic integral
    of the first kind, after dispatching the special values m = 0, 1,
    nan and +/-inf directly.
    """
    m = ctx.convert(m)
    # q(0) = 0 and q(1) = 1 are handled directly; the general formula
    # below cannot be used there (K diverges at m = 1).
    if not m:
        return m
    if m == ctx.one:
        return m
    # nan propagates unchanged.
    if ctx.isnan(m):
        return m
    if ctx.isinf(m):
        # Limit of the nome as m -> -inf is -1 (kept in the real type);
        # for the other infinities a complex -1 is returned.
        if m == ctx.ninf:
            return type(m)(-1)
        else:
            return ctx.mpc(-1)
    a = ctx.ellipk(ctx.one-m)
    b = ctx.ellipk(m)
    v = ctx.exp(-ctx.pi*a/b)
    # For real m < 1 the nome is real: discard imaginary rounding noise,
    # preserving a complex result type when the input was complex.
    if not ctx._im(m) and ctx._re(m) < 1:
        if ctx._is_real_type(m):
            return v.real
        else:
            return v.real + 0j
    elif m == 2:
        # NOTE(review): at m == 2 the real part is forced to exactly
        # zero, presumably because the nome is purely imaginary there
        # and only rounding noise is being removed -- TODO confirm.
        v = ctx.mpc(0, v.imag)
    return v
|
| 118 |
+
|
| 119 |
+
@defun_wrapped
def qfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
    r"""
    Returns the elliptic nome `q`, given any of `q, m, k, \tau, \bar{q}`::

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> qfrom(q=0.25)
        0.25
        >>> qfrom(m=mfrom(q=0.25))
        0.25
        >>> qfrom(k=kfrom(q=0.25))
        0.25
        >>> qfrom(tau=taufrom(q=0.25))
        (0.25 + 0.0j)
        >>> qfrom(qbar=qbarfrom(q=0.25))
        0.25

    """
    # The parameterizations are checked in a fixed priority order;
    # callers are expected to supply exactly one (if none is given, the
    # function falls through and returns None).
    if q is not None:
        return ctx.convert(q)
    if m is not None:
        return nome(ctx, m)
    if k is not None:
        # m = k^2; reuse the nome computation.
        return nome(ctx, ctx.convert(k)**2)
    if tau is not None:
        # q = exp(i*pi*tau)
        return ctx.expjpi(tau)
    if qbar is not None:
        # qbar = q^2; take the principal square root.
        return ctx.sqrt(qbar)
|
| 148 |
+
|
| 149 |
+
@defun_wrapped
def qbarfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
    r"""
    Returns the number-theoretic nome `\bar q`, given any of
    `q, m, k, \tau, \bar{q}`::

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> qbarfrom(qbar=0.25)
        0.25
        >>> qbarfrom(q=qfrom(qbar=0.25))
        0.25
        >>> qbarfrom(m=extraprec(20)(mfrom)(qbar=0.25)) # ill-conditioned
        0.25
        >>> qbarfrom(k=extraprec(20)(kfrom)(qbar=0.25)) # ill-conditioned
        0.25
        >>> qbarfrom(tau=taufrom(qbar=0.25))
        (0.25 + 0.0j)

    """
    # qbar = q^2 throughout; each branch converts its input to q first
    # (same priority order as the other conversion helpers).
    if qbar is not None:
        return ctx.convert(qbar)
    if q is not None:
        return ctx.convert(q) ** 2
    if m is not None:
        return nome(ctx, m) ** 2
    if k is not None:
        # m = k^2, then qbar = nome(m)^2.
        return nome(ctx, ctx.convert(k)**2) ** 2
    if tau is not None:
        # qbar = exp(2*i*pi*tau)
        return ctx.expjpi(2*tau)
|
| 179 |
+
|
| 180 |
+
@defun_wrapped
|
| 181 |
+
def taufrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
    r"""
    Returns the elliptic half-period ratio `\tau`, given any of
    `q, m, k, \tau, \bar{q}`::

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> taufrom(tau=0.5j)
        (0.0 + 0.5j)
        >>> taufrom(q=qfrom(tau=0.5j))
        (0.0 + 0.5j)
        >>> taufrom(m=mfrom(tau=0.5j))
        (0.0 + 0.5j)
        >>> taufrom(k=kfrom(tau=0.5j))
        (0.0 + 0.5j)
        >>> taufrom(qbar=qbarfrom(tau=0.5j))
        (0.0 + 0.5j)

    """
    # The parameterizations are mutually exclusive; check them in the
    # same fixed priority order as the other conversion helpers.
    if tau is not None:
        return ctx.convert(tau)
    if m is not None:
        # tau = i * K(1-m) / K(m)
        param = ctx.convert(m)
        return ctx.j * ctx.ellipk(1 - param) / ctx.ellipk(param)
    if k is not None:
        # Reduce the modulus to the parameter m = k^2 first.
        modulus = ctx.convert(k)
        msq = modulus**2
        return ctx.j * ctx.ellipk(1 - msq) / ctx.ellipk(msq)
    if q is not None:
        # Invert q = exp(i*pi*tau).
        return ctx.log(q) / (ctx.pi * ctx.j)
    if qbar is not None:
        # Invert qbar = exp(2*i*pi*tau).
        return ctx.log(ctx.convert(qbar)) / (2 * ctx.pi * ctx.j)
|
| 213 |
+
|
| 214 |
+
@defun_wrapped
def kfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
    r"""
    Returns the elliptic modulus `k`, given any of
    `q, m, k, \tau, \bar{q}`::

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> kfrom(k=0.25)
        0.25
        >>> kfrom(m=mfrom(k=0.25))
        0.25
        >>> kfrom(q=qfrom(k=0.25))
        0.25
        >>> kfrom(tau=taufrom(k=0.25))
        (0.25 + 0.0j)
        >>> kfrom(qbar=qbarfrom(k=0.25))
        0.25

    As `q \to 1` and `q \to -1`, `k` rapidly approaches
    `1` and `i \infty` respectively::

        >>> kfrom(q=0.75)
        0.9999999999999899166471767
        >>> kfrom(q=-0.75)
        (0.0 + 7041781.096692038332790615j)
        >>> kfrom(q=1)
        1
        >>> kfrom(q=-1)
        (0.0 + +infj)
    """
    if k is not None:
        return ctx.convert(k)
    if m is not None:
        # k = sqrt(m)
        return ctx.sqrt(m)
    # The remaining parameterizations are all reduced to the nome q.
    if tau is not None:
        q = ctx.expjpi(tau)
    if qbar is not None:
        q = ctx.sqrt(qbar)
    # Degenerate limits of the nome (see docstring): k -> 1 as q -> 1,
    # and k -> i*inf as q -> -1.
    if q == 1:
        return q
    if q == -1:
        return ctx.mpc(0,'inf')
    # k = theta_2(q)^2 / theta_3(q)^2
    return (ctx.jtheta(2,0,q)/ctx.jtheta(3,0,q))**2
|
| 258 |
+
|
| 259 |
+
@defun_wrapped
def mfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None):
    r"""
    Returns the elliptic parameter `m`, given any of
    `q, m, k, \tau, \bar{q}`::

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> mfrom(m=0.25)
        0.25
        >>> mfrom(q=qfrom(m=0.25))
        0.25
        >>> mfrom(k=kfrom(m=0.25))
        0.25
        >>> mfrom(tau=taufrom(m=0.25))
        (0.25 + 0.0j)
        >>> mfrom(qbar=qbarfrom(m=0.25))
        0.25

    As `q \to 1` and `q \to -1`, `m` rapidly approaches
    `1` and `-\infty` respectively::

        >>> mfrom(q=0.75)
        0.9999999999999798332943533
        >>> mfrom(q=-0.75)
        -49586681013729.32611558353
        >>> mfrom(q=1)
        1.0
        >>> mfrom(q=-1)
        -inf

    The inverse nome as a function of `q` has an integer
    Taylor series expansion::

        >>> taylor(lambda q: mfrom(q), 0, 7)
        [0.0, 16.0, -128.0, 704.0, -3072.0, 11488.0, -38400.0, 117632.0]

    """
    if m is not None:
        return m
    if k is not None:
        # m = k^2
        return k**2
    # The remaining parameterizations are all reduced to the nome q.
    if tau is not None:
        q = ctx.expjpi(tau)
    if qbar is not None:
        q = ctx.sqrt(qbar)
    # Degenerate limits (see docstring): m -> 1 as q -> 1, and
    # m -> -inf as q -> -1.
    if q == 1:
        return ctx.convert(q)
    if q == -1:
        return q*ctx.inf
    # m = theta_2(q)^4 / theta_3(q)^4
    v = (ctx.jtheta(2,0,q)/ctx.jtheta(3,0,q))**4
    # For real negative q the parameter m is real: drop the imaginary
    # rounding noise.
    if ctx._is_real_type(q) and q < 0:
        v = v.real
    return v
|
| 313 |
+
|
| 314 |
+
# Lookup table for the twelve Jacobi elliptic functions pq(u, m).
# Each entry maps a two-letter name to a 6-tuple (A, B, C, D, f0, f1),
# consumed by ellipfun below:
#   - the value is  prod(jtheta(a,0,q) for a in A)
#                 / prod(jtheta(b,0,q) for b in B)
#                 * prod(jtheta(c,t,q) for c in C)
#                 / prod(jtheta(d,t,q) for d in D),
#     where t = u / jtheta(3,0,q)**2;
#   - f0 and f1 name the elementary degenerate forms at m = 0 (q = 0)
#     and m = 1 (q = 1) respectively ('1' meaning the constant one).
# The four "equal-letter" entries ('cc', 'ss', 'nn', 'dd') are
# identically 1 and are marked with None.
jacobi_spec = {
  'sn' : ([3],[2],[1],[4], 'sin', 'tanh'),
  'cn' : ([4],[2],[2],[4], 'cos', 'sech'),
  'dn' : ([4],[3],[3],[4], '1', 'sech'),
  'ns' : ([2],[3],[4],[1], 'csc', 'coth'),
  'nc' : ([2],[4],[4],[2], 'sec', 'cosh'),
  'nd' : ([3],[4],[4],[3], '1', 'cosh'),
  'sc' : ([3],[4],[1],[2], 'tan', 'sinh'),
  'sd' : ([3,3],[2,4],[1],[3], 'sin', 'sinh'),
  'cd' : ([3],[2],[2],[3], 'cos', '1'),
  'cs' : ([4],[3],[2],[1], 'cot', 'csch'),
  'dc' : ([2],[3],[3],[2], 'sec', '1'),
  'ds' : ([2,4],[3,3],[3],[1], 'csc', 'csch'),
  'cc' : None,
  'ss' : None,
  'nn' : None,
  'dd' : None
}
|
| 332 |
+
|
| 333 |
+
@defun
def ellipfun(ctx, kind, u=None, m=None, q=None, k=None, tau=None):
    # Evaluates the Jacobi elliptic function named by the two-letter
    # string *kind* ('sn', 'cn', 'dn', 'ns', ...) at argument u, with
    # the elliptic parameter given as any one of m, q, k or tau.
    # Called without u, returns a curried function of (u, parameter).
    try:
        S = jacobi_spec[kind]
    except KeyError:
        raise ValueError("First argument must be a two-character string "
            "containing 's', 'c', 'd' or 'n', e.g.: 'sn'")
    if u is None:
        # Curried form: ellipfun('sn') returns a callable named 'sn'
        def f(*args, **kwargs):
            return ctx.ellipfun(kind, *args, **kwargs)
        f.__name__ = kind
        return f
    prec = ctx.prec
    try:
        # Work at slightly raised precision; restored in finally
        ctx.prec += 10
        u = ctx.convert(u)
        q = ctx.qfrom(m=m, q=q, k=k, tau=tau)
        if S is None:
            # 'cc', 'ss', 'nn', 'dd' are identically 1; the 0*q*u term
            # propagates argument types (real vs complex) and NaNs
            v = ctx.one + 0*q*u
        elif q == ctx.zero:
            # m = 0: degenerate trigonometric case (S[4] names the
            # elementary function, or '1' for the constant one)
            if S[4] == '1': v = ctx.one
            else: v = getattr(ctx, S[4])(u)
            v += 0*q*u
        elif q == ctx.one:
            # m = 1: degenerate hyperbolic case
            if S[5] == '1': v = ctx.one
            else: v = getattr(ctx, S[5])(u)
            v += 0*q*u
        else:
            # General case: product/quotient of Jacobi theta functions,
            # per the recipe encoded in jacobi_spec
            t = u / ctx.jtheta(3, 0, q)**2
            v = ctx.one
            for a in S[0]: v *= ctx.jtheta(a, 0, q)
            for b in S[1]: v /= ctx.jtheta(b, 0, q)
            for c in S[2]: v *= ctx.jtheta(c, t, q)
            for d in S[3]: v /= ctx.jtheta(d, t, q)
    finally:
        ctx.prec = prec
    # Unary + rounds the extended-precision result to working precision
    return +v
|
| 370 |
+
|
| 371 |
+
@defun_wrapped
def kleinj(ctx, tau=None, **kwargs):
    r"""
    Evaluates the Klein j-invariant, which is a modular function defined for
    `\tau` in the upper half-plane as

    .. math ::

        J(\tau) = \frac{g_2^3(\tau)}{g_2^3(\tau) - 27 g_3^2(\tau)}

    where `g_2` and `g_3` are the modular invariants of the Weierstrass
    elliptic function.  An alternative, common notation is that of the
    j-function `j(\tau) = 1728 J(\tau)`.  Instead of `\tau`, the nome
    may be given through any keyword accepted by :func:`~mpmath.qfrom`
    (e.g. ``kleinj(qbar=q)``).

    **Examples**

    Verifying the functional equation `J(\tau) = J(\tau+1) = J(-\tau^{-1})`::

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> tau = 0.625+0.75*j
        >>> kleinj(tau)
        (-0.1507492166511182267125242 + 0.07595948379084571927228948j)
        >>> kleinj(tau+1)
        (-0.1507492166511182267125242 + 0.07595948379084571927228948j)
        >>> kleinj(-1/tau)
        (-0.1507492166511182267125242 + 0.07595948379084571927228946j)

    The j-function has a famous Laurent series expansion in terms of the nome
    `\bar{q}`, `j(\tau) = \bar{q}^{-1} + 744 + 196884\bar{q} + \ldots`::

        >>> mp.dps = 15
        >>> taylor(lambda q: 1728*q*kleinj(qbar=q), 0, 5, singular=True)
        [1.0, 744.0, 196884.0, 21493760.0, 864299970.0, 20245856256.0]

    """
    # J in terms of Jacobi theta constants:
    #   J = (t2^8 + t3^8 + t4^8)^3 / (54 (t2 t3 t4)^8)
    nome = ctx.qfrom(tau=tau, **kwargs)
    a = ctx.jtheta(2, 0, nome)**8
    b = ctx.jtheta(3, 0, nome)**8
    c = ctx.jtheta(4, 0, nome)**8
    numer = (a + b + c)**3
    denom = 54*a*b*c
    return numer/denom
|
| 464 |
+
|
| 465 |
+
|
| 466 |
+
def RF_calc(ctx, x, y, z, r):
    """
    Computes the Carlson symmetric elliptic integral R_F(x,y,z) to
    (roughly) tolerance r using Carlson's duplication algorithm.
    Internal helper; callers are expected to manage precision.
    """
    # Degenerate cases with two equal arguments reduce to R_C
    if y == z: return RC_calc(ctx, x, y, r)
    if x == z: return RC_calc(ctx, y, x, r)
    if x == y: return RC_calc(ctx, z, x, r)
    # Special values: NaN propagates; any infinite argument gives 0
    if not (ctx.isnormal(x) and ctx.isnormal(y) and ctx.isnormal(z)):
        if ctx.isnan(x) or ctx.isnan(y) or ctx.isnan(z):
            return x*y*z
        if ctx.isinf(x) or ctx.isinf(y) or ctx.isinf(z):
            return ctx.zero
    xm,ym,zm = x,y,z
    A0 = Am = (x+y+z)/3
    # Q together with pow4 bounds the truncation error of the final
    # Taylor expansion; iterate until pow4*Q < |Am|
    Q = ctx.root(3*r, -6) * max(abs(A0-x),abs(A0-y),abs(A0-z))
    g = ctx.mpf(0.25)
    pow4 = ctx.one
    while 1:
        # One duplication step: R_F(x,y,z) = R_F((x+l)/4,(y+l)/4,(z+l)/4)
        xs = ctx.sqrt(xm)
        ys = ctx.sqrt(ym)
        zs = ctx.sqrt(zm)
        lm = xs*ys + xs*zs + ys*zs
        Am1 = (Am+lm)*g
        xm, ym, zm = (xm+lm)*g, (ym+lm)*g, (zm+lm)*g
        if pow4 * Q < abs(Am):
            break
        Am = Am1
        pow4 *= g
    # Evaluate the degree-7 Taylor expansion of R_F about the
    # converged (nearly equal) arguments
    t = pow4/Am
    X = (A0-x)*t
    Y = (A0-y)*t
    Z = -X-Y
    E2 = X*Y-Z**2
    E3 = X*Y*Z
    return ctx.power(Am,-0.5) * (9240-924*E2+385*E2**2+660*E3-630*E2*E3)/9240
|
| 498 |
+
|
| 499 |
+
def RC_calc(ctx, x, y, r, pv=True):
    """
    Computes the degenerate Carlson integral R_C(x,y) = R_F(x,y,y).
    With pv=True, the Cauchy principal value is returned for real
    negative y.  Internal helper (r is the tolerance used by the
    callers; the closed forms here don't need it except recursively).
    """
    # Special values for non-finite / zero arguments
    if not (ctx.isnormal(x) and ctx.isnormal(y)):
        if ctx.isinf(x) or ctx.isinf(y):
            return 1/(x*y)
        if y == 0:
            return ctx.inf
        if x == 0:
            return ctx.pi / ctx.sqrt(y) / 2
        raise ValueError
    # Cauchy principal value for y on the negative real axis,
    # via R_C(x,-y) = sqrt(x/(x+y)) R_C(x+y, y) (y > 0)
    if pv and ctx._im(y) == 0 and ctx._re(y) < 0:
        return ctx.sqrt(x/(x-y)) * RC_calc(ctx, x-y, -y, r)
    if x == y:
        return 1/ctx.sqrt(x)
    # x ~ y causes cancellation in x-y and in acos/acosh near 1;
    # compensate with extra working precision, restored below
    extraprec = 2*max(0,-ctx.mag(x-y)+ctx.mag(x))
    ctx.prec += extraprec
    if ctx._is_real_type(x) and ctx._is_real_type(y):
        # Real case: elementary closed forms
        x = ctx._re(x)
        y = ctx._re(y)
        a = ctx.sqrt(x/y)
        if x < y:
            b = ctx.sqrt(y-x)
            v = ctx.acos(a)/b
        else:
            b = ctx.sqrt(x-y)
            v = ctx.acosh(a)/b
    else:
        # Complex case: single acos formula covers both branches
        sx = ctx.sqrt(x)
        sy = ctx.sqrt(y)
        v = ctx.acos(sx/sy)/(ctx.sqrt((1-x/y))*sy)
    ctx.prec -= extraprec
    return v
|
| 531 |
+
|
| 532 |
+
def RJ_calc(ctx, x, y, z, p, r, integration):
    """
    Computes the Carlson symmetric integral R_J(x,y,z,p) to tolerance r.

    With integration == 0, computes RJ only using Carlson's algorithm
    (may be wrong for some values).
    With integration == 1, uses an initial integration to make sure
    Carlson's algorithm is correct.
    With integration == 2, uses only integration.
    """
    # Special values: NaN propagates; infinities give 0; a zero p or
    # more than one zero among x,y,z makes the integral diverge
    if not (ctx.isnormal(x) and ctx.isnormal(y) and \
        ctx.isnormal(z) and ctx.isnormal(p)):
        if ctx.isnan(x) or ctx.isnan(y) or ctx.isnan(z) or ctx.isnan(p):
            return x*y*z
        if ctx.isinf(x) or ctx.isinf(y) or ctx.isinf(z) or ctx.isinf(p):
            return ctx.zero
        if not p:
            return ctx.inf
        if (not x) + (not y) + (not z) > 1:
            return ctx.inf
    # Check conditions and fall back on integration for argument
    # reduction if needed. The following conditions might be needlessly
    # restrictive.
    initial_integral = ctx.zero
    if integration >= 1:
        # Carlson's algorithm is known valid when all arguments lie in
        # the right half-plane (p strictly), ...
        ok = (x.real >= 0 and y.real >= 0 and z.real >= 0 and p.real > 0)
        if not ok:
            # ... or when p coincides with one of x, y, z (the R_D case)
            if x == p or y == p or z == p:
                ok = True
        if not ok:
            # ... or when one argument is nonnegative real and the other
            # two are complex conjugates
            if p.imag != 0 or p.real >= 0:
                if (x.imag == 0 and x.real >= 0 and ctx.conj(y) == z):
                    ok = True
                if (y.imag == 0 and y.real >= 0 and ctx.conj(x) == z):
                    ok = True
                if (z.imag == 0 and z.real >= 0 and ctx.conj(x) == y):
                    ok = True
        if not ok or (integration == 2):
            # Integrate over [0, N] explicitly and shift all arguments
            # by N, moving them into the validated region
            N = ctx.ceil(-min(x.real, y.real, z.real, p.real)) + 1
            # Integrate around any singularities
            if all((t.imag >= 0 or t.real > 0) for t in [x, y, z, p]):
                margin = ctx.j
            elif all((t.imag < 0 or t.real > 0) for t in [x, y, z, p]):
                margin = -ctx.j
            else:
                margin = 1
                # Go through the upper half-plane, but low enough that any
                # parameter starting in the lower plane doesn't cross the
                # branch cut
                for t in [x, y, z, p]:
                    if t.imag >= 0 or t.real > 0:
                        continue
                    margin = min(margin, abs(t.imag) * 0.5)
                margin *= ctx.j
            N += margin
            F = lambda t: 1/(ctx.sqrt(t+x)*ctx.sqrt(t+y)*ctx.sqrt(t+z)*(t+p))
            if integration == 2:
                return 1.5 * ctx.quadsubdiv(F, [0, N, ctx.inf])
            initial_integral = 1.5 * ctx.quadsubdiv(F, [0, N])
            x += N; y += N; z += N; p += N
    # Carlson's duplication algorithm proper
    xm,ym,zm,pm = x,y,z,p
    A0 = Am = (x + y + z + 2*p)/5
    delta = (p-x)*(p-y)*(p-z)
    # Error-bound factor; iterate until pow4*Q < |Am|
    Q = ctx.root(0.25*r, -6) * max(abs(A0-x),abs(A0-y),abs(A0-z),abs(A0-p))
    g = ctx.mpf(0.25)
    pow4 = ctx.one
    S = 0
    while 1:
        sx = ctx.sqrt(xm)
        sy = ctx.sqrt(ym)
        sz = ctx.sqrt(zm)
        sp = ctx.sqrt(pm)
        lm = sx*sy + sx*sz + sy*sz
        Am1 = (Am+lm)*g
        xm = (xm+lm)*g; ym = (ym+lm)*g; zm = (zm+lm)*g; pm = (pm+lm)*g
        dm = (sp+sx) * (sp+sy) * (sp+sz)
        em = delta * pow4**3 / dm**2
        if pow4 * Q < abs(Am):
            break
        # Accumulate the R_C correction term for this duplication step
        T = RC_calc(ctx, ctx.one, ctx.one+em, r) * pow4 / dm
        S += T
        pow4 *= g
        Am = Am1
    # Degree-7 Taylor expansion about the converged arguments
    t = pow4 / Am
    X = (A0-x)*t
    Y = (A0-y)*t
    Z = (A0-z)*t
    P = (-X-Y-Z)/2
    E2 = X*Y + X*Z + Y*Z - 3*P**2
    E3 = X*Y*Z + 2*E2*P + 4*P**3
    E4 = (2*X*Y*Z + E2*P + 3*P**3)*P
    E5 = X*Y*Z*P**2
    P = 24024 - 5148*E2 + 2457*E2**2 + 4004*E3 - 4158*E2*E3 - 3276*E4 + 2772*E5
    Q = 24024
    v1 = pow4 * ctx.power(Am, -1.5) * P/Q
    v2 = 6*S
    return initial_integral + v1 + v2
|
| 627 |
+
|
| 628 |
+
@defun
def elliprf(ctx, x, y, z):
    r"""
    Evaluates the Carlson symmetric elliptic integral of the first kind

    .. math ::

        R_F(x,y,z) = \frac{1}{2}
        \int_0^{\infty} \frac{dt}{\sqrt{(t+x)(t+y)(t+z)}}

    which is defined for `x,y,z \notin (-\infty,0)`, and with
    at most one of `x,y,z` being zero.  For real `x,y,z \ge 0` the
    principal square root is taken in the integrand; for complex
    arguments the branch is chosen so that the integrand is continuous
    along the path of integration.

    **Examples**

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> elliprf(0,1,1); pi/2
        1.570796326794896619231322
        1.570796326794896619231322
        >>> elliprf(2,2,2)**2
        0.5
        >>> m = mpf(0.75)
        >>> ellipk(m); elliprf(0,1-m,1)
        2.156515647499643235438675
        2.156515647499643235438675

    **References**

    1. [Carlson]_
    2. [DLMF]_ Chapter 19. Elliptic Integrals

    """
    # Evaluate at slightly raised precision; round once on return
    x, y, z = ctx.convert(x), ctx.convert(y), ctx.convert(z)
    saved_prec = ctx.prec
    try:
        ctx.prec = saved_prec + 20
        tolerance = ctx.eps * 2**10
        result = RF_calc(ctx, x, y, z, tolerance)
    finally:
        ctx.prec = saved_prec
    return +result
|
| 742 |
+
|
| 743 |
+
@defun
def elliprc(ctx, x, y, pv=True):
    r"""
    Evaluates the degenerate Carlson symmetric elliptic integral
    of the first kind

    .. math ::

        R_C(x,y) = R_F(x,y,y) =
        \frac{1}{2} \int_0^{\infty} \frac{dt}{(t+y) \sqrt{(t+x)}}.

    If `y \in (-\infty,0)`, either a value defined by continuity,
    or with *pv=True* the Cauchy principal value, can be computed.
    For `x \ge 0, y > 0`, the value can be expressed in terms of
    elementary inverse trigonometric or hyperbolic functions.

    **Examples**

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> elliprc(1,2)*4; elliprc(0,1)*2; +pi
        3.141592653589793238462643
        3.141592653589793238462643
        3.141592653589793238462643
        >>> elliprc('1/3', '1/5'); sqrt(7.5)*acosh(sqrt('5/3'))
        2.041630778983498390751238
        2.041630778983498390751238
        >>> elliprc('1/5', '1/3'); sqrt(7.5)*acos(sqrt('3/5'))
        1.875180765206547065111085
        1.875180765206547065111085

    """
    # Evaluate at slightly raised precision; round once on return
    x, y = ctx.convert(x), ctx.convert(y)
    saved_prec = ctx.prec
    try:
        ctx.prec = saved_prec + 20
        tolerance = ctx.eps * 2**10
        result = RC_calc(ctx, x, y, tolerance, pv)
    finally:
        ctx.prec = saved_prec
    return +result
|
| 820 |
+
|
| 821 |
+
@defun
def elliprj(ctx, x, y, z, p, integration=1):
    r"""
    Evaluates the Carlson symmetric elliptic integral of the third kind

    .. math ::

        R_J(x,y,z,p) = \frac{3}{2}
        \int_0^{\infty} \frac{dt}{(t+p)\sqrt{(t+x)(t+y)(t+z)}}.

    Like :func:`~mpmath.elliprf`, the branch of the square root in the
    integrand is defined so as to be continuous along the path of
    integration for complex values of the arguments.

    The *integration* flag selects the evaluation strategy (see
    ``RJ_calc``): 0 uses Carlson's duplication algorithm only, 1 (the
    default) precedes it with a numerical integration where needed for
    correctness, and 2 uses numerical integration only.

    **Examples**

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> elliprj(1,1,1,1)
        1.0
        >>> elliprj(2,2,2,2); 1/(2*sqrt(2))
        0.3535533905932737622004222
        0.3535533905932737622004222
        >>> elliprj(0,1,1,2); 3*pi*(2-sqrt(2))/4
        1.380226776765915172432054
        1.380226776765915172432054
        >>> chop(elliprj(1+j, 1-j, 1, 1))
        0.8505007163686739432927844

    """
    # Evaluate at slightly raised precision; round once on return
    x, y, z, p = (ctx.convert(t) for t in (x, y, z, p))
    saved_prec = ctx.prec
    try:
        ctx.prec = saved_prec + 20
        tolerance = ctx.eps * 2**10
        result = RJ_calc(ctx, x, y, z, p, tolerance, integration)
    finally:
        ctx.prec = saved_prec
    return +result
|
| 897 |
+
|
| 898 |
+
@defun
def elliprd(ctx, x, y, z):
    r"""
    Evaluates the degenerate Carlson symmetric elliptic integral
    of the third kind or Carlson elliptic integral of the
    second kind `R_D(x,y,z) = R_J(x,y,z,z)`.

    See :func:`~mpmath.elliprj` for additional information.

    **Examples**

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> elliprd(1,2,3)
        0.2904602810289906442326534
        >>> elliprj(1,2,3,3)
        0.2904602810289906442326534

    The so-called *second lemniscate constant*, a transcendental number::

        >>> elliprd(0,2,1)/3
        0.5990701173677961037199612
        >>> extradps(25)(quad)(lambda t: t**2/sqrt(1-t**4), [0,1])
        0.5990701173677961037199612
        >>> gamma('3/4')**2/sqrt(2*pi)
        0.5990701173677961037199612

    """
    # R_D is the p = z special case of R_J; delegate directly
    return ctx.elliprj(x,y,z,z)
|
| 927 |
+
|
| 928 |
+
@defun
def elliprg(ctx, x, y, z):
    r"""
    Evaluates the Carlson completely symmetric elliptic integral
    of the second kind

    .. math ::

        R_G(x,y,z) = \frac{1}{4} \int_0^{\infty}
            \frac{t}{\sqrt{(t+x)(t+y)(t+z)}}
            \left( \frac{x}{t+x} + \frac{y}{t+y} + \frac{z}{t+z}\right) dt.

    **Examples**

    Evaluation for real and complex arguments::

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> elliprg(0,1,1)*4; +pi
        3.141592653589793238462643
        3.141592653589793238462643
        >>> elliprg(0,0.5,1)
        0.6753219405238377512600874
        >>> chop(elliprg(1+j, 1-j, 2))
        1.172431327676416604532822

    A double integral that can be evaluated in terms of `R_G`::

        >>> x,y,z = 2,3,4
        >>> def f(t,u):
        ...     st = fp.sin(t); ct = fp.cos(t)
        ...     su = fp.sin(u); cu = fp.cos(u)
        ...     return (x*(st*cu)**2 + y*(st*su)**2 + z*ct**2)**0.5 * st
        ...
        >>> nprint(mpf(fp.quad(f, [0,fp.pi], [0,2*fp.pi])/(4*fp.pi)), 13)
        1.725503028069
        >>> nprint(elliprg(x,y,z), 13)
        1.725503028069

    """
    x = ctx.convert(x)
    y = ctx.convert(y)
    z = ctx.convert(z)
    # Count zero arguments; these have simple closed forms
    zeros = (not x) + (not y) + (not z)
    if zeros == 3:
        # R_G(0,0,0) = 0; the multiplication preserves the result type
        return (x+y+z)*0
    if zeros == 2:
        # R_G(t,0,0) = sqrt(t)/2 for the single nonzero argument
        if x: return 0.5*ctx.sqrt(x)
        if y: return 0.5*ctx.sqrt(y)
        return 0.5*ctx.sqrt(z)
    if zeros == 1:
        # The formula below divides by sqrt(z); make sure z != 0
        # by swapping it with x (R_G is fully symmetric)
        if not z:
            x, z = z, x
    # R_G = z R_F/2 - (x-z)(y-z) R_D/6 + sqrt(xy/z)/2, summed with
    # compensation since the terms may nearly cancel
    def terms():
        T1 = 0.5*z*ctx.elliprf(x,y,z)
        T2 = -0.5*(x-z)*(y-z)*ctx.elliprd(x,y,z)/3
        T3 = 0.5*ctx.sqrt(x)*ctx.sqrt(y)/ctx.sqrt(z)
        return T1,T2,T3
    return ctx.sum_accurately(terms)
|
| 987 |
+
|
| 988 |
+
|
| 989 |
+
@defun_wrapped
def ellipf(ctx, phi, m):
    r"""
    Evaluates the Legendre incomplete elliptic integral of the first kind

    .. math ::

        F(\phi,m) = \int_0^{\phi} \frac{dt}{\sqrt{1-m \sin^2 t}}.

    The function reduces to a complete elliptic integral of the first
    kind (see :func:`~mpmath.ellipk`) when `\phi = \frac{\pi}{2}`.
    Outside `-\pi/2 \le \Re(\phi) \le \pi/2`, it extends
    quasi-periodically as

    .. math ::

        F(\phi + n \pi, m) = 2 n K(m) + F(\phi,m), n \in \mathbb{Z}.

    **Examples**

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> ellipf(0,0)
        0.0
        >>> ellipf(1,0); ellipf(2+3j,0)
        1.0
        (2.0 + 3.0j)
        >>> ellipf(pi/2, -0.5); ellipk(-0.5)
        1.415737208425956198892166
        1.415737208425956198892166
        >>> ellipf(1.5, 1)
        3.340677542798311003320813
    """
    w = phi
    # Degenerate / non-finite arguments: return the correct limit with
    # the correct type, or fail loudly
    if not (ctx.isnormal(w) and ctx.isnormal(m)):
        if m == 0:
            return w + m
        if w == 0:
            return w * m
        if m == ctx.inf or m == ctx.ninf:
            return w/m
        raise ValueError
    re_w = w.real
    # Large real parts need extra precision for the reduction below
    ctx.prec += max(0, ctx.mag(re_w))
    pi = +ctx.pi
    outside = abs(re_w) > pi/2
    # For m = 1 the integral diverges beyond the fundamental strip
    if m == 1 and outside:
        return ctx.inf
    if outside:
        # Quasi-periodic reduction into |Re(w)| <= pi/2
        n = ctx.nint(re_w/pi)
        w = w - pi*n
        shift = 2*n*ctx.ellipk(m)
    else:
        shift = 0
    # F(w,m) = sin(w) R_F(cos^2 w, 1 - m sin^2 w, 1)
    c, s = ctx.cos_sin(w)
    return s * ctx.elliprf(c**2, 1 - m*s**2, 1) + shift
|
| 1105 |
+
|
| 1106 |
+
@defun_wrapped
def ellipe(ctx, *args):
    r"""
    Called with a single argument `m`, evaluates the Legendre complete
    elliptic integral of the second kind, `E(m)`, defined by

        .. math :: E(m) = \int_0^{\pi/2} \sqrt{1-m \sin^2 t} \, dt \,=\,
        \frac{\pi}{2}
        \,_2F_1\left(\frac{1}{2}, -\frac{1}{2}, 1, m\right).

    Called with two arguments `\phi, m`, evaluates the incomplete elliptic
    integral of the second kind

    .. math ::

        E(\phi,m) = \int_0^{\phi} \sqrt{1-m \sin^2 t} \, dt =
        \int_0^{\sin z}
        \frac{\sqrt{1-mt^2}}{\sqrt{1-t^2}} \, dt.

    The incomplete integral reduces to a complete integral when
    `\phi = \frac{\pi}{2}`; that is,

    .. math ::

        E\left(\frac{\pi}{2}, m\right) = E(m).

    In the defining integral, it is assumed that the principal branch
    of the square root is taken and that the path of integration avoids
    crossing any branch cuts. Outside `-\pi/2 \le \Re(z) \le \pi/2`,
    the function extends quasi-periodically as

    .. math ::

        E(\phi + n \pi, m) = 2 n E(m) + E(\phi,m), n \in \mathbb{Z}.

    **Plots**

    .. literalinclude :: /plots/ellipe.py
    .. image :: /plots/ellipe.png

    **Examples for the complete integral**

    Basic values and limits::

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> ellipe(0)
        1.570796326794896619231322
        >>> ellipe(1)
        1.0
        >>> ellipe(-1)
        1.910098894513856008952381
        >>> ellipe(2)
        (0.5990701173677961037199612 + 0.5990701173677961037199612j)
        >>> ellipe(inf)
        (0.0 + +infj)
        >>> ellipe(-inf)
        +inf

    Verifying the defining integral and hypergeometric
    representation::

        >>> ellipe(0.5)
        1.350643881047675502520175
        >>> quad(lambda t: sqrt(1-0.5*sin(t)**2), [0, pi/2])
        1.350643881047675502520175
        >>> pi/2*hyp2f1(0.5,-0.5,1,0.5)
        1.350643881047675502520175

    Evaluation is supported for arbitrary complex `m`::

        >>> ellipe(0.5+0.25j)
        (1.360868682163129682716687 - 0.1238733442561786843557315j)
        >>> ellipe(3+4j)
        (1.499553520933346954333612 - 1.577879007912758274533309j)

    A definite integral::

        >>> quad(ellipe, [0,1])
        1.333333333333333333333333

    **Examples for the incomplete integral**

    Basic values and limits::

        >>> ellipe(0,1)
        0.0
        >>> ellipe(0,0)
        0.0
        >>> ellipe(1,0)
        1.0
        >>> ellipe(2+3j,0)
        (2.0 + 3.0j)
        >>> ellipe(1,1); sin(1)
        0.8414709848078965066525023
        0.8414709848078965066525023
        >>> ellipe(pi/2, -0.5); ellipe(-0.5)
        1.751771275694817862026502
        1.751771275694817862026502
        >>> ellipe(pi/2, 1); ellipe(-pi/2, 1)
        1.0
        -1.0
        >>> ellipe(1.5, 1)
        0.9974949866040544309417234

    Comparing with numerical integration::

        >>> z,m = 0.5, 1.25
        >>> ellipe(z,m)
        0.4740152182652628394264449
        >>> quad(lambda t: sqrt(1-m*sin(t)**2), [0,z])
        0.4740152182652628394264449

    The arguments may be complex numbers::

        >>> ellipe(3j, 0.5)
        (0.0 + 7.551991234890371873502105j)
        >>> ellipe(3+4j, 5-6j)
        (24.15299022574220502424466 + 75.2503670480325997418156j)
        >>> k = 35
        >>> z,m = 2+3j, 1.25
        >>> ellipe(z+pi*k,m); ellipe(z,m) + 2*k*ellipe(m)
        (48.30138799412005235090766 + 17.47255216721987688224357j)
        (48.30138799412005235090766 + 17.47255216721987688224357j)

    For `|\Re(z)| < \pi/2`, the function can be expressed as a
    hypergeometric series of two variables
    (see :func:`~mpmath.appellf1`)::

        >>> z,m = 0.5, 0.25
        >>> ellipe(z,m)
        0.4950017030164151928870375
        >>> sin(z)*appellf1(0.5,0.5,-0.5,1.5,sin(z)**2,m*sin(z)**2)
        0.4950017030164151928870376

    """
    if len(args) == 1:
        # Single argument: complete integral E(m), delegated to the
        # context's dedicated implementation.
        return ctx._ellipe(args[0])
    else:
        phi, m = args
        z = phi
        # Special/degenerate arguments (zero, inf, nan) are handled first;
        # the arithmetic below propagates nan and preserves the type of z.
        if not (ctx.isnormal(z) and ctx.isnormal(m)):
            if m == 0:
                # E(z, 0) = z (and nan propagates through the addition)
                return z + m
            if z == 0:
                return z * m
            if m == ctx.inf or m == ctx.ninf:
                return ctx.inf
            raise ValueError
        x = z.real
        # Extra precision for the argument reduction below, proportional
        # to the magnitude of Re(z).
        ctx.prec += max(0, ctx.mag(x))
        pi = +ctx.pi
        away = abs(x) > pi/2
        if away:
            # Quasi-periodicity: E(z + d*pi, m) = 2*d*E(m) + E(z - d*pi, m)
            d = ctx.nint(x/pi)
            z = z-pi*d
            P = 2*d*ctx.ellipe(m)
        else:
            P = 0
        def terms():
            # Carlson symmetric-form representation:
            # E(z,m) = s*R_F(c^2, 1-m s^2, 1) - (m/3) s^3 R_D(c^2, 1-m s^2, 1)
            # summed accurately to control cancellation between the terms.
            c, s = ctx.cos_sin(z)
            x = c**2
            y = 1-m*s**2
            RF = ctx.elliprf(x, y, 1)
            RD = ctx.elliprd(x, y, 1)
            return s*RF, -m*s**3*RD/3
        return ctx.sum_accurately(terms) + P
|
| 1273 |
+
|
| 1274 |
+
@defun_wrapped
def ellippi(ctx, *args):
    r"""
    Called with three arguments `n, \phi, m`, evaluates the Legendre
    incomplete elliptic integral of the third kind

    .. math ::

        \Pi(n; \phi, m) = \int_0^{\phi}
        \frac{dt}{(1-n \sin^2 t) \sqrt{1-m \sin^2 t}} =
        \int_0^{\sin \phi}
        \frac{dt}{(1-nt^2) \sqrt{1-t^2} \sqrt{1-mt^2}}.

    Called with two arguments `n, m`, evaluates the complete
    elliptic integral of the third kind
    `\Pi(n,m) = \Pi(n; \frac{\pi}{2},m)`.

    In the defining integral, it is assumed that the principal branch
    of the square root is taken and that the path of integration avoids
    crossing any branch cuts. Outside `-\pi/2 \le \Re(\phi) \le \pi/2`,
    the function extends quasi-periodically as

    .. math ::

        \Pi(n,\phi+k\pi,m) = 2k\Pi(n,m) + \Pi(n,\phi,m), k \in \mathbb{Z}.

    **Plots**

    .. literalinclude :: /plots/ellippi.py
    .. image :: /plots/ellippi.png

    **Examples for the complete integral**

    Some basic values and limits::

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> ellippi(0,-5); ellipk(-5)
        0.9555039270640439337379334
        0.9555039270640439337379334
        >>> ellippi(inf,2)
        0.0
        >>> ellippi(2,inf)
        0.0
        >>> abs(ellippi(1,5))
        +inf
        >>> abs(ellippi(0.25,1))
        +inf

    Evaluation in terms of simpler functions::

        >>> ellippi(0.25,0.25); ellipe(0.25)/(1-0.25)
        1.956616279119236207279727
        1.956616279119236207279727
        >>> ellippi(3,0); pi/(2*sqrt(-2))
        (0.0 - 1.11072073453959156175397j)
        (0.0 - 1.11072073453959156175397j)
        >>> ellippi(-3,0); pi/(2*sqrt(4))
        0.7853981633974483096156609
        0.7853981633974483096156609

    **Examples for the incomplete integral**

    Basic values and limits::

        >>> ellippi(0.25,-0.5); ellippi(0.25,pi/2,-0.5)
        1.622944760954741603710555
        1.622944760954741603710555
        >>> ellippi(1,0,1)
        0.0
        >>> ellippi(inf,0,1)
        0.0
        >>> ellippi(0,0.25,0.5); ellipf(0.25,0.5)
        0.2513040086544925794134591
        0.2513040086544925794134591
        >>> ellippi(1,1,1); (log(sec(1)+tan(1))+sec(1)*tan(1))/2
        2.054332933256248668692452
        2.054332933256248668692452
        >>> ellippi(0.25, 53*pi/2, 0.75); 53*ellippi(0.25,0.75)
        135.240868757890840755058
        135.240868757890840755058
        >>> ellippi(0.5,pi/4,0.5); 2*ellipe(pi/4,0.5)-1/sqrt(3)
        0.9190227391656969903987269
        0.9190227391656969903987269

    Complex arguments are supported::

        >>> ellippi(0.5, 5+6j-2*pi, -7-8j)
        (-0.3612856620076747660410167 + 0.5217735339984807829755815j)

    Some degenerate cases::

        >>> ellippi(1,1)
        +inf
        >>> ellippi(1,0)
        +inf
        >>> ellippi(1,2,0)
        +inf
        >>> ellippi(1,2,1)
        +inf
        >>> ellippi(1,0,1)
        0.0

    """
    # Two arguments -> complete integral (phi = pi/2); three -> incomplete.
    if len(args) == 2:
        n, m = args
        complete = True
        z = phi = ctx.pi/2
    else:
        n, phi, m = args
        complete = False
        z = phi
    # Special/degenerate arguments: nan raises; zeros and infinities get
    # exact limit values where a finite/defined limit exists.
    if not (ctx.isnormal(n) and ctx.isnormal(z) and ctx.isnormal(m)):
        if ctx.isnan(n) or ctx.isnan(z) or ctx.isnan(m):
            raise ValueError
        if complete:
            if m == 0:
                if n == 1:
                    return ctx.inf
                # Pi(n, 0) = pi / (2 sqrt(1-n))
                return ctx.pi/(2*ctx.sqrt(1-n))
            if n == 0: return ctx.ellipk(m)
            if ctx.isinf(n) or ctx.isinf(m): return ctx.zero
        else:
            if z == 0: return z
            if ctx.isinf(n): return ctx.zero
            if ctx.isinf(m): return ctx.zero
        if ctx.isinf(n) or ctx.isinf(z) or ctx.isinf(m):
            raise ValueError
    if complete:
        if m == 1:
            if n == 1:
                return ctx.inf
            # Sign of the divergence depends on which side of n = 1 we are on
            return -ctx.inf/ctx.sign(n-1)
        away = False
    else:
        x = z.real
        # Extra precision for argument reduction proportional to |Re(z)|
        ctx.prec += max(0, ctx.mag(x))
        pi = +ctx.pi
        away = abs(x) > pi/2
    if away:
        # Quasi-periodicity: Pi(n, z + d*pi, m) = 2*d*Pi(n,m) + Pi(n, z-d*pi, m)
        d = ctx.nint(x/pi)
        z = z-pi*d
        P = 2*d*ctx.ellippi(n,m)
        if ctx.isinf(P):
            return ctx.inf
    else:
        P = 0
    def terms():
        # Carlson symmetric-form representation:
        # Pi = s*R_F(c^2, 1-m s^2, 1) + (n/3) s^3 R_J(c^2, 1-m s^2, 1, 1-n s^2),
        # summed accurately to control cancellation.
        if complete:
            # cos(pi/2) = 0, sin(pi/2) = 1 exactly
            c, s = ctx.zero, ctx.one
        else:
            c, s = ctx.cos_sin(z)
        x = c**2
        y = 1-m*s**2
        RF = ctx.elliprf(x, y, 1)
        RJ = ctx.elliprj(x, y, 1, 1-n*s**2)
        return s*RF, n*s**3*RJ/3
    return ctx.sum_accurately(terms) + P
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/functions/expintegrals.py
ADDED
|
@@ -0,0 +1,425 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .functions import defun, defun_wrapped
|
| 2 |
+
|
| 3 |
+
@defun_wrapped
def _erf_complex(ctx, z):
    """Error function for general complex z via the 1F1 series."""
    # erf(z) = (2/sqrt(pi)) * z * 1F1(1/2; 3/2; -z^2)
    neg_zsq = ctx.square_exp_arg(z, -1)
    result = (2/ctx.sqrt(ctx.pi))*z * ctx.hyp1f1((1,2),(3,2), neg_zsq)
    if not ctx._re(z):
        # Input on the imaginary axis: the result is purely imaginary
        result = ctx._im(result)*ctx.j
    return result
|
| 11 |
+
|
| 12 |
+
@defun_wrapped
def _erfc_complex(ctx, z):
    """Complementary error function for general complex z."""
    if ctx.re(z) > 2:
        # Far right half-plane: erfc(z) = exp(-z^2)/sqrt(pi) * U(1/2; 1/2; z^2)
        zsq = ctx.square_exp_arg(z)
        neg_zsq = ctx.fneg(zsq, exact=True)
        result = ctx.exp(neg_zsq)/ctx.sqrt(ctx.pi) * ctx.hyperu((1,2),(1,2), zsq)
    else:
        # Elsewhere, compute as the complement of erf
        result = 1 - ctx._erf_complex(z)
    if not ctx._re(z):
        # Imaginary-axis input: real part of erfc is exactly 1
        result = 1+ctx._im(result)*ctx.j
    return result
|
| 23 |
+
|
| 24 |
+
@defun
def erf(ctx, z):
    """Error function erf(z); fast real path when available."""
    z = ctx.convert(z)
    if ctx._is_real_type(z):
        try:
            return ctx._erf(z)
        except NotImplementedError:
            pass
    if ctx._is_complex_type(z) and not z.imag:
        # Complex number lying on the real axis: evaluate the real
        # implementation and re-wrap in the complex type.
        try:
            return type(z)(ctx._erf(z.real))
        except NotImplementedError:
            pass
    # Generic complex fallback
    return ctx._erf_complex(z)
|
| 38 |
+
|
| 39 |
+
@defun
def erfc(ctx, z):
    """Complementary error function erfc(z); fast real path when available."""
    z = ctx.convert(z)
    if ctx._is_real_type(z):
        try:
            return ctx._erfc(z)
        except NotImplementedError:
            pass
    if ctx._is_complex_type(z) and not z.imag:
        # Complex number lying on the real axis: evaluate the real
        # implementation and re-wrap in the complex type.
        try:
            return type(z)(ctx._erfc(z.real))
        except NotImplementedError:
            pass
    # Generic complex fallback
    return ctx._erfc_complex(z)
|
| 53 |
+
|
| 54 |
+
@defun
def square_exp_arg(ctx, z, mult=1, reciprocal=False):
    """Compute mult*z**2 (or mult/z**2) at a boosted working precision.

    Used to form exponent arguments like -z**2 accurately before they are
    fed to exp/hypergeometric routines.
    """
    work_prec = ctx.prec*4+20
    sq = ctx.fmul(z, z, prec=work_prec)
    if reciprocal:
        sq = ctx.fdiv(ctx.one, sq, prec=work_prec)
    if mult != 1:
        # The multiplier is applied exactly (no rounding)
        sq = ctx.fmul(sq, mult, exact=True)
    return sq
|
| 65 |
+
|
| 66 |
+
@defun_wrapped
def erfi(ctx, z):
    """Imaginary error function via the 1F1 series with argument +z^2."""
    if not z:
        return z
    zsq = ctx.square_exp_arg(z)
    result = (2/ctx.sqrt(ctx.pi)*z) * ctx.hyp1f1((1,2), (3,2), zsq)
    if not ctx._re(z):
        # Imaginary-axis input gives a purely imaginary result
        result = ctx._im(result)*ctx.j
    return result
|
| 75 |
+
|
| 76 |
+
@defun_wrapped
def erfinv(ctx, x):
    """Inverse error function on [-1, 1], computed by root-finding.

    A cheap initial approximation is refined with ``ctx.findroot`` applied
    to erf(t) - x.
    """
    xre = ctx._re(x)
    # Reject complex input and real input outside [-1, 1]
    if (xre != x) or (xre < -1) or (xre > 1):
        return ctx.bad_domain("erfinv(x) is defined only for -1 <= x <= 1")
    x = xre
    #if ctx.isnan(x): return x
    if not x: return x
    if x == 1: return ctx.inf
    if x == -1: return ctx.ninf
    if abs(x) < 0.9:
        # Low-order polynomial seed for the central region
        # (constants are empirical fit coefficients)
        a = 0.53728*x**3 + 0.813198*x
    else:
        # An asymptotic formula
        u = ctx.ln(2/ctx.pi/(abs(x)-1)**2)
        a = ctx.sign(x) * ctx.sqrt(u - ctx.ln(u))/ctx.sqrt(2)
    # Guard digits for the Newton-type refinement
    ctx.prec += 10
    return ctx.findroot(lambda t: ctx.erf(t)-x, a)
|
| 94 |
+
|
| 95 |
+
@defun_wrapped
def npdf(ctx, x, mu=0, sigma=1):
    """Normal (Gaussian) probability density with mean mu and std dev sigma."""
    sigma = ctx.convert(sigma)
    numerator = ctx.exp(-(x-mu)**2/(2*sigma**2))
    normalizer = sigma*ctx.sqrt(2*ctx.pi)
    return numerator / normalizer
|
| 99 |
+
|
| 100 |
+
@defun_wrapped
def ncdf(ctx, x, mu=0, sigma=1):
    """Cumulative distribution function of the normal distribution."""
    t = (x-mu)/(sigma*ctx.sqrt(2))
    # For negative arguments, erfc avoids cancellation in 1 + erf
    if t < 0:
        return ctx.erfc(-t)/2
    return (1+ctx.erf(t))/2
|
| 107 |
+
|
| 108 |
+
@defun_wrapped
def betainc(ctx, a, b, x1=0, x2=1, regularized=False):
    """Generalized incomplete beta function B(a, b; x1, x2).

    Integrates t^(a-1) (1-t)^(b-1) from x1 to x2; with ``regularized``
    the result is divided by the complete beta function B(a, b).
    """
    if x1 == x2:
        v = 0
    elif not x1:
        if x1 == 0 and x2 == 1:
            # Complete beta function
            v = ctx.beta(a, b)
        else:
            # Lower incomplete beta via the standard 2F1 representation
            v = x2**a * ctx.hyp2f1(a, 1-b, a+1, x2) / a
    else:
        # General x1 > 0: difference of two incomplete integrals.
        # Near a nonpositive integer a the two 2F1 terms nearly cancel,
        # so add guard precision (or perturb a when it is extremely close).
        m, d = ctx.nint_distance(a)
        if m <= 0:
            if d < -ctx.prec:
                h = +ctx.eps
                ctx.prec *= 2
                a += h
            elif d < -4:
                ctx.prec -= d
        s1 = x2**a * ctx.hyp2f1(a,1-b,a+1,x2)
        s2 = x1**a * ctx.hyp2f1(a,1-b,a+1,x1)
        v = (s1 - s2) / a
    if regularized:
        v /= ctx.beta(a,b)
    return v
|
| 132 |
+
|
| 133 |
+
@defun
def gammainc(ctx, z, a=0, b=None, regularized=False):
    """Generalized incomplete gamma function Gamma(z, a, b).

    Integrates t^(z-1) e^(-t) from a to b (defaults: a=0, b=inf, i.e. the
    complete gamma function). With ``regularized`` the result is divided
    by gamma(z). Dispatches to specialized helpers for the lower, upper,
    and doubly-generalized cases.
    """
    regularized = bool(regularized)
    z = ctx.convert(z)
    if a is None:
        a = ctx.zero
        lower_modified = False
    else:
        a = ctx.convert(a)
        lower_modified = a != ctx.zero
    if b is None:
        b = ctx.inf
        upper_modified = False
    else:
        b = ctx.convert(b)
        upper_modified = b != ctx.inf
    # Complete gamma function
    if not (upper_modified or lower_modified):
        if regularized:
            # gamma(z)/gamma(z) with sign of divergence from Re(z)
            if ctx.re(z) < 0:
                return ctx.inf
            elif ctx.re(z) > 0:
                return ctx.one
            else:
                return ctx.nan
        return ctx.gamma(z)
    if a == b:
        return ctx.zero
    # Standardize so that Re(a) <= Re(b), flipping the sign of the integral
    if ctx.re(a) > ctx.re(b):
        return -ctx.gammainc(z, b, a, regularized)
    # Generalized gamma
    if upper_modified and lower_modified:
        return +ctx._gamma3(z, a, b, regularized)
    # Upper gamma
    elif lower_modified:
        return ctx._upper_gamma(z, a, regularized)
    # Lower gamma
    elif upper_modified:
        return ctx._lower_gamma(z, b, regularized)
|
| 173 |
+
|
| 174 |
+
@defun
def _lower_gamma(ctx, z, b, regularized=False):
    """Lower incomplete gamma gamma(z, b), via a hypercomb representation."""
    # Pole of gamma(z): the lower incomplete gamma diverges there
    if ctx.isnpint(z):
        return type(z)(ctx.inf)
    # Regularization is folded into hypercomb's gamma-denominator list
    G = [z] * regularized
    negb = ctx.fneg(b, exact=True)
    def h(z):
        # b^z e^{-b} / z * 1F1(1; 1+z; b), encoded as a hypercomb term
        T1 = [ctx.exp(negb), b, z], [1, z, -1], [], G, [1], [1+z], b
        return (T1,)
    return ctx.hypercomb(h, [z])
|
| 185 |
+
|
| 186 |
+
@defun
def _upper_gamma(ctx, z, a, regularized=False):
    """Upper incomplete gamma Gamma(z, a), optionally regularized."""
    # Fast integer case, when available
    if ctx.isint(z):
        try:
            if regularized:
                # Gamma pole
                if ctx.isnpint(z):
                    return type(z)(ctx.zero)
                orig = ctx.prec
                try:
                    # Guard digits for the division by gamma(z)
                    ctx.prec += 10
                    return ctx._gamma_upper_int(z, a) / ctx.gamma(z)
                finally:
                    ctx.prec = orig
            else:
                return ctx._gamma_upper_int(z, a)
        except NotImplementedError:
            pass
    # hypercomb is unable to detect the exact zeros, so handle them here
    if z == 2 and a == -1:
        return (z+a)*0
    if z == 3 and (a == -1-1j or a == -1+1j):
        return (z+a)*0
    nega = ctx.fneg(a, exact=True)
    G = [z] * regularized
    # Use 2F0 series when possible; fall back to lower gamma representation
    try:
        def h(z):
            # Asymptotic term: e^{-a} a^{z-1} 2F0(1, 1-z; ; -1/a)
            r = z-1
            return [([ctx.exp(nega), a], [1, r], [], G, [1, -r], [], 1/nega)]
        return ctx.hypercomb(h, [z], force_series=True)
    except ctx.NoConvergence:
        def h(z):
            # Gamma(z, a) = gamma(z) - gamma(z, 0, a), expressed as two terms
            T1 = [], [1, z-1], [z], G, [], [], 0
            T2 = [-ctx.exp(nega), a, z], [1, z, -1], [], G, [1], [1+z], a
            return T1, T2
        return ctx.hypercomb(h, [z])
|
| 224 |
+
|
| 225 |
+
@defun
def _gamma3(ctx, z, a, b, regularized=False):
    """Doubly-generalized incomplete gamma Gamma(z, a, b), 0 < a < b < inf.

    Evaluated as a difference of upper (or lower) incomplete gammas,
    whichever difference does not lose too many digits to cancellation.
    """
    pole = ctx.isnpint(z)
    if regularized and pole:
        return ctx.zero
    try:
        # Guard digits against cancellation in the differences below
        ctx.prec += 15
        # We don't know in advance whether it's better to write as a difference
        # of lower or upper gamma functions, so try both
        T1 = ctx.gammainc(z, a, regularized=regularized)
        T2 = ctx.gammainc(z, b, regularized=regularized)
        R = T1 - T2
        # Accept if fewer than ~10 bits were lost to cancellation
        if ctx.mag(R) - max(ctx.mag(T1), ctx.mag(T2)) > -10:
            return R
        if not pole:
            T1 = ctx.gammainc(z, 0, b, regularized=regularized)
            T2 = ctx.gammainc(z, 0, a, regularized=regularized)
            R = T1 - T2
            # May be ok, but should probably at least print a warning
            # about possible cancellation
            if 1: #ctx.mag(R) - max(ctx.mag(T1), ctx.mag(T2)) > -10:
                return R
    finally:
        ctx.prec -= 15
    raise NotImplementedError
|
| 250 |
+
|
| 251 |
+
@defun_wrapped
def expint(ctx, n, z):
    """Generalized exponential integral E_n(z).

    Handles special values (nan, inf, z = 0, n = 0, n = -1) explicitly
    and otherwise reduces to the upper incomplete gamma function.
    """
    if ctx.isint(n) and ctx._is_real_type(z):
        try:
            # Fast path for integer order, real argument, when available
            return ctx._expint_int(n, z)
        except NotImplementedError:
            pass
    if ctx.isnan(n) or ctx.isnan(z):
        # Propagate nan
        return z*n
    if z == ctx.inf:
        return 1/z
    if z == 0:
        # integral from 1 to infinity of t^n
        if ctx.re(n) <= 1:
            # TODO: reasonable sign of infinity
            return type(z)(ctx.inf)
        else:
            return ctx.one/(n-1)
    if n == 0:
        return ctx.exp(-z)/z
    if n == -1:
        return ctx.exp(-z)*(z+1)/z**2
    # E_n(z) = z^(n-1) * Gamma(1-n, z)
    return z**(n-1) * ctx.gammainc(1-n, z)
|
| 274 |
+
|
| 275 |
+
@defun_wrapped
def li(ctx, z, offset=False):
    """Logarithmic integral li(z) = Ei(ln z).

    With ``offset=True``, returns the offset logarithmic integral
    Li(z) = li(z) - li(2), which vanishes at z = 2.
    """
    if offset:
        if z == 2:
            return ctx.zero
        return ctx.ei(ctx.ln(z)) - ctx.ei(ctx.ln2)
    # li(0) = 0, preserving the input type
    if not z:
        return z
    # li has a singularity at z = 1
    if z == 1:
        return ctx.ninf
    return ctx.ei(ctx.ln(z))
|
| 286 |
+
|
| 287 |
+
@defun
def ei(ctx, z):
    """Exponential integral Ei(z); generic fallback when the context has
    no specialized implementation."""
    try:
        return ctx._ei(z)
    except NotImplementedError:
        return ctx._ei_generic(z)
|
| 293 |
+
|
| 294 |
+
@defun_wrapped
def _ei_generic(ctx, z):
    """Generic Ei(z): asymptotic series for large |z|, convergent 2F2
    series otherwise, with branch-cut corrections."""
    # Note: the following is currently untested because mp and fp
    # both use special-case ei code
    if z == ctx.inf:
        return z
    if z == ctx.ninf:
        return ctx.zero
    if ctx.mag(z) > 1:
        try:
            # Divergent asymptotic series e^z/z * 2F0(1,1;;1/z),
            # summed as far as it converges (maxterms-limited)
            r = ctx.one/z
            v = ctx.exp(z)*ctx.hyper([1,1],[],r,
                maxterms=ctx.prec, force_series=True)/z
            im = ctx._im(z)
            # Branch-cut correction: +/- pi*i depending on the half-plane
            if im > 0:
                v += ctx.pi*ctx.j
            if im < 0:
                v -= ctx.pi*ctx.j
            return v
        except ctx.NoConvergence:
            pass
    # Convergent series: Ei(z) = euler + log(z) + z * 2F2(1,1;2,2;z)
    v = z*ctx.hyp2f2(1,1,2,2,z) + ctx.euler
    if ctx._im(z):
        # 0.5*(log(z) - log(1/z)) picks the branch consistently for complex z
        v += 0.5*(ctx.log(z) - ctx.log(ctx.one/z))
    else:
        v += ctx.log(abs(z))
    return v
|
| 321 |
+
|
| 322 |
+
@defun
def e1(ctx, z):
    """Exponential integral E_1(z); falls back to expint(1, z) when the
    context has no specialized implementation."""
    try:
        return ctx._e1(z)
    except NotImplementedError:
        return ctx.expint(1, z)
|
| 328 |
+
|
| 329 |
+
@defun
def ci(ctx, z):
    """Cosine integral Ci(z); generic fallback when the context has no
    specialized implementation."""
    try:
        return ctx._ci(z)
    except NotImplementedError:
        return ctx._ci_generic(z)
|
| 335 |
+
|
| 336 |
+
@defun_wrapped
def _ci_generic(ctx, z):
    """Generic Ci(z) via Ei at +/- i*z, with branch-cut corrections."""
    if ctx.isinf(z):
        if z == ctx.inf: return ctx.zero
        if z == ctx.ninf: return ctx.pi*1j
    # Ci(z) = (Ei(iz) + Ei(-iz)) / 2; the multiplications by i are exact
    jz = ctx.fmul(ctx.j,z,exact=True)
    njz = ctx.fneg(jz,exact=True)
    v = 0.5*(ctx.ei(jz) + ctx.ei(njz))
    zreal = ctx._re(z)
    zimag = ctx._im(z)
    # Branch corrections depending on the quadrant of z
    if zreal == 0:
        if zimag > 0: v += ctx.pi*0.5j
        if zimag < 0: v -= ctx.pi*0.5j
    if zreal < 0:
        if zimag >= 0: v += ctx.pi*1j
        if zimag < 0: v -= ctx.pi*1j
    # Positive real input yields a real result; drop rounding residue
    if ctx._is_real_type(z) and zreal > 0:
        v = ctx._re(v)
    return v
|
| 355 |
+
|
| 356 |
+
@defun
def si(ctx, z):
    """Sine integral Si(z); generic fallback when the context has no
    specialized implementation."""
    try:
        return ctx._si(z)
    except NotImplementedError:
        return ctx._si_generic(z)
|
| 362 |
+
|
| 363 |
+
@defun_wrapped
def _si_generic(ctx, z):
    """Generic Si(z): Ei-based for moderate/large |z|, 1F2 series near 0."""
    if ctx.isinf(z):
        if z == ctx.inf: return 0.5*ctx.pi
        if z == ctx.ninf: return -0.5*ctx.pi
    # Suffers from cancellation near 0
    if ctx.mag(z) >= -1:
        # Si(z) = -i/2 * (Ei(iz) - Ei(-iz)) +/- pi/2; exact multiplications by i
        jz = ctx.fmul(ctx.j,z,exact=True)
        njz = ctx.fneg(jz,exact=True)
        v = (-0.5j)*(ctx.ei(jz) - ctx.ei(njz))
        zreal = ctx._re(z)
        if zreal > 0:
            v -= 0.5*ctx.pi
        if zreal < 0:
            v += 0.5*ctx.pi
        # Real input yields a real result; drop rounding residue
        if ctx._is_real_type(z):
            v = ctx._re(v)
        return v
    else:
        # Near zero: convergent series z * 1F2(1/2; 3/2, 3/2; -z^2/4)
        return z*ctx.hyp1f2((1,2),(3,2),(3,2),-0.25*z*z)
|
| 383 |
+
|
| 384 |
+
@defun_wrapped
def chi(ctx, z):
    """Hyperbolic cosine integral Chi(z) = (Ei(z) + Ei(-z)) / 2."""
    neg_z = ctx.fneg(z, exact=True)
    result = 0.5*(ctx.ei(z) + ctx.ei(neg_z))
    zreal = ctx._re(z)
    zimag = ctx._im(z)
    # Branch corrections depending on where z lies relative to the cut
    if zimag > 0:
        result += ctx.pi*0.5j
    elif zimag < 0:
        result -= ctx.pi*0.5j
    elif zreal < 0:
        result += ctx.pi*1j
    return result
|
| 397 |
+
|
| 398 |
+
@defun_wrapped
def shi(ctx, z):
    """Hyperbolic sine integral Shi(z)."""
    # Suffers from cancellation near 0
    if ctx.mag(z) >= -1:
        # Shi(z) = (Ei(z) - Ei(-z)) / 2, with branch corrections off the axis
        neg_z = ctx.fneg(z, exact=True)
        result = 0.5*(ctx.ei(z) - ctx.ei(neg_z))
        zimag = ctx._im(z)
        if zimag > 0:
            result -= 0.5j*ctx.pi
        if zimag < 0:
            result += 0.5j*ctx.pi
        return result
    else:
        # Near zero: convergent series z * 1F2(1/2; 3/2, 3/2; z^2/4)
        return z * ctx.hyp1f2((1,2),(3,2),(3,2),0.25*z*z)
|
| 408 |
+
else:
|
| 409 |
+
return z * ctx.hyp1f2((1,2),(3,2),(3,2),0.25*z*z)
|
| 410 |
+
|
| 411 |
+
@defun_wrapped
def fresnels(ctx, z):
    """Fresnel sine integral S(z), via a 1F2 hypergeometric series."""
    if z == ctx.inf:
        return ctx.mpf(0.5)
    if z == ctx.ninf:
        return ctx.mpf(-0.5)
    hyp_arg = -ctx.pi**2*z**4/16
    prefactor = ctx.pi*z**3/6
    return prefactor*ctx.hyp1f2((3,4),(3,2),(7,4),hyp_arg)
|
| 418 |
+
|
| 419 |
+
@defun_wrapped
def fresnelc(ctx, z):
    """Fresnel cosine integral C(z), via a 1F2 hypergeometric series."""
    if z == ctx.inf:
        return ctx.mpf(0.5)
    if z == ctx.ninf:
        return ctx.mpf(-0.5)
    hyp_arg = -ctx.pi**2*z**4/16
    return z*ctx.hyp1f2((1,4),(1,2),(5,4),hyp_arg)
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/functions/rszeta.py
ADDED
|
@@ -0,0 +1,1403 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
---------------------------------------------------------------------
|
| 3 |
+
.. sectionauthor:: Juan Arias de Reyna <arias@us.es>
|
| 4 |
+
|
| 5 |
+
This module implements zeta-related functions using the Riemann-Siegel
|
| 6 |
+
expansion: zeta_offline(s,k=0)
|
| 7 |
+
|
| 8 |
+
* coef(J, eps): Need in the computation of Rzeta(s,k)
|
| 9 |
+
|
| 10 |
+
* Rzeta_simul(s, der=0) computes Rzeta^(k)(s) and Rzeta^(k)(1-s) simultaneously
|
| 11 |
+
for 0 <= k <= der. Used by zeta_offline and z_offline
|
| 12 |
+
|
| 13 |
+
* Rzeta_set(s, derivatives) computes Rzeta^(k)(s) for given derivatives, used by
|
| 14 |
+
z_half(t,k) and zeta_half
|
| 15 |
+
|
| 16 |
+
* z_offline(w,k): Z(w) and its derivatives of order k <= 4
|
| 17 |
+
* z_half(t,k): Z(t) (Riemann Siegel function) and its derivatives of order k <= 4
|
| 18 |
+
* zeta_offline(s): zeta(s) and its derivatives of order k<= 4
|
| 19 |
+
* zeta_half(1/2+it,k): zeta(s) and its derivatives of order k<= 4
|
| 20 |
+
|
| 21 |
+
* rs_zeta(s,k=0) Computes zeta^(k)(s) Unifies zeta_half and zeta_offline
|
| 22 |
+
* rs_z(w,k=0) Computes Z^(k)(w) Unifies z_offline and z_half
|
| 23 |
+
----------------------------------------------------------------------
|
| 24 |
+
|
| 25 |
+
This program uses Riemann-Siegel expansion even to compute
|
| 26 |
+
zeta(s) on points s = sigma + i t with sigma arbitrary not
|
| 27 |
+
necessarily equal to 1/2.
|
| 28 |
+
|
| 29 |
+
It is founded on a new deduction of the formula, with rigorous
|
| 30 |
+
and sharp bounds for the terms and rest of this expansion.
|
| 31 |
+
|
| 32 |
+
More information on the papers:
|
| 33 |
+
|
| 34 |
+
J. Arias de Reyna, High Precision Computation of Riemann's
|
| 35 |
+
Zeta Function by the Riemann-Siegel Formula I, II
|
| 36 |
+
|
| 37 |
+
We refer to them as I, II.
|
| 38 |
+
|
| 39 |
+
In them we shall find detailed explanation of all the
|
| 40 |
+
procedure.
|
| 41 |
+
|
| 42 |
+
The program uses Riemann-Siegel expansion.
|
| 43 |
+
This is useful when t is big, ( say t > 10000 ).
|
| 44 |
+
The precision is limited, roughly it can compute zeta(sigma+it)
|
| 45 |
+
with an error less than exp(-c t) for some constant c depending
|
| 46 |
+
on sigma. The program gives an error when the Riemann-Siegel
|
| 47 |
+
formula can not compute to the wanted precision.
|
| 48 |
+
|
| 49 |
+
"""
|
| 50 |
+
|
| 51 |
+
import math
|
| 52 |
+
|
| 53 |
+
class RSCache(object):
|
| 54 |
+
def __init__(ctx):
|
| 55 |
+
ctx._rs_cache = [0, 10, {}, {}]
|
| 56 |
+
|
| 57 |
+
from .functions import defun
|
| 58 |
+
|
| 59 |
+
#-------------------------------------------------------------------------------#
|
| 60 |
+
# #
|
| 61 |
+
# coef(ctx, J, eps, _cache=[0, 10, {} ] ) #
|
| 62 |
+
# #
|
| 63 |
+
#-------------------------------------------------------------------------------#
|
| 64 |
+
|
| 65 |
+
# This function computes the coefficients c[n] defined on (I, equation (47))
|
| 66 |
+
# but see also (II, section 3.14).
|
| 67 |
+
#
|
| 68 |
+
# Since these coefficients are very difficult to compute we save the values
|
| 69 |
+
# in a cache. So if we compute several values of the functions Rzeta(s) for
|
| 70 |
+
# near values of s, we do not recompute these coefficients.
|
| 71 |
+
#
|
| 72 |
+
# c[n] are the Taylor coefficients of the function:
|
| 73 |
+
#
|
| 74 |
+
# F(z):= (exp(pi*j*(z*z/2+3/8))-j* sqrt(2) cos(pi*z/2))/(2*cos(pi *z))
|
| 75 |
+
#
|
| 76 |
+
#
|
| 77 |
+
|
| 78 |
+
def _coef(ctx, J, eps):
|
| 79 |
+
r"""
|
| 80 |
+
Computes the coefficients `c_n` for `0\le n\le 2J` with error less than eps
|
| 81 |
+
|
| 82 |
+
**Definition**
|
| 83 |
+
|
| 84 |
+
The coefficients c_n are defined by
|
| 85 |
+
|
| 86 |
+
.. math ::
|
| 87 |
+
|
| 88 |
+
\begin{equation}
|
| 89 |
+
F(z)=\frac{e^{\pi i
|
| 90 |
+
\bigl(\frac{z^2}{2}+\frac38\bigr)}-i\sqrt{2}\cos\frac{\pi}{2}z}{2\cos\pi
|
| 91 |
+
z}=\sum_{n=0}^\infty c_{2n} z^{2n}
|
| 92 |
+
\end{equation}
|
| 93 |
+
|
| 94 |
+
they are computed applying the relation
|
| 95 |
+
|
| 96 |
+
.. math ::
|
| 97 |
+
|
| 98 |
+
\begin{multline}
|
| 99 |
+
c_{2n}=-\frac{i}{\sqrt{2}}\Bigl(\frac{\pi}{2}\Bigr)^{2n}
|
| 100 |
+
\sum_{k=0}^n\frac{(-1)^k}{(2k)!}
|
| 101 |
+
2^{2n-2k}\frac{(-1)^{n-k}E_{2n-2k}}{(2n-2k)!}+\\
|
| 102 |
+
+e^{3\pi i/8}\sum_{j=0}^n(-1)^j\frac{
|
| 103 |
+
E_{2j}}{(2j)!}\frac{i^{n-j}\pi^{n+j}}{(n-j)!2^{n-j+1}}.
|
| 104 |
+
\end{multline}
|
| 105 |
+
"""
|
| 106 |
+
|
| 107 |
+
newJ = J+2 # compute more coefficients that are needed
|
| 108 |
+
neweps6 = eps/2. # compute with a slight more precision that are needed
|
| 109 |
+
|
| 110 |
+
# PREPARATION FOR THE COMPUTATION OF V(N) AND W(N)
|
| 111 |
+
# See II Section 3.16
|
| 112 |
+
#
|
| 113 |
+
# Computing the exponent wpvw of the error II equation (81)
|
| 114 |
+
wpvw = max(ctx.mag(10*(newJ+3)), 4*newJ+5-ctx.mag(neweps6))
|
| 115 |
+
|
| 116 |
+
# Preparation of Euler numbers (we need until the 2*RS_NEWJ)
|
| 117 |
+
E = ctx._eulernum(2*newJ)
|
| 118 |
+
|
| 119 |
+
# Now we have in the cache all the needed Euler numbers.
|
| 120 |
+
#
|
| 121 |
+
# Computing the powers of pi
|
| 122 |
+
#
|
| 123 |
+
# We need to compute the powers pi**n for 1<= n <= 2*J
|
| 124 |
+
# with relative error less than 2**(-wpvw)
|
| 125 |
+
# it is easy to show that this is obtained
|
| 126 |
+
# taking wppi as the least d with
|
| 127 |
+
# 2**d>40*J and 2**d> 4.24 *newJ + 2**wpvw
|
| 128 |
+
# In II Section 3.9 we need also that
|
| 129 |
+
# wppi > wptcoef[0], and that the powers
|
| 130 |
+
# here computed 0<= k <= 2*newJ are more
|
| 131 |
+
# than those needed there that are 2*L-2.
|
| 132 |
+
# so we need J >= L this will be checked
|
| 133 |
+
# before computing tcoef[]
|
| 134 |
+
wppi = max(ctx.mag(40*newJ), ctx.mag(newJ)+3 +wpvw)
|
| 135 |
+
ctx.prec = wppi
|
| 136 |
+
pipower = {}
|
| 137 |
+
pipower[0] = ctx.one
|
| 138 |
+
pipower[1] = ctx.pi
|
| 139 |
+
for n in range(2,2*newJ+1):
|
| 140 |
+
pipower[n] = pipower[n-1]*ctx.pi
|
| 141 |
+
|
| 142 |
+
# COMPUTING THE COEFFICIENTS v(n) AND w(n)
|
| 143 |
+
# see II equation (61) and equations (81) and (82)
|
| 144 |
+
ctx.prec = wpvw+2
|
| 145 |
+
v={}
|
| 146 |
+
w={}
|
| 147 |
+
for n in range(0,newJ+1):
|
| 148 |
+
va = (-1)**n * ctx._eulernum(2*n)
|
| 149 |
+
va = ctx.mpf(va)/ctx.fac(2*n)
|
| 150 |
+
v[n]=va*pipower[2*n]
|
| 151 |
+
for n in range(0,2*newJ+1):
|
| 152 |
+
wa = ctx.one/ctx.fac(n)
|
| 153 |
+
wa=wa/(2**n)
|
| 154 |
+
w[n]=wa*pipower[n]
|
| 155 |
+
|
| 156 |
+
# COMPUTATION OF THE CONVOLUTIONS RS_P1 AND RS_P2
|
| 157 |
+
# See II Section 3.16
|
| 158 |
+
ctx.prec = 15
|
| 159 |
+
wpp1a = 9 - ctx.mag(neweps6)
|
| 160 |
+
P1 = {}
|
| 161 |
+
for n in range(0,newJ+1):
|
| 162 |
+
ctx.prec = 15
|
| 163 |
+
wpp1 = max(ctx.mag(10*(n+4)),4*n+wpp1a)
|
| 164 |
+
ctx.prec = wpp1
|
| 165 |
+
sump = 0
|
| 166 |
+
for k in range(0,n+1):
|
| 167 |
+
sump += ((-1)**k) * v[k]*w[2*n-2*k]
|
| 168 |
+
P1[n]=((-1)**(n+1))*ctx.j*sump
|
| 169 |
+
P2={}
|
| 170 |
+
for n in range(0,newJ+1):
|
| 171 |
+
ctx.prec = 15
|
| 172 |
+
wpp2 = max(ctx.mag(10*(n+4)),4*n+wpp1a)
|
| 173 |
+
ctx.prec = wpp2
|
| 174 |
+
sump = 0
|
| 175 |
+
for k in range(0,n+1):
|
| 176 |
+
sump += (ctx.j**(n-k)) * v[k]*w[n-k]
|
| 177 |
+
P2[n]=sump
|
| 178 |
+
# COMPUTING THE COEFFICIENTS c[2n]
|
| 179 |
+
# See II Section 3.14
|
| 180 |
+
ctx.prec = 15
|
| 181 |
+
wpc0 = 5 - ctx.mag(neweps6)
|
| 182 |
+
wpc = max(6,4*newJ+wpc0)
|
| 183 |
+
ctx.prec = wpc
|
| 184 |
+
mu = ctx.sqrt(ctx.mpf('2'))/2
|
| 185 |
+
nu = ctx.expjpi(3./8)/2
|
| 186 |
+
c={}
|
| 187 |
+
for n in range(0,newJ):
|
| 188 |
+
ctx.prec = 15
|
| 189 |
+
wpc = max(6,4*n+wpc0)
|
| 190 |
+
ctx.prec = wpc
|
| 191 |
+
c[2*n] = mu*P1[n]+nu*P2[n]
|
| 192 |
+
for n in range(1,2*newJ,2):
|
| 193 |
+
c[n] = 0
|
| 194 |
+
return [newJ, neweps6, c, pipower]
|
| 195 |
+
|
| 196 |
+
def coef(ctx, J, eps):
|
| 197 |
+
_cache = ctx._rs_cache
|
| 198 |
+
if J <= _cache[0] and eps >= _cache[1]:
|
| 199 |
+
return _cache[2], _cache[3]
|
| 200 |
+
orig = ctx._mp.prec
|
| 201 |
+
try:
|
| 202 |
+
data = _coef(ctx._mp, J, eps)
|
| 203 |
+
finally:
|
| 204 |
+
ctx._mp.prec = orig
|
| 205 |
+
if ctx is not ctx._mp:
|
| 206 |
+
data[2] = dict((k,ctx.convert(v)) for (k,v) in data[2].items())
|
| 207 |
+
data[3] = dict((k,ctx.convert(v)) for (k,v) in data[3].items())
|
| 208 |
+
ctx._rs_cache[:] = data
|
| 209 |
+
return ctx._rs_cache[2], ctx._rs_cache[3]
|
| 210 |
+
|
| 211 |
+
#-------------------------------------------------------------------------------#
|
| 212 |
+
# #
|
| 213 |
+
# Rzeta_simul(s,k=0) #
|
| 214 |
+
# #
|
| 215 |
+
#-------------------------------------------------------------------------------#
|
| 216 |
+
# This function return a list with the values:
|
| 217 |
+
# Rzeta(sigma+it), conj(Rzeta(1-sigma+it)),Rzeta'(sigma+it), conj(Rzeta'(1-sigma+it)),
|
| 218 |
+
# .... , Rzeta^{(k)}(sigma+it), conj(Rzeta^{(k)}(1-sigma+it))
|
| 219 |
+
#
|
| 220 |
+
# Useful to compute the function zeta(s) and Z(w) or its derivatives.
|
| 221 |
+
#
|
| 222 |
+
|
| 223 |
+
def aux_M_Fp(ctx, xA, xeps4, a, xB1, xL):
|
| 224 |
+
# COMPUTING M NUMBER OF DERIVATIVES Fp[m] TO COMPUTE
|
| 225 |
+
# See II Section 3.11 equations (47) and (48)
|
| 226 |
+
aux1 = 126.0657606*xA/xeps4 # 126.06.. = 316/sqrt(2*pi)
|
| 227 |
+
aux1 = ctx.ln(aux1)
|
| 228 |
+
aux2 = (2*ctx.ln(ctx.pi)+ctx.ln(xB1)+ctx.ln(a))/3 -ctx.ln(2*ctx.pi)/2
|
| 229 |
+
m = 3*xL-3
|
| 230 |
+
aux3= (ctx.loggamma(m+1)-ctx.loggamma(m/3.0+2))/2 -ctx.loggamma((m+1)/2.)
|
| 231 |
+
while((aux1 < m*aux2+ aux3)and (m>1)):
|
| 232 |
+
m = m - 1
|
| 233 |
+
aux3 = (ctx.loggamma(m+1)-ctx.loggamma(m/3.0+2))/2 -ctx.loggamma((m+1)/2.)
|
| 234 |
+
xM = m
|
| 235 |
+
return xM
|
| 236 |
+
|
| 237 |
+
def aux_J_needed(ctx, xA, xeps4, a, xB1, xM):
|
| 238 |
+
# DETERMINATION OF J THE NUMBER OF TERMS NEEDED
|
| 239 |
+
# IN THE TAYLOR SERIES OF F.
|
| 240 |
+
# See II Section 3.11 equation (49))
|
| 241 |
+
# Only determine one
|
| 242 |
+
h1 = xeps4/(632*xA)
|
| 243 |
+
h2 = xB1*a * 126.31337419529260248 # = pi^2*e^2*sqrt(3)
|
| 244 |
+
h2 = h1 * ctx.power((h2/xM**2),(xM-1)/3) / xM
|
| 245 |
+
h3 = min(h1,h2)
|
| 246 |
+
return h3
|
| 247 |
+
|
| 248 |
+
def Rzeta_simul(ctx, s, der=0):
|
| 249 |
+
# First we take the value of ctx.prec
|
| 250 |
+
wpinitial = ctx.prec
|
| 251 |
+
|
| 252 |
+
# INITIALIZATION
|
| 253 |
+
# Take the real and imaginary part of s
|
| 254 |
+
t = ctx._im(s)
|
| 255 |
+
xsigma = ctx._re(s)
|
| 256 |
+
ysigma = 1 - xsigma
|
| 257 |
+
|
| 258 |
+
# Now compute several parameter that appear on the program
|
| 259 |
+
ctx.prec = 15
|
| 260 |
+
a = ctx.sqrt(t/(2*ctx.pi))
|
| 261 |
+
xasigma = a ** xsigma
|
| 262 |
+
yasigma = a ** ysigma
|
| 263 |
+
|
| 264 |
+
# We need a simple bound A1 < asigma (see II Section 3.1 and 3.3)
|
| 265 |
+
xA1=ctx.power(2, ctx.mag(xasigma)-1)
|
| 266 |
+
yA1=ctx.power(2, ctx.mag(yasigma)-1)
|
| 267 |
+
|
| 268 |
+
# We compute various epsilon's (see II end of Section 3.1)
|
| 269 |
+
eps = ctx.power(2, -wpinitial)
|
| 270 |
+
eps1 = eps/6.
|
| 271 |
+
xeps2 = eps * xA1/3.
|
| 272 |
+
yeps2 = eps * yA1/3.
|
| 273 |
+
|
| 274 |
+
# COMPUTING SOME COEFFICIENTS THAT DEPENDS
|
| 275 |
+
# ON sigma
|
| 276 |
+
# constant b and c (see I Theorem 2 formula (26) )
|
| 277 |
+
# coefficients A and B1 (see I Section 6.1 equation (50))
|
| 278 |
+
#
|
| 279 |
+
# here we not need high precision
|
| 280 |
+
ctx.prec = 15
|
| 281 |
+
if xsigma > 0:
|
| 282 |
+
xb = 2.
|
| 283 |
+
xc = math.pow(9,xsigma)/4.44288
|
| 284 |
+
# 4.44288 =(math.sqrt(2)*math.pi)
|
| 285 |
+
xA = math.pow(9,xsigma)
|
| 286 |
+
xB1 = 1
|
| 287 |
+
else:
|
| 288 |
+
xb = 2.25158 # math.sqrt( (3-2* math.log(2))*math.pi )
|
| 289 |
+
xc = math.pow(2,-xsigma)/4.44288
|
| 290 |
+
xA = math.pow(2,-xsigma)
|
| 291 |
+
xB1 = 1.10789 # = 2*sqrt(1-log(2))
|
| 292 |
+
|
| 293 |
+
if(ysigma > 0):
|
| 294 |
+
yb = 2.
|
| 295 |
+
yc = math.pow(9,ysigma)/4.44288
|
| 296 |
+
# 4.44288 =(math.sqrt(2)*math.pi)
|
| 297 |
+
yA = math.pow(9,ysigma)
|
| 298 |
+
yB1 = 1
|
| 299 |
+
else:
|
| 300 |
+
yb = 2.25158 # math.sqrt( (3-2* math.log(2))*math.pi )
|
| 301 |
+
yc = math.pow(2,-ysigma)/4.44288
|
| 302 |
+
yA = math.pow(2,-ysigma)
|
| 303 |
+
yB1 = 1.10789 # = 2*sqrt(1-log(2))
|
| 304 |
+
|
| 305 |
+
# COMPUTING L THE NUMBER OF TERMS NEEDED IN THE RIEMANN-SIEGEL
|
| 306 |
+
# CORRECTION
|
| 307 |
+
# See II Section 3.2
|
| 308 |
+
ctx.prec = 15
|
| 309 |
+
xL = 1
|
| 310 |
+
while 3*xc*ctx.gamma(xL*0.5) * ctx.power(xb*a,-xL) >= xeps2:
|
| 311 |
+
xL = xL+1
|
| 312 |
+
xL = max(2,xL)
|
| 313 |
+
yL = 1
|
| 314 |
+
while 3*yc*ctx.gamma(yL*0.5) * ctx.power(yb*a,-yL) >= yeps2:
|
| 315 |
+
yL = yL+1
|
| 316 |
+
yL = max(2,yL)
|
| 317 |
+
|
| 318 |
+
# The number L has to satify some conditions.
|
| 319 |
+
# If not RS can not compute Rzeta(s) with the prescribed precision
|
| 320 |
+
# (see II, Section 3.2 condition (20) ) and
|
| 321 |
+
# (II, Section 3.3 condition (22) ). Also we have added
|
| 322 |
+
# an additional technical condition in Section 3.17 Proposition 17
|
| 323 |
+
if ((3*xL >= 2*a*a/25.) or (3*xL+2+xsigma<0) or (abs(xsigma) > a/2.) or \
|
| 324 |
+
(3*yL >= 2*a*a/25.) or (3*yL+2+ysigma<0) or (abs(ysigma) > a/2.)):
|
| 325 |
+
ctx.prec = wpinitial
|
| 326 |
+
raise NotImplementedError("Riemann-Siegel can not compute with such precision")
|
| 327 |
+
|
| 328 |
+
# We take the maximum of the two values
|
| 329 |
+
L = max(xL, yL)
|
| 330 |
+
|
| 331 |
+
# INITIALIZATION (CONTINUATION)
|
| 332 |
+
#
|
| 333 |
+
# eps3 is the constant defined on (II, Section 3.5 equation (27) )
|
| 334 |
+
# each term of the RS correction must be computed with error <= eps3
|
| 335 |
+
xeps3 = xeps2/(4*xL)
|
| 336 |
+
yeps3 = yeps2/(4*yL)
|
| 337 |
+
|
| 338 |
+
# eps4 is defined on (II Section 3.6 equation (30) )
|
| 339 |
+
# each component of the formula (II Section 3.6 equation (29) )
|
| 340 |
+
# must be computed with error <= eps4
|
| 341 |
+
xeps4 = xeps3/(3*xL)
|
| 342 |
+
yeps4 = yeps3/(3*yL)
|
| 343 |
+
|
| 344 |
+
# COMPUTING M NUMBER OF DERIVATIVES Fp[m] TO COMPUTE
|
| 345 |
+
xM = aux_M_Fp(ctx, xA, xeps4, a, xB1, xL)
|
| 346 |
+
yM = aux_M_Fp(ctx, yA, yeps4, a, yB1, yL)
|
| 347 |
+
M = max(xM, yM)
|
| 348 |
+
|
| 349 |
+
# COMPUTING NUMBER OF TERMS J NEEDED
|
| 350 |
+
h3 = aux_J_needed(ctx, xA, xeps4, a, xB1, xM)
|
| 351 |
+
h4 = aux_J_needed(ctx, yA, yeps4, a, yB1, yM)
|
| 352 |
+
h3 = min(h3,h4)
|
| 353 |
+
J = 12
|
| 354 |
+
jvalue = (2*ctx.pi)**J / ctx.gamma(J+1)
|
| 355 |
+
while jvalue > h3:
|
| 356 |
+
J = J+1
|
| 357 |
+
jvalue = (2*ctx.pi)*jvalue/J
|
| 358 |
+
|
| 359 |
+
# COMPUTING eps5[m] for 1 <= m <= 21
|
| 360 |
+
# See II Section 10 equation (43)
|
| 361 |
+
# We choose the minimum of the two possibilities
|
| 362 |
+
eps5={}
|
| 363 |
+
xforeps5 = math.pi*math.pi*xB1*a
|
| 364 |
+
yforeps5 = math.pi*math.pi*yB1*a
|
| 365 |
+
for m in range(0,22):
|
| 366 |
+
xaux1 = math.pow(xforeps5, m/3)/(316.*xA)
|
| 367 |
+
yaux1 = math.pow(yforeps5, m/3)/(316.*yA)
|
| 368 |
+
aux1 = min(xaux1, yaux1)
|
| 369 |
+
aux2 = ctx.gamma(m+1)/ctx.gamma(m/3.0+0.5)
|
| 370 |
+
aux2 = math.sqrt(aux2)
|
| 371 |
+
eps5[m] = (aux1*aux2*min(xeps4,yeps4))
|
| 372 |
+
|
| 373 |
+
# COMPUTING wpfp
|
| 374 |
+
# See II Section 3.13 equation (59)
|
| 375 |
+
twenty = min(3*L-3, 21)+1
|
| 376 |
+
aux = 6812*J
|
| 377 |
+
wpfp = ctx.mag(44*J)
|
| 378 |
+
for m in range(0,twenty):
|
| 379 |
+
wpfp = max(wpfp, ctx.mag(aux*ctx.gamma(m+1)/eps5[m]))
|
| 380 |
+
|
| 381 |
+
# COMPUTING N AND p
|
| 382 |
+
# See II Section
|
| 383 |
+
ctx.prec = wpfp + ctx.mag(t)+20
|
| 384 |
+
a = ctx.sqrt(t/(2*ctx.pi))
|
| 385 |
+
N = ctx.floor(a)
|
| 386 |
+
p = 1-2*(a-N)
|
| 387 |
+
|
| 388 |
+
# now we get a rounded version of p
|
| 389 |
+
# to the precision wpfp
|
| 390 |
+
# this possibly is not necessary
|
| 391 |
+
num=ctx.floor(p*(ctx.mpf('2')**wpfp))
|
| 392 |
+
difference = p * (ctx.mpf('2')**wpfp)-num
|
| 393 |
+
if (difference < 0.5):
|
| 394 |
+
num = num
|
| 395 |
+
else:
|
| 396 |
+
num = num+1
|
| 397 |
+
p = ctx.convert(num * (ctx.mpf('2')**(-wpfp)))
|
| 398 |
+
|
| 399 |
+
# COMPUTING THE COEFFICIENTS c[n] = cc[n]
|
| 400 |
+
# We shall use the notation cc[n], since there is
|
| 401 |
+
# a constant that is called c
|
| 402 |
+
# See II Section 3.14
|
| 403 |
+
# We compute the coefficients and also save then in a
|
| 404 |
+
# cache. The bulk of the computation is passed to
|
| 405 |
+
# the function coef()
|
| 406 |
+
#
|
| 407 |
+
# eps6 is defined in II Section 3.13 equation (58)
|
| 408 |
+
eps6 = ctx.power(ctx.convert(2*ctx.pi), J)/(ctx.gamma(J+1)*3*J)
|
| 409 |
+
|
| 410 |
+
# Now we compute the coefficients
|
| 411 |
+
cc = {}
|
| 412 |
+
cont = {}
|
| 413 |
+
cont, pipowers = coef(ctx, J, eps6)
|
| 414 |
+
cc=cont.copy() # we need a copy since we have to change his values.
|
| 415 |
+
Fp={} # this is the adequate locus of this
|
| 416 |
+
for n in range(M, 3*L-2):
|
| 417 |
+
Fp[n] = 0
|
| 418 |
+
Fp={}
|
| 419 |
+
ctx.prec = wpfp
|
| 420 |
+
for m in range(0,M+1):
|
| 421 |
+
sumP = 0
|
| 422 |
+
for k in range(2*J-m-1,-1,-1):
|
| 423 |
+
sumP = (sumP * p)+ cc[k]
|
| 424 |
+
Fp[m] = sumP
|
| 425 |
+
# preparation of the new coefficients
|
| 426 |
+
for k in range(0,2*J-m-1):
|
| 427 |
+
cc[k] = (k+1)* cc[k+1]
|
| 428 |
+
|
| 429 |
+
# COMPUTING THE NUMBERS xd[u,n,k], yd[u,n,k]
|
| 430 |
+
# See II Section 3.17
|
| 431 |
+
#
|
| 432 |
+
# First we compute the working precisions xwpd[k]
|
| 433 |
+
# Se II equation (92)
|
| 434 |
+
xwpd={}
|
| 435 |
+
d1 = max(6,ctx.mag(40*L*L))
|
| 436 |
+
xd2 = 13+ctx.mag((1+abs(xsigma))*xA)-ctx.mag(xeps4)-1
|
| 437 |
+
xconst = ctx.ln(8/(ctx.pi*ctx.pi*a*a*xB1*xB1)) /2
|
| 438 |
+
for n in range(0,L):
|
| 439 |
+
xd3 = ctx.mag(ctx.sqrt(ctx.gamma(n-0.5)))-ctx.floor(n*xconst)+xd2
|
| 440 |
+
xwpd[n]=max(xd3,d1)
|
| 441 |
+
|
| 442 |
+
# procedure of II Section 3.17
|
| 443 |
+
ctx.prec = xwpd[1]+10
|
| 444 |
+
xpsigma = 1-(2*xsigma)
|
| 445 |
+
xd = {}
|
| 446 |
+
xd[0,0,-2]=0; xd[0,0,-1]=0; xd[0,0,0]=1; xd[0,0,1]=0
|
| 447 |
+
xd[0,-1,-2]=0; xd[0,-1,-1]=0; xd[0,-1,0]=1; xd[0,-1,1]=0
|
| 448 |
+
for n in range(1,L):
|
| 449 |
+
ctx.prec = xwpd[n]+10
|
| 450 |
+
for k in range(0,3*n//2+1):
|
| 451 |
+
m = 3*n-2*k
|
| 452 |
+
if(m!=0):
|
| 453 |
+
m1 = ctx.one/m
|
| 454 |
+
c1= m1/4
|
| 455 |
+
c2=(xpsigma*m1)/2
|
| 456 |
+
c3=-(m+1)
|
| 457 |
+
xd[0,n,k]=c3*xd[0,n-1,k-2]+c1*xd[0,n-1,k]+c2*xd[0,n-1,k-1]
|
| 458 |
+
else:
|
| 459 |
+
xd[0,n,k]=0
|
| 460 |
+
for r in range(0,k):
|
| 461 |
+
add=xd[0,n,r]*(ctx.mpf('1.0')*ctx.fac(2*k-2*r)/ctx.fac(k-r))
|
| 462 |
+
xd[0,n,k] -= ((-1)**(k-r))*add
|
| 463 |
+
xd[0,n,-2]=0; xd[0,n,-1]=0; xd[0,n,3*n//2+1]=0
|
| 464 |
+
for mu in range(-2,der+1):
|
| 465 |
+
for n in range(-2,L):
|
| 466 |
+
for k in range(-3,max(1,3*n//2+2)):
|
| 467 |
+
if( (mu<0)or (n<0) or(k<0)or (k>3*n//2)):
|
| 468 |
+
xd[mu,n,k] = 0
|
| 469 |
+
for mu in range(1,der+1):
|
| 470 |
+
for n in range(0,L):
|
| 471 |
+
ctx.prec = xwpd[n]+10
|
| 472 |
+
for k in range(0,3*n//2+1):
|
| 473 |
+
aux=(2*mu-2)*xd[mu-2,n-2,k-3]+2*(xsigma+n-2)*xd[mu-1,n-2,k-3]
|
| 474 |
+
xd[mu,n,k] = aux - xd[mu-1,n-1,k-1]
|
| 475 |
+
|
| 476 |
+
# Now we compute the working precisions ywpd[k]
|
| 477 |
+
# Se II equation (92)
|
| 478 |
+
ywpd={}
|
| 479 |
+
d1 = max(6,ctx.mag(40*L*L))
|
| 480 |
+
yd2 = 13+ctx.mag((1+abs(ysigma))*yA)-ctx.mag(yeps4)-1
|
| 481 |
+
yconst = ctx.ln(8/(ctx.pi*ctx.pi*a*a*yB1*yB1)) /2
|
| 482 |
+
for n in range(0,L):
|
| 483 |
+
yd3 = ctx.mag(ctx.sqrt(ctx.gamma(n-0.5)))-ctx.floor(n*yconst)+yd2
|
| 484 |
+
ywpd[n]=max(yd3,d1)
|
| 485 |
+
|
| 486 |
+
# procedure of II Section 3.17
|
| 487 |
+
ctx.prec = ywpd[1]+10
|
| 488 |
+
ypsigma = 1-(2*ysigma)
|
| 489 |
+
yd = {}
|
| 490 |
+
yd[0,0,-2]=0; yd[0,0,-1]=0; yd[0,0,0]=1; yd[0,0,1]=0
|
| 491 |
+
yd[0,-1,-2]=0; yd[0,-1,-1]=0; yd[0,-1,0]=1; yd[0,-1,1]=0
|
| 492 |
+
for n in range(1,L):
|
| 493 |
+
ctx.prec = ywpd[n]+10
|
| 494 |
+
for k in range(0,3*n//2+1):
|
| 495 |
+
m = 3*n-2*k
|
| 496 |
+
if(m!=0):
|
| 497 |
+
m1 = ctx.one/m
|
| 498 |
+
c1= m1/4
|
| 499 |
+
c2=(ypsigma*m1)/2
|
| 500 |
+
c3=-(m+1)
|
| 501 |
+
yd[0,n,k]=c3*yd[0,n-1,k-2]+c1*yd[0,n-1,k]+c2*yd[0,n-1,k-1]
|
| 502 |
+
else:
|
| 503 |
+
yd[0,n,k]=0
|
| 504 |
+
for r in range(0,k):
|
| 505 |
+
add=yd[0,n,r]*(ctx.mpf('1.0')*ctx.fac(2*k-2*r)/ctx.fac(k-r))
|
| 506 |
+
yd[0,n,k] -= ((-1)**(k-r))*add
|
| 507 |
+
yd[0,n,-2]=0; yd[0,n,-1]=0; yd[0,n,3*n//2+1]=0
|
| 508 |
+
|
| 509 |
+
for mu in range(-2,der+1):
|
| 510 |
+
for n in range(-2,L):
|
| 511 |
+
for k in range(-3,max(1,3*n//2+2)):
|
| 512 |
+
if( (mu<0)or (n<0) or(k<0)or (k>3*n//2)):
|
| 513 |
+
yd[mu,n,k] = 0
|
| 514 |
+
for mu in range(1,der+1):
|
| 515 |
+
for n in range(0,L):
|
| 516 |
+
ctx.prec = ywpd[n]+10
|
| 517 |
+
for k in range(0,3*n//2+1):
|
| 518 |
+
aux=(2*mu-2)*yd[mu-2,n-2,k-3]+2*(ysigma+n-2)*yd[mu-1,n-2,k-3]
|
| 519 |
+
yd[mu,n,k] = aux - yd[mu-1,n-1,k-1]
|
| 520 |
+
|
| 521 |
+
# COMPUTING THE COEFFICIENTS xtcoef[k,l]
|
| 522 |
+
# See II Section 3.9
|
| 523 |
+
#
|
| 524 |
+
# computing the needed wp
|
| 525 |
+
xwptcoef={}
|
| 526 |
+
xwpterm={}
|
| 527 |
+
ctx.prec = 15
|
| 528 |
+
c1 = ctx.mag(40*(L+2))
|
| 529 |
+
xc2 = ctx.mag(68*(L+2)*xA)
|
| 530 |
+
xc4 = ctx.mag(xB1*a*math.sqrt(ctx.pi))-1
|
| 531 |
+
for k in range(0,L):
|
| 532 |
+
xc3 = xc2 - k*xc4+ctx.mag(ctx.fac(k+0.5))/2.
|
| 533 |
+
xwptcoef[k] = (max(c1,xc3-ctx.mag(xeps4)+1)+1 +20)*1.5
|
| 534 |
+
xwpterm[k] = (max(c1,ctx.mag(L+2)+xc3-ctx.mag(xeps3)+1)+1 +20)
|
| 535 |
+
ywptcoef={}
|
| 536 |
+
ywpterm={}
|
| 537 |
+
ctx.prec = 15
|
| 538 |
+
c1 = ctx.mag(40*(L+2))
|
| 539 |
+
yc2 = ctx.mag(68*(L+2)*yA)
|
| 540 |
+
yc4 = ctx.mag(yB1*a*math.sqrt(ctx.pi))-1
|
| 541 |
+
for k in range(0,L):
|
| 542 |
+
yc3 = yc2 - k*yc4+ctx.mag(ctx.fac(k+0.5))/2.
|
| 543 |
+
ywptcoef[k] = ((max(c1,yc3-ctx.mag(yeps4)+1))+10)*1.5
|
| 544 |
+
ywpterm[k] = (max(c1,ctx.mag(L+2)+yc3-ctx.mag(yeps3)+1)+1)+10
|
| 545 |
+
|
| 546 |
+
# check of power of pi
|
| 547 |
+
# computing the fortcoef[mu,k,ell]
|
| 548 |
+
xfortcoef={}
|
| 549 |
+
for mu in range(0,der+1):
|
| 550 |
+
for k in range(0,L):
|
| 551 |
+
for ell in range(-2,3*k//2+1):
|
| 552 |
+
xfortcoef[mu,k,ell]=0
|
| 553 |
+
for mu in range(0,der+1):
|
| 554 |
+
for k in range(0,L):
|
| 555 |
+
ctx.prec = xwptcoef[k]
|
| 556 |
+
for ell in range(0,3*k//2+1):
|
| 557 |
+
xfortcoef[mu,k,ell]=xd[mu,k,ell]*Fp[3*k-2*ell]/pipowers[2*k-ell]
|
| 558 |
+
xfortcoef[mu,k,ell]=xfortcoef[mu,k,ell]/((2*ctx.j)**ell)
|
| 559 |
+
|
| 560 |
+
def trunc_a(t):
|
| 561 |
+
wp = ctx.prec
|
| 562 |
+
ctx.prec = wp + 2
|
| 563 |
+
aa = ctx.sqrt(t/(2*ctx.pi))
|
| 564 |
+
ctx.prec = wp
|
| 565 |
+
return aa
|
| 566 |
+
|
| 567 |
+
# computing the tcoef[k,ell]
|
| 568 |
+
xtcoef={}
|
| 569 |
+
for mu in range(0,der+1):
|
| 570 |
+
for k in range(0,L):
|
| 571 |
+
for ell in range(-2,3*k//2+1):
|
| 572 |
+
xtcoef[mu,k,ell]=0
|
| 573 |
+
ctx.prec = max(xwptcoef[0],ywptcoef[0])+3
|
| 574 |
+
aa= trunc_a(t)
|
| 575 |
+
la = -ctx.ln(aa)
|
| 576 |
+
|
| 577 |
+
for chi in range(0,der+1):
|
| 578 |
+
for k in range(0,L):
|
| 579 |
+
ctx.prec = xwptcoef[k]
|
| 580 |
+
for ell in range(0,3*k//2+1):
|
| 581 |
+
xtcoef[chi,k,ell] =0
|
| 582 |
+
for mu in range(0, chi+1):
|
| 583 |
+
tcoefter=ctx.binomial(chi,mu)*ctx.power(la,mu)*xfortcoef[chi-mu,k,ell]
|
| 584 |
+
xtcoef[chi,k,ell] += tcoefter
|
| 585 |
+
|
| 586 |
+
# COMPUTING THE COEFFICIENTS ytcoef[k,l]
|
| 587 |
+
# See II Section 3.9
|
| 588 |
+
#
|
| 589 |
+
# computing the needed wp
|
| 590 |
+
# check of power of pi
|
| 591 |
+
# computing the fortcoef[mu,k,ell]
|
| 592 |
+
yfortcoef={}
|
| 593 |
+
for mu in range(0,der+1):
|
| 594 |
+
for k in range(0,L):
|
| 595 |
+
for ell in range(-2,3*k//2+1):
|
| 596 |
+
yfortcoef[mu,k,ell]=0
|
| 597 |
+
for mu in range(0,der+1):
|
| 598 |
+
for k in range(0,L):
|
| 599 |
+
ctx.prec = ywptcoef[k]
|
| 600 |
+
for ell in range(0,3*k//2+1):
|
| 601 |
+
yfortcoef[mu,k,ell]=yd[mu,k,ell]*Fp[3*k-2*ell]/pipowers[2*k-ell]
|
| 602 |
+
yfortcoef[mu,k,ell]=yfortcoef[mu,k,ell]/((2*ctx.j)**ell)
|
| 603 |
+
# computing the tcoef[k,ell]
|
| 604 |
+
ytcoef={}
|
| 605 |
+
for chi in range(0,der+1):
|
| 606 |
+
for k in range(0,L):
|
| 607 |
+
for ell in range(-2,3*k//2+1):
|
| 608 |
+
ytcoef[chi,k,ell]=0
|
| 609 |
+
for chi in range(0,der+1):
|
| 610 |
+
for k in range(0,L):
|
| 611 |
+
ctx.prec = ywptcoef[k]
|
| 612 |
+
for ell in range(0,3*k//2+1):
|
| 613 |
+
ytcoef[chi,k,ell] =0
|
| 614 |
+
for mu in range(0, chi+1):
|
| 615 |
+
tcoefter=ctx.binomial(chi,mu)*ctx.power(la,mu)*yfortcoef[chi-mu,k,ell]
|
| 616 |
+
ytcoef[chi,k,ell] += tcoefter
|
| 617 |
+
|
| 618 |
+
# COMPUTING tv[k,ell]
|
| 619 |
+
# See II Section 3.8
|
| 620 |
+
#
|
| 621 |
+
# a has a good value
|
| 622 |
+
ctx.prec = max(xwptcoef[0], ywptcoef[0])+2
|
| 623 |
+
av = {}
|
| 624 |
+
av[0] = 1
|
| 625 |
+
av[1] = av[0]/a
|
| 626 |
+
|
| 627 |
+
ctx.prec = max(xwptcoef[0],ywptcoef[0])
|
| 628 |
+
for k in range(2,L):
|
| 629 |
+
av[k] = av[k-1] * av[1]
|
| 630 |
+
|
| 631 |
+
# Computing the quotients
|
| 632 |
+
xtv = {}
|
| 633 |
+
for chi in range(0,der+1):
|
| 634 |
+
for k in range(0,L):
|
| 635 |
+
ctx.prec = xwptcoef[k]
|
| 636 |
+
for ell in range(0,3*k//2+1):
|
| 637 |
+
xtv[chi,k,ell] = xtcoef[chi,k,ell]* av[k]
|
| 638 |
+
# Computing the quotients
|
| 639 |
+
ytv = {}
|
| 640 |
+
for chi in range(0,der+1):
|
| 641 |
+
for k in range(0,L):
|
| 642 |
+
ctx.prec = ywptcoef[k]
|
| 643 |
+
for ell in range(0,3*k//2+1):
|
| 644 |
+
ytv[chi,k,ell] = ytcoef[chi,k,ell]* av[k]
|
| 645 |
+
|
| 646 |
+
# COMPUTING THE TERMS xterm[k]
|
| 647 |
+
# See II Section 3.6
|
| 648 |
+
xterm = {}
|
| 649 |
+
for chi in range(0,der+1):
|
| 650 |
+
for n in range(0,L):
|
| 651 |
+
ctx.prec = xwpterm[n]
|
| 652 |
+
te = 0
|
| 653 |
+
for k in range(0, 3*n//2+1):
|
| 654 |
+
te += xtv[chi,n,k]
|
| 655 |
+
xterm[chi,n] = te
|
| 656 |
+
|
| 657 |
+
# COMPUTING THE TERMS yterm[k]
|
| 658 |
+
# See II Section 3.6
|
| 659 |
+
yterm = {}
|
| 660 |
+
for chi in range(0,der+1):
|
| 661 |
+
for n in range(0,L):
|
| 662 |
+
ctx.prec = ywpterm[n]
|
| 663 |
+
te = 0
|
| 664 |
+
for k in range(0, 3*n//2+1):
|
| 665 |
+
te += ytv[chi,n,k]
|
| 666 |
+
yterm[chi,n] = te
|
| 667 |
+
|
| 668 |
+
# COMPUTING rssum
|
| 669 |
+
# See II Section 3.5
|
| 670 |
+
xrssum={}
|
| 671 |
+
ctx.prec=15
|
| 672 |
+
xrsbound = math.sqrt(ctx.pi) * xc /(xb*a)
|
| 673 |
+
ctx.prec=15
|
| 674 |
+
xwprssum = ctx.mag(4.4*((L+3)**2)*xrsbound / xeps2)
|
| 675 |
+
xwprssum = max(xwprssum, ctx.mag(10*(L+1)))
|
| 676 |
+
ctx.prec = xwprssum
|
| 677 |
+
for chi in range(0,der+1):
|
| 678 |
+
xrssum[chi] = 0
|
| 679 |
+
for k in range(1,L+1):
|
| 680 |
+
xrssum[chi] += xterm[chi,L-k]
|
| 681 |
+
yrssum={}
|
| 682 |
+
ctx.prec=15
|
| 683 |
+
yrsbound = math.sqrt(ctx.pi) * yc /(yb*a)
|
| 684 |
+
ctx.prec=15
|
| 685 |
+
ywprssum = ctx.mag(4.4*((L+3)**2)*yrsbound / yeps2)
|
| 686 |
+
ywprssum = max(ywprssum, ctx.mag(10*(L+1)))
|
| 687 |
+
ctx.prec = ywprssum
|
| 688 |
+
for chi in range(0,der+1):
|
| 689 |
+
yrssum[chi] = 0
|
| 690 |
+
for k in range(1,L+1):
|
| 691 |
+
yrssum[chi] += yterm[chi,L-k]
|
| 692 |
+
|
| 693 |
+
# COMPUTING S3
|
| 694 |
+
# See II Section 3.19
|
| 695 |
+
ctx.prec = 15
|
| 696 |
+
A2 = 2**(max(ctx.mag(abs(xrssum[0])), ctx.mag(abs(yrssum[0]))))
|
| 697 |
+
eps8 = eps/(3*A2)
|
| 698 |
+
T = t *ctx.ln(t/(2*ctx.pi))
|
| 699 |
+
xwps3 = 5 + ctx.mag((1+(2/eps8)*ctx.power(a,-xsigma))*T)
|
| 700 |
+
ywps3 = 5 + ctx.mag((1+(2/eps8)*ctx.power(a,-ysigma))*T)
|
| 701 |
+
|
| 702 |
+
ctx.prec = max(xwps3, ywps3)
|
| 703 |
+
|
| 704 |
+
tpi = t/(2*ctx.pi)
|
| 705 |
+
arg = (t/2)*ctx.ln(tpi)-(t/2)-ctx.pi/8
|
| 706 |
+
U = ctx.expj(-arg)
|
| 707 |
+
a = trunc_a(t)
|
| 708 |
+
xasigma = ctx.power(a, -xsigma)
|
| 709 |
+
yasigma = ctx.power(a, -ysigma)
|
| 710 |
+
xS3 = ((-1)**(N-1)) * xasigma * U
|
| 711 |
+
yS3 = ((-1)**(N-1)) * yasigma * U
|
| 712 |
+
|
| 713 |
+
# COMPUTING S1 the zetasum
|
| 714 |
+
# See II Section 3.18
|
| 715 |
+
ctx.prec = 15
|
| 716 |
+
xwpsum = 4+ ctx.mag((N+ctx.power(N,1-xsigma))*ctx.ln(N) /eps1)
|
| 717 |
+
ywpsum = 4+ ctx.mag((N+ctx.power(N,1-ysigma))*ctx.ln(N) /eps1)
|
| 718 |
+
wpsum = max(xwpsum, ywpsum)
|
| 719 |
+
|
| 720 |
+
ctx.prec = wpsum +10
|
| 721 |
+
'''
|
| 722 |
+
# This can be improved
|
| 723 |
+
xS1={}
|
| 724 |
+
yS1={}
|
| 725 |
+
for chi in range(0,der+1):
|
| 726 |
+
xS1[chi] = 0
|
| 727 |
+
yS1[chi] = 0
|
| 728 |
+
for n in range(1,int(N)+1):
|
| 729 |
+
ln = ctx.ln(n)
|
| 730 |
+
xexpn = ctx.exp(-ln*(xsigma+ctx.j*t))
|
| 731 |
+
yexpn = ctx.conj(1/(n*xexpn))
|
| 732 |
+
for chi in range(0,der+1):
|
| 733 |
+
pown = ctx.power(-ln, chi)
|
| 734 |
+
xterm = pown*xexpn
|
| 735 |
+
yterm = pown*yexpn
|
| 736 |
+
xS1[chi] += xterm
|
| 737 |
+
yS1[chi] += yterm
|
| 738 |
+
'''
|
| 739 |
+
xS1, yS1 = ctx._zetasum(s, 1, int(N)-1, range(0,der+1), True)
|
| 740 |
+
|
| 741 |
+
# END OF COMPUTATION of xrz, yrz
|
| 742 |
+
# See II Section 3.1
|
| 743 |
+
ctx.prec = 15
|
| 744 |
+
xabsS1 = abs(xS1[der])
|
| 745 |
+
xabsS2 = abs(xrssum[der] * xS3)
|
| 746 |
+
xwpend = max(6, wpinitial+ctx.mag(6*(3*xabsS1+7*xabsS2) ) )
|
| 747 |
+
|
| 748 |
+
ctx.prec = xwpend
|
| 749 |
+
xrz={}
|
| 750 |
+
for chi in range(0,der+1):
|
| 751 |
+
xrz[chi] = xS1[chi]+xrssum[chi]*xS3
|
| 752 |
+
|
| 753 |
+
ctx.prec = 15
|
| 754 |
+
yabsS1 = abs(yS1[der])
|
| 755 |
+
yabsS2 = abs(yrssum[der] * yS3)
|
| 756 |
+
ywpend = max(6, wpinitial+ctx.mag(6*(3*yabsS1+7*yabsS2) ) )
|
| 757 |
+
|
| 758 |
+
ctx.prec = ywpend
|
| 759 |
+
yrz={}
|
| 760 |
+
for chi in range(0,der+1):
|
| 761 |
+
yrz[chi] = yS1[chi]+yrssum[chi]*yS3
|
| 762 |
+
yrz[chi] = ctx.conj(yrz[chi])
|
| 763 |
+
ctx.prec = wpinitial
|
| 764 |
+
return xrz, yrz
|
| 765 |
+
|
| 766 |
+
def Rzeta_set(ctx, s, derivatives=[0]):
    r"""
    Computes several derivatives of the auxiliary function of Riemann `R(s)`.

    **Definition**

    The function is defined by

    .. math ::

        \begin{equation}
        {\mathop{\mathcal R }\nolimits}(s)=
        \int_{0\swarrow1}\frac{x^{-s} e^{\pi i x^2}}{e^{\pi i x}-
        e^{-\pi i x}}\,dx
        \end{equation}

    To this function we apply the Riemann-Siegel expansion.

    Returns a dict mapping each requested derivative order ``chi`` in
    *derivatives* to the value of `R^{(\chi)}(s)`.

    NOTE: the mutable default ``derivatives=[0]`` is never mutated in the
    body (it is only iterated), so the shared-default pitfall does not bite.
    The references "I" and "II" in the comments are the papers by
    Arias de Reyna on the Riemann-Siegel formula.
    """
    der = max(derivatives)
    # First we take the value of ctx.prec
    # During the computation we will change ctx.prec, and finally we will
    # restore the initial value
    wpinitial = ctx.prec
    # Take the real and imaginary part of s
    t = ctx._im(s)
    sigma = ctx._re(s)
    # Now compute several parameters that appear in the program
    ctx.prec = 15
    a = ctx.sqrt(t/(2*ctx.pi))     # Careful
    asigma = ctx.power(a, sigma)   # Careful
    # We need a simple bound A1 < asigma  (see II Section 3.1 and 3.3)
    A1 = ctx.power(2, ctx.mag(asigma)-1)
    # We compute various epsilon's  (see II end of Section 3.1)
    eps = ctx.power(2, -wpinitial)
    eps1 = eps/6.
    eps2 = eps * A1/3.
    # COMPUTING SOME COEFFICIENTS THAT DEPEND ON sigma
    # constant b and c  (see I Theorem 2 formula (26))
    # coefficients A and B1  (see I Section 6.1 equation (50))
    # here we do not need high precision
    ctx.prec = 15
    if sigma > 0:
        b = 2.
        c = math.pow(9,sigma)/4.44288
        # 4.44288 = (math.sqrt(2)*math.pi)
        A = math.pow(9,sigma)
        B1 = 1
    else:
        b = 2.25158  # math.sqrt( (3-2* math.log(2))*math.pi )
        c = math.pow(2,-sigma)/4.44288
        A = math.pow(2,-sigma)
        B1 = 1.10789  # = 2*sqrt(1-log(2))
    # COMPUTING L, THE NUMBER OF TERMS NEEDED IN THE RIEMANN-SIEGEL
    # CORRECTION
    # See II Section 3.2
    ctx.prec = 15
    L = 1
    while 3*c*ctx.gamma(L*0.5) * ctx.power(b*a,-L) >= eps2:
        L = L+1
    L = max(2,L)
    # The number L has to satisfy some conditions.
    # If not, RS can not compute Rzeta(s) with the prescribed precision
    # (see II, Section 3.2 condition (20)) and
    # (II, Section 3.3 condition (22)). Also we have added
    # an additional technical condition in Section 3.17 Proposition 17
    if ((3*L >= 2*a*a/25.) or (3*L+2+sigma<0) or (abs(sigma)> a/2.)):
        ctx.prec = wpinitial
        raise NotImplementedError("Riemann-Siegel can not compute with such precision")

    # INITIALIZATION (CONTINUATION)
    #
    # eps3 is the constant defined in (II, Section 3.5 equation (27));
    # each term of the RS correction must be computed with error <= eps3
    eps3 = eps2/(4*L)

    # eps4 is defined in (II Section 3.6 equation (30));
    # each component of the formula (II Section 3.6 equation (29))
    # must be computed with error <= eps4
    eps4 = eps3/(3*L)

    # COMPUTING M. NUMBER OF DERIVATIVES Fp[m] TO COMPUTE
    M = aux_M_Fp(ctx, A, eps4, a, B1, L)
    Fp = {}
    for n in range(M, 3*L-2):
        Fp[n] = 0

    # But I have not seen an instance of M != 3*L-3
    #
    # DETERMINATION OF J, THE NUMBER OF TERMS NEEDED
    # IN THE TAYLOR SERIES OF F.
    # See II Section 3.11 equation (49)
    h1 = eps4/(632*A)
    h2 = ctx.pi*ctx.pi*B1*a *ctx.sqrt(3)*math.e*math.e
    h2 = h1 * ctx.power((h2/M**2),(M-1)/3) / M
    h3 = min(h1,h2)
    J=12
    jvalue = (2*ctx.pi)**J / ctx.gamma(J+1)
    while jvalue > h3:
        J = J+1
        jvalue = (2*ctx.pi)*jvalue/J

    # COMPUTING eps5[m] for 1 <= m <= 21
    # See II Section 10 equation (43)
    eps5={}
    foreps5 = math.pi*math.pi*B1*a
    for m in range(0,22):
        aux1 = math.pow(foreps5, m/3)/(316.*A)
        aux2 = ctx.gamma(m+1)/ctx.gamma(m/3.0+0.5)
        aux2 = math.sqrt(aux2)
        eps5[m] = aux1*aux2*eps4

    # COMPUTING wpfp, the working precision for the Fp coefficients
    # See II Section 3.13 equation (59)
    twenty = min(3*L-3, 21)+1
    aux = 6812*J
    wpfp = ctx.mag(44*J)
    for m in range(0, twenty):
        wpfp = max(wpfp, ctx.mag(aux*ctx.gamma(m+1)/eps5[m]))
    # COMPUTING N AND p
    # See II Section 3.6; a is recomputed at the now-known precision.
    ctx.prec = wpfp + ctx.mag(t) + 20
    a = ctx.sqrt(t/(2*ctx.pi))
    N = ctx.floor(a)
    p = 1-2*(a-N)

    # now we get a rounded version of p to the precision wpfp;
    # this possibly is not necessary
    num = ctx.floor(p*(ctx.mpf(2)**wpfp))
    difference = p * (ctx.mpf(2)**wpfp)-num
    if difference < 0.5:
        num = num
    else:
        num = num+1
    p = ctx.convert(num * (ctx.mpf(2)**(-wpfp)))

    # COMPUTING THE COEFFICIENTS c[n] = cc[n]
    # We shall use the notation cc[n], since there is
    # a constant that is called c.
    # See II Section 3.14
    # We compute the coefficients and also save them in a
    # cache. The bulk of the computation is passed to
    # the function coef().
    #
    # eps6 is defined in II Section 3.13 equation (58)
    eps6 = ctx.power(2*ctx.pi, J)/(ctx.gamma(J+1)*3*J)

    # Now we compute the coefficients
    cc={}
    cont={}
    cont, pipowers = coef(ctx, J, eps6)
    cc = cont.copy()  # we need a copy since we have to modify cc in place below
    Fp={}
    for n in range(M, 3*L-2):
        Fp[n] = 0
    ctx.prec = wpfp
    for m in range(0,M+1):
        # Horner evaluation of the truncated power series at p
        sumP = 0
        for k in range(2*J-m-1,-1,-1):
            sumP = (sumP * p) + cc[k]
        Fp[m] = sumP
        # preparation of the new coefficients (formal differentiation)
        for k in range(0, 2*J-m-1):
            cc[k] = (k+1) * cc[k+1]

    # COMPUTING THE NUMBERS d[n,k]
    # See II Section 3.17

    # First we compute the working precisions wpd[k]
    # See II equation (92)
    wpd = {}
    d1 = max(6, ctx.mag(40*L*L))
    d2 = 13+ctx.mag((1+abs(sigma))*A)-ctx.mag(eps4)-1
    const = ctx.ln(8/(ctx.pi*ctx.pi*a*a*B1*B1)) /2
    for n in range(0,L):
        d3 = ctx.mag(ctx.sqrt(ctx.gamma(n-0.5)))-ctx.floor(n*const)+d2
        wpd[n] = max(d3,d1)

    # procedure of II Section 3.17 (recurrence for d[0,n,k])
    ctx.prec = wpd[1]+10
    psigma = 1-(2*sigma)
    d = {}
    d[0,0,-2]=0; d[0,0,-1]=0; d[0,0,0]=1; d[0,0,1]=0
    d[0,-1,-2]=0; d[0,-1,-1]=0; d[0,-1,0]=1; d[0,-1,1]=0
    for n in range(1,L):
        ctx.prec = wpd[n]+10
        for k in range(0,3*n//2+1):
            m = 3*n-2*k
            if (m!=0):
                m1 = ctx.one/m
                c1 = m1/4
                c2 = (psigma*m1)/2
                c3 = -(m+1)
                d[0,n,k] = c3*d[0,n-1,k-2]+c1*d[0,n-1,k]+c2*d[0,n-1,k-1]
            else:
                d[0,n,k]=0
                for r in range(0,k):
                    add = d[0,n,r]*(ctx.one*ctx.fac(2*k-2*r)/ctx.fac(k-r))
                    d[0,n,k] -= ((-1)**(k-r))*add
        d[0,n,-2]=0; d[0,n,-1]=0; d[0,n,3*n//2+1]=0

    # zero-pad d for out-of-range indices so the derivative recurrence
    # below can index freely
    for mu in range(-2,der+1):
        for n in range(-2,L):
            for k in range(-3,max(1,3*n//2+2)):
                if ((mu<0)or (n<0) or(k<0)or (k>3*n//2)):
                    d[mu,n,k] = 0

    # recurrence for the derivatives d[mu,n,k], mu >= 1
    for mu in range(1,der+1):
        for n in range(0,L):
            ctx.prec = wpd[n]+10
            for k in range(0,3*n//2+1):
                aux=(2*mu-2)*d[mu-2,n-2,k-3]+2*(sigma+n-2)*d[mu-1,n-2,k-3]
                d[mu,n,k] = aux - d[mu-1,n-1,k-1]

    # COMPUTING THE COEFFICIENTS t[k,l]
    # See II Section 3.9
    #
    # computing the needed working precisions
    wptcoef = {}
    wpterm = {}
    ctx.prec = 15
    c1 = ctx.mag(40*(L+2))
    c2 = ctx.mag(68*(L+2)*A)
    c4 = ctx.mag(B1*a*math.sqrt(ctx.pi))-1
    for k in range(0,L):
        c3 = c2 - k*c4+ctx.mag(ctx.fac(k+0.5))/2.
        wptcoef[k] = max(c1,c3-ctx.mag(eps4)+1)+1 +10
        wpterm[k] = max(c1,ctx.mag(L+2)+c3-ctx.mag(eps3)+1)+1 +10

    # computing the fortcoef[mu,k,ell]
    fortcoef={}
    for mu in derivatives:
        for k in range(0,L):
            for ell in range(-2,3*k//2+1):
                fortcoef[mu,k,ell]=0

    for mu in derivatives:
        for k in range(0,L):
            ctx.prec = wptcoef[k]
            for ell in range(0,3*k//2+1):
                fortcoef[mu,k,ell]=d[mu,k,ell]*Fp[3*k-2*ell]/pipowers[2*k-ell]
                fortcoef[mu,k,ell]=fortcoef[mu,k,ell]/((2*ctx.j)**ell)

    def trunc_a(t):
        # sqrt(t/2pi) computed with 2 guard bits, result at current prec
        wp = ctx.prec
        ctx.prec = wp + 2
        aa = ctx.sqrt(t/(2*ctx.pi))
        ctx.prec = wp
        return aa

    # computing the tcoef[chi,k,ell]
    tcoef={}
    for chi in derivatives:
        for k in range(0,L):
            for ell in range(-2,3*k//2+1):
                tcoef[chi,k,ell]=0
    ctx.prec = wptcoef[0]+3
    aa = trunc_a(t)
    la = -ctx.ln(aa)

    for chi in derivatives:
        for k in range(0,L):
            ctx.prec = wptcoef[k]
            for ell in range(0,3*k//2+1):
                tcoef[chi,k,ell] = 0
                # Leibniz-style combination of lower-order coefficients
                for mu in range(0, chi+1):
                    tcoefter = ctx.binomial(chi,mu) * la**mu * \
                        fortcoef[chi-mu,k,ell]
                    tcoef[chi,k,ell] += tcoefter

    # COMPUTING tv[k,ell]
    # See II Section 3.8

    # Computing the powers av[k] = a**(-k)
    ctx.prec = wptcoef[0] + 2

    # a has a good value by now.
    # See II Section 3.6
    av = {}
    av[0] = 1
    av[1] = av[0]/a

    ctx.prec = wptcoef[0]
    for k in range(2,L):
        av[k] = av[k-1] * av[1]

    # Computing the quotients
    tv = {}
    for chi in derivatives:
        for k in range(0,L):
            ctx.prec = wptcoef[k]
            for ell in range(0,3*k//2+1):
                tv[chi,k,ell] = tcoef[chi,k,ell]* av[k]

    # COMPUTING THE TERMS term[k]
    # See II Section 3.6
    term = {}
    for chi in derivatives:
        for n in range(0,L):
            ctx.prec = wpterm[n]
            te = 0
            for k in range(0, 3*n//2+1):
                te += tv[chi,n,k]
            term[chi,n] = te

    # COMPUTING rssum (the Riemann-Siegel correction sum)
    # See II Section 3.5; summed from smallest term to largest
    rssum={}
    ctx.prec=15
    rsbound = math.sqrt(ctx.pi) * c /(b*a)
    ctx.prec=15
    wprssum = ctx.mag(4.4*((L+3)**2)*rsbound / eps2)
    wprssum = max(wprssum, ctx.mag(10*(L+1)))
    ctx.prec = wprssum
    for chi in derivatives:
        rssum[chi] = 0
        for k in range(1,L+1):
            rssum[chi] += term[chi,L-k]

    # COMPUTING S3
    # See II Section 3.19
    ctx.prec = 15
    A2 = 2**(ctx.mag(rssum[0]))
    eps8 = eps/(3* A2)
    T = t * ctx.ln(t/(2*ctx.pi))
    wps3 = 5 + ctx.mag((1+(2/eps8)*ctx.power(a,-sigma))*T)

    ctx.prec = wps3
    tpi = t/(2*ctx.pi)
    arg = (t/2)*ctx.ln(tpi)-(t/2)-ctx.pi/8
    U = ctx.expj(-arg)
    a = trunc_a(t)
    asigma = ctx.power(a, -sigma)
    S3 = ((-1)**(N-1)) * asigma * U

    # COMPUTING S1, the zetasum
    # See II Section 3.18
    ctx.prec = 15
    wpsum = 4 + ctx.mag((N+ctx.power(N,1-sigma))*ctx.ln(N)/eps1)

    ctx.prec = wpsum + 10
    '''
    # This can be improved
    S1 = {}
    for chi in derivatives:
        S1[chi] = 0
    for n in range(1,int(N)+1):
        ln = ctx.ln(n)
        expn = ctx.exp(-ln*(sigma+ctx.j*t))
        for chi in derivatives:
            term = ctx.power(-ln, chi)*expn
            S1[chi] += term
    '''
    # delegated to the optimized context zetasum
    S1 = ctx._zetasum(s, 1, int(N)-1, derivatives)[0]

    # END OF COMPUTATION
    # See II Section 3.1: combine zetasum and correction at final precision
    ctx.prec = 15
    absS1 = abs(S1[der])
    absS2 = abs(rssum[der] * S3)
    wpend = max(6, wpinitial + ctx.mag(6*(3*absS1+7*absS2)))
    ctx.prec = wpend
    rz = {}
    for chi in derivatives:
        rz[chi] = S1[chi]+rssum[chi]*S3
    ctx.prec = wpinitial
    return rz
|
| 1136 |
+
|
| 1137 |
+
|
| 1138 |
+
def z_half(ctx,t,der=0):
    r"""
    z_half(t,der=0) Computes Z^(der)(t), the Riemann-Siegel Z function and
    its derivatives, for real t (the critical line case).

    Derivatives up to order 4 are supported; the explicit formulas below
    combine R(s) derivatives (from Rzeta_set) with polygamma terms coming
    from differentiating the theta factor.
    """
    s=ctx.mpf('0.5')+ctx.j*t
    wpinitial = ctx.prec
    # estimate working precisions for theta and for the R-zeta part
    ctx.prec = 15
    tt = t/(2*ctx.pi)
    wptheta = wpinitial +1 + ctx.mag(3*(tt**1.5)*ctx.ln(tt))
    wpz = wpinitial + 1 + ctx.mag(12*tt*ctx.ln(tt))
    ctx.prec = wptheta
    theta = ctx.siegeltheta(t)
    ctx.prec = wpz
    rz = Rzeta_set(ctx,s, range(der+1))
    # ps1..ps4: real parts of successive derivatives of theta-related
    # polygamma terms, only computed when the order requires them
    if der > 0: ps1 = ctx._re(ctx.psi(0,s/2)/2 - ctx.ln(ctx.pi)/2)
    if der > 1: ps2 = ctx._re(ctx.j*ctx.psi(1,s/2)/4)
    if der > 2: ps3 = ctx._re(-ctx.psi(2,s/2)/8)
    if der > 3: ps4 = ctx._re(-ctx.j*ctx.psi(3,s/2)/16)
    exptheta = ctx.expj(theta)
    if der == 0:
        z = 2*exptheta*rz[0]
    if der == 1:
        zf = 2j*exptheta
        z = zf*(ps1*rz[0]+rz[1])
    if der == 2:
        zf = 2 * exptheta
        z = -zf*(2*rz[1]*ps1+rz[0]*ps1**2+rz[2]-ctx.j*rz[0]*ps2)
    if der == 3:
        zf = -2j*exptheta
        z = 3*rz[1]*ps1**2+rz[0]*ps1**3+3*ps1*rz[2]
        z = zf*(z-3j*rz[1]*ps2-3j*rz[0]*ps1*ps2+rz[3]-rz[0]*ps3)
    if der == 4:
        zf = 2*exptheta
        z = 4*rz[1]*ps1**3+rz[0]*ps1**4+6*ps1**2*rz[2]
        z = z-12j*rz[1]*ps1*ps2-6j*rz[0]*ps1**2*ps2-6j*rz[2]*ps2-3*rz[0]*ps2*ps2
        z = z + 4*ps1*rz[3]-4*rz[1]*ps3-4*rz[0]*ps1*ps3+rz[4]+ctx.j*rz[0]*ps4
        z = zf*z
    ctx.prec = wpinitial
    # Z(t) is real for real t; discard the (numerically tiny) imaginary part
    return ctx._re(z)
|
| 1177 |
+
|
| 1178 |
+
def zeta_half(ctx, s, k=0):
    """
    zeta_half(s,k=0) Computes zeta^(k)(s) when Re s = 0.5

    On the critical line the functional equation pairs R(s) with its own
    conjugate, so a single Rzeta_set call suffices (contrast zeta_offline,
    which needs Rzeta_simul). Derivatives up to k = 4 are supported.
    """
    wpinitial = ctx.prec
    sigma = ctx._re(s)
    t = ctx._im(s)
    #--- compute wptheta, wpR, wpbasic ---
    ctx.prec = 53
    # X see II Section 3.21 (109) and (110)
    if sigma > 0:
        X = ctx.sqrt(abs(s))
    else:
        X = (2*ctx.pi)**(sigma-1) * abs(1-s)**(0.5-sigma)
    # M1 see II Section 3.21 (111) and (112)
    if sigma > 0:
        M1 = 2*ctx.sqrt(t/(2*ctx.pi))
    else:
        M1 = 4 * t * X
    # T see II Section 3.21 (113)
    abst = abs(0.5-s)
    T = 2* abst*math.log(abst)
    # computing wpbasic, wptheta, wpR see II Section 3.21
    wpbasic = max(6,3+ctx.mag(t))
    wpbasic2 = 2+ctx.mag(2.12*M1+21.2*M1*X+1.3*M1*X*T)+wpinitial+1
    wpbasic = max(wpbasic, wpbasic2)
    wptheta = max(4, 3+ctx.mag(2.7*M1*X)+wpinitial+1)
    wpR = 3+ctx.mag(1.1+2*X)+wpinitial+1
    ctx.prec = wptheta
    theta = ctx.siegeltheta(t-ctx.j*(sigma-ctx.mpf('0.5')))
    # polygamma terms needed for derivative orders 1..4
    if k > 0: ps1 = (ctx._re(ctx.psi(0,s/2)))/2 - ctx.ln(ctx.pi)/2
    if k > 1: ps2 = -(ctx._im(ctx.psi(1,s/2)))/4
    if k > 2: ps3 = -(ctx._re(ctx.psi(2,s/2)))/8
    if k > 3: ps4 = (ctx._im(ctx.psi(3,s/2)))/16
    ctx.prec = wpR
    xrz = Rzeta_set(ctx,s,range(k+1))
    # on the critical line the second R-family is just the conjugate
    yrz={}
    for chi in range(0,k+1):
        yrz[chi] = ctx.conj(xrz[chi])
    ctx.prec = wpbasic
    exptheta = ctx.expj(-2*theta)
    if k==0:
        zv = xrz[0]+exptheta*yrz[0]
    if k==1:
        zv1 = -yrz[1] - 2*yrz[0]*ps1
        zv = xrz[1] + exptheta*zv1
    if k==2:
        zv1 = 4*yrz[1]*ps1+4*yrz[0]*(ps1**2)+yrz[2]+2j*yrz[0]*ps2
        zv = xrz[2]+exptheta*zv1
    if k==3:
        zv1 = -12*yrz[1]*ps1**2-8*yrz[0]*ps1**3-6*yrz[2]*ps1-6j*yrz[1]*ps2
        zv1 = zv1 - 12j*yrz[0]*ps1*ps2-yrz[3]+2*yrz[0]*ps3
        zv = xrz[3]+exptheta*zv1
    if k == 4:
        zv1 = 32*yrz[1]*ps1**3 +16*yrz[0]*ps1**4+24*yrz[2]*ps1**2
        zv1 = zv1 +48j*yrz[1]*ps1*ps2+48j*yrz[0]*(ps1**2)*ps2
        zv1 = zv1+12j*yrz[2]*ps2-12*yrz[0]*ps2**2+8*yrz[3]*ps1-8*yrz[1]*ps3
        zv1 = zv1-16*yrz[0]*ps1*ps3+yrz[4]-2j*yrz[0]*ps4
        zv = xrz[4]+exptheta*zv1
    ctx.prec = wpinitial
    return zv
|
| 1239 |
+
|
| 1240 |
+
def zeta_offline(ctx, s, k=0):
    """
    Computes zeta^(k)(s) off the critical line.

    Off the line the values R(s) and R(conj(1-s)) are independent, so both
    are obtained at once via Rzeta_simul. Derivatives up to k = 4 are
    supported.
    """
    wpinitial = ctx.prec
    sigma = ctx._re(s)
    t = ctx._im(s)
    #--- compute wptheta, wpR, wpbasic ---
    ctx.prec = 53
    # X see II Section 3.21 (109) and (110)
    if sigma > 0:
        X = ctx.power(abs(s), 0.5)
    else:
        X = ctx.power(2*ctx.pi, sigma-1)*ctx.power(abs(1-s),0.5-sigma)
    # M1 see II Section 3.21 (111) and (112)
    if (sigma > 0):
        M1 = 2*ctx.sqrt(t/(2*ctx.pi))
    else:
        M1 = 4 * t * X
    # M2 see II Section 3.21 (111) and (112)
    if (1-sigma > 0):
        M2 = 2*ctx.sqrt(t/(2*ctx.pi))
    else:
        M2 = 4*t*ctx.power(2*ctx.pi, -sigma)*ctx.power(abs(s),sigma-0.5)
    # T see II Section 3.21 (113)
    abst = abs(0.5-s)
    T = 2* abst*math.log(abst)
    # computing wpbasic, wptheta, wpR see II Section 3.21
    wpbasic = max(6,3+ctx.mag(t))
    wpbasic2 = 2+ctx.mag(2.12*M1+21.2*M2*X+1.3*M2*X*T)+wpinitial+1
    wpbasic = max(wpbasic, wpbasic2)
    wptheta = max(4, 3+ctx.mag(2.7*M2*X)+wpinitial+1)
    wpR = 3+ctx.mag(1.1+2*X)+wpinitial+1
    ctx.prec = wptheta
    theta = ctx.siegeltheta(t-ctx.j*(sigma-ctx.mpf('0.5')))
    s1 = s
    # NOTE(review): s2 is computed but never used below — presumably kept
    # for symmetry with z_offline; confirm before removing.
    s2 = ctx.conj(1-s1)
    ctx.prec = wpR
    xrz, yrz = Rzeta_simul(ctx, s, k)
    # symmetrized polygamma terms for the derivative formulas
    if k > 0: ps1 = (ctx.psi(0,s1/2)+ctx.psi(0,(1-s1)/2))/4 - ctx.ln(ctx.pi)/2
    if k > 1: ps2 = ctx.j*(ctx.psi(1,s1/2)-ctx.psi(1,(1-s1)/2))/8
    if k > 2: ps3 = -(ctx.psi(2,s1/2)+ctx.psi(2,(1-s1)/2))/16
    if k > 3: ps4 = -ctx.j*(ctx.psi(3,s1/2)-ctx.psi(3,(1-s1)/2))/32
    ctx.prec = wpbasic
    exptheta = ctx.expj(-2*theta)
    if k == 0:
        zv = xrz[0]+exptheta*yrz[0]
    if k == 1:
        zv1 = -yrz[1]-2*yrz[0]*ps1
        zv = xrz[1]+exptheta*zv1
    if k == 2:
        zv1 = 4*yrz[1]*ps1+4*yrz[0]*(ps1**2) +yrz[2]+2j*yrz[0]*ps2
        zv = xrz[2]+exptheta*zv1
    if k == 3:
        zv1 = -12*yrz[1]*ps1**2 -8*yrz[0]*ps1**3-6*yrz[2]*ps1-6j*yrz[1]*ps2
        zv1 = zv1 - 12j*yrz[0]*ps1*ps2-yrz[3]+2*yrz[0]*ps3
        zv = xrz[3]+exptheta*zv1
    if k == 4:
        zv1 = 32*yrz[1]*ps1**3 +16*yrz[0]*ps1**4+24*yrz[2]*ps1**2
        zv1 = zv1 +48j*yrz[1]*ps1*ps2+48j*yrz[0]*(ps1**2)*ps2
        zv1 = zv1+12j*yrz[2]*ps2-12*yrz[0]*ps2**2+8*yrz[3]*ps1-8*yrz[1]*ps3
        zv1 = zv1-16*yrz[0]*ps1*ps3+yrz[4]-2j*yrz[0]*ps4
        zv = xrz[4]+exptheta*zv1
    ctx.prec = wpinitial
    return zv
|
| 1305 |
+
|
| 1306 |
+
def z_offline(ctx, w, k=0):
    r"""
    Computes Z(w) and its derivatives off the line (complex w).

    Z(w) = exp(i theta(w)) R(s) + exp(-i theta(w)) R(conj(1-s)) with
    s = 1/2 + i w; the two R-families come from Rzeta_simul. Derivatives
    up to k = 4 are supported.
    """
    s = ctx.mpf('0.5')+ctx.j*w
    s1 = s
    s2 = ctx.conj(1-s1)
    wpinitial = ctx.prec
    ctx.prec = 35
    # X see II Section 3.21 (109) and (110)
    # M1 see II Section 3.21 (111) and (112)
    if (ctx._re(s1) >= 0):
        M1 = 2*ctx.sqrt(ctx._im(s1)/(2 * ctx.pi))
        X = ctx.sqrt(abs(s1))
    else:
        X = (2*ctx.pi)**(ctx._re(s1)-1) * abs(1-s1)**(0.5-ctx._re(s1))
        M1 = 4 * ctx._im(s1)*X
    # M2 see II Section 3.21 (111) and (112)
    if (ctx._re(s2) >= 0):
        M2 = 2*ctx.sqrt(ctx._im(s2)/(2 * ctx.pi))
    else:
        M2 = 4 * ctx._im(s2)*(2*ctx.pi)**(ctx._re(s2)-1)*abs(1-s2)**(0.5-ctx._re(s2))
    # T see II Section 3.21 Prop. 27
    T = 2*abs(ctx.siegeltheta(w))
    # defining some precisions
    # see II Section 3.22 (115), (116), (117)
    aux1 = ctx.sqrt(X)
    aux2 = aux1*(M1+M2)
    aux3 = 3 +wpinitial
    wpbasic = max(6, 3+ctx.mag(T), ctx.mag(aux2*(26+2*T))+aux3)
    wptheta = max(4,ctx.mag(2.04*aux2)+aux3)
    wpR = ctx.mag(4*aux1)+aux3
    # now the computations
    ctx.prec = wptheta
    theta = ctx.siegeltheta(w)
    ctx.prec = wpR
    xrz, yrz = Rzeta_simul(ctx,s,k)
    # arguments of the polygamma terms: (s/2) and ((1-s)/2) expressed in w
    pta = 0.25 + 0.5j*w
    ptb = 0.25 - 0.5j*w
    if k > 0: ps1 = 0.25*(ctx.psi(0,pta)+ctx.psi(0,ptb)) - ctx.ln(ctx.pi)/2
    if k > 1: ps2 = (1j/8)*(ctx.psi(1,pta)-ctx.psi(1,ptb))
    if k > 2: ps3 = (-1./16)*(ctx.psi(2,pta)+ctx.psi(2,ptb))
    if k > 3: ps4 = (-1j/32)*(ctx.psi(3,pta)-ctx.psi(3,ptb))
    ctx.prec = wpbasic
    exptheta = ctx.expj(theta)
    if k == 0:
        zv = exptheta*xrz[0]+yrz[0]/exptheta
    j = ctx.j
    if k == 1:
        zv = j*exptheta*(xrz[1]+xrz[0]*ps1)-j*(yrz[1]+yrz[0]*ps1)/exptheta
    if k == 2:
        zv = exptheta*(-2*xrz[1]*ps1-xrz[0]*ps1**2-xrz[2]+j*xrz[0]*ps2)
        zv =zv + (-2*yrz[1]*ps1-yrz[0]*ps1**2-yrz[2]-j*yrz[0]*ps2)/exptheta
    if k == 3:
        zv1 = -3*xrz[1]*ps1**2-xrz[0]*ps1**3-3*xrz[2]*ps1+j*3*xrz[1]*ps2
        zv1 = (zv1+ 3j*xrz[0]*ps1*ps2-xrz[3]+xrz[0]*ps3)*j*exptheta
        zv2 = 3*yrz[1]*ps1**2+yrz[0]*ps1**3+3*yrz[2]*ps1+j*3*yrz[1]*ps2
        zv2 = j*(zv2 + 3j*yrz[0]*ps1*ps2+ yrz[3]-yrz[0]*ps3)/exptheta
        zv = zv1+zv2
    if k == 4:
        zv1 = 4*xrz[1]*ps1**3+xrz[0]*ps1**4 + 6*xrz[2]*ps1**2
        zv1 = zv1-12j*xrz[1]*ps1*ps2-6j*xrz[0]*ps1**2*ps2-6j*xrz[2]*ps2
        zv1 = zv1-3*xrz[0]*ps2*ps2+4*xrz[3]*ps1-4*xrz[1]*ps3-4*xrz[0]*ps1*ps3
        zv1 = zv1+xrz[4]+j*xrz[0]*ps4
        zv2 = 4*yrz[1]*ps1**3+yrz[0]*ps1**4 + 6*yrz[2]*ps1**2
        zv2 = zv2+12j*yrz[1]*ps1*ps2+6j*yrz[0]*ps1**2*ps2+6j*yrz[2]*ps2
        zv2 = zv2-3*yrz[0]*ps2*ps2+4*yrz[3]*ps1-4*yrz[1]*ps3-4*yrz[0]*ps1*ps3
        zv2 = zv2+yrz[4]-j*yrz[0]*ps4
        zv = exptheta*zv1+zv2/exptheta
    ctx.prec = wpinitial
    return zv
|
| 1377 |
+
|
| 1378 |
+
@defun
def rs_zeta(ctx, s, derivative=0, **kwargs):
    """
    Compute zeta^(derivative)(s) by the Riemann-Siegel formula.

    Dispatches to zeta_half when Re(s) == 0.5 and to zeta_offline
    otherwise. Only derivative orders 0..4 are implemented.
    """
    if derivative > 4:
        raise NotImplementedError
    s = ctx.convert(s)
    re = ctx._re(s); im = ctx._im(s)
    if im < 0:
        # Schwarz reflection: zeta(conj(s)) = conj(zeta(s)), which also
        # holds for the derivatives, so reduce to the upper half-plane.
        z = ctx.conj(ctx.rs_zeta(ctx.conj(s), derivative))
        return z
    critical_line = (re == 0.5)
    if critical_line:
        return zeta_half(ctx, s, derivative)
    else:
        return zeta_offline(ctx, s, derivative)
|
| 1392 |
+
|
| 1393 |
+
@defun
def rs_z(ctx, w, derivative=0):
    """
    Compute Z^(derivative)(w), the Riemann-Siegel Z function, by the
    Riemann-Siegel formula.

    Dispatches to z_half for real w (the critical line) and to z_offline
    for complex w.
    """
    w = ctx.convert(w)
    re = ctx._re(w); im = ctx._im(w)
    if re < 0:
        # Z is even in w, so reduce to Re(w) >= 0.
        # NOTE(review): odd-order derivatives of an even function change
        # sign under w -> -w, but no sign adjustment is made here for
        # derivative > 0 — confirm whether this is intended.
        return rs_z(ctx, -w, derivative)
    critical_line = (im == 0)
    if critical_line :
        return z_half(ctx, w, derivative)
    else:
        return z_offline(ctx, w, derivative)
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/functions/signals.py
ADDED
|
@@ -0,0 +1,32 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from .functions import defun_wrapped
|
| 2 |
+
|
| 3 |
+
@defun_wrapped
def squarew(ctx, t, amplitude=1, period=1):
    """Square wave of the given amplitude and period, evaluated at t."""
    # floor(2*t/period) counts completed half-periods; raising -1 to that
    # count alternates the sign once per half-period.
    half_periods = ctx.floor(2*t/period)
    return amplitude*((-1)**half_periods)
|
| 8 |
+
|
| 9 |
+
@defun_wrapped
def trianglew(ctx, t, amplitude=1, period=1):
    """Triangle wave of the given amplitude and period, evaluated at t."""
    # Shift the phase by a quarter period so the wave is zero at t = 0,
    # then fold the fractional phase into a tent shape.
    phase = ctx.frac(t/period + 0.25)
    tent = 0.5 - ctx.fabs(1 - 2*phase)
    return 2*amplitude*tent
|
| 15 |
+
|
| 16 |
+
@defun_wrapped
def sawtoothw(ctx, t, amplitude=1, period=1):
    """Sawtooth wave: ramps from 0 up to amplitude once per period."""
    # frac(t/period) gives the normalized position inside the current period.
    return amplitude*ctx.frac(t/period)
|
| 21 |
+
|
| 22 |
+
@defun_wrapped
def unit_triangle(ctx, t, amplitude=1):
    """Triangular pulse of the given height, supported on (-1, 1)."""
    # Outside the pulse (including the endpoints, where either branch
    # yields zero) the function vanishes.
    if t <= -1 or t >= 1:
        return ctx.zero
    return amplitude*(1 - ctx.fabs(t))
|
| 28 |
+
|
| 29 |
+
@defun_wrapped
def sigmoid(ctx, t, amplitude=1):
    """Logistic sigmoid 1/(1 + e^(-t)), scaled by the given amplitude."""
    return amplitude / (1 + ctx.exp(-t))
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/functions/zeta.py
ADDED
|
@@ -0,0 +1,1154 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from __future__ import print_function
|
| 2 |
+
|
| 3 |
+
from ..libmp.backend import xrange
|
| 4 |
+
from .functions import defun, defun_wrapped, defun_static
|
| 5 |
+
|
| 6 |
+
@defun
def stieltjes(ctx, n, a=1):
    """
    Compute the (generalized) Stieltjes constant gamma_n(a) by numerical
    quadrature of an integral representation along [0, inf).

    Results for integer n with a == 1 are cached on the context
    (``ctx.stieltjes_cache``) together with the precision they were
    computed at, and reused when the cached precision suffices.
    """
    n = ctx.convert(n)
    a = ctx.convert(a)
    if n < 0:
        return ctx.bad_domain("Stieltjes constants defined for n >= 0")
    # Lazily attach the cache to the context object.
    if hasattr(ctx, "stieltjes_cache"):
        stieltjes_cache = ctx.stieltjes_cache
    else:
        stieltjes_cache = ctx.stieltjes_cache = {}
    if a == 1:
        # gamma_0 is the Euler-Mascheroni constant.
        if n == 0:
            return +ctx.euler
        if n in stieltjes_cache:
            prec, s = stieltjes_cache[n]
            if prec >= ctx.prec:
                return +s
    # mag rescales the integrand; it is 1 unless updated below for large n.
    mag = 1
    def f(x):
        xa = x/a
        v = (xa-ctx.j)*ctx.ln(a-ctx.j*x)**n/(1+xa**2)/(ctx.exp(2*ctx.pi*x)-1)
        return ctx._re(v) / mag
    orig = ctx.prec
    try:
        # Normalize integrand by approx. magnitude to
        # speed up quadrature (which uses absolute error)
        if n > 50:
            ctx.prec = 20
            mag = ctx.quad(f, [0,ctx.inf], maxdegree=3)
        # Extra guard digits grow slowly with n.
        ctx.prec = orig + 10 + int(n**0.5)
        s = ctx.quad(f, [0,ctx.inf], maxdegree=20)
        # Undo the normalization by mag when assembling the result.
        v = ctx.ln(a)**n/(2*a) - ctx.ln(a)**(n+1)/(n+1) + 2*s/a*mag
    finally:
        ctx.prec = orig
    if a == 1 and ctx.isint(n):
        stieltjes_cache[n] = (ctx.prec, v)
    return +v
|
| 43 |
+
|
| 44 |
+
@defun_wrapped
def siegeltheta(ctx, t, derivative=0):
    """
    Compute the Riemann-Siegel theta function theta(t), or its d-th
    derivative for ``derivative`` = d > 0, via loggamma/polygamma.
    """
    d = int(derivative)
    if (t == ctx.inf or t == ctx.ninf):
        # Limits at +/- infinity: theta and theta' diverge, higher
        # derivatives tend to zero.
        if d < 2:
            if t == ctx.ninf and d == 0:
                return ctx.ninf
            return ctx.inf
        else:
            return ctx.zero
    if d == 0:
        if ctx._im(t):
            # XXX: cancellation occurs
            a = ctx.loggamma(0.25+0.5j*t)
            b = ctx.loggamma(0.25-0.5j*t)
            return -ctx.ln(ctx.pi)/2*t - 0.5j*(a-b)
        else:
            if ctx.isinf(t):
                return t
            # Real t: the imaginary part of loggamma gives theta directly.
            return ctx._im(ctx.loggamma(0.25+0.5j*t)) - ctx.ln(ctx.pi)/2*t
    if d > 0:
        # d-th derivative expressed through polygamma of order d-1.
        a = (-0.5j)**(d-1)*ctx.polygamma(d-1, 0.25-0.5j*t)
        b = (0.5j)**(d-1)*ctx.polygamma(d-1, 0.25+0.5j*t)
        if ctx._im(t):
            if d == 1:
                return -0.5*ctx.log(ctx.pi)+0.25*(a+b)
            else:
                return 0.25*(a+b)
        else:
            # Real t: the result is real; discard the imaginary roundoff.
            if d == 1:
                return ctx._re(-0.5*ctx.log(ctx.pi)+0.25*(a+b))
            else:
                return ctx._re(0.25*(a+b))
|
| 77 |
+
|
| 78 |
+
@defun_wrapped
def grampoint(ctx, n):
    """
    Compute the n-th Gram point g_n, i.e. the solution of
    theta(g_n) = pi*n, by root-finding seeded with an asymptotic guess.
    """
    # asymptotic expansion, from
    # http://mathworld.wolfram.com/GramPoint.html
    g = 2*ctx.pi*ctx.exp(1+ctx.lambertw((8*n+1)/(8*ctx.e)))
    return ctx.findroot(lambda t: ctx.siegeltheta(t)-ctx.pi*n, g)
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
@defun_wrapped
def siegelz(ctx, t, **kwargs):
    """
    Compute the Riemann-Siegel Z-function Z(t) = exp(i*theta(t)) * zeta(1/2+it),
    or its d-th derivative for kwargs['derivative'] = d (d <= 4 directly;
    d > 4 by numerical differentiation of the 4th derivative).

    For large real t the Riemann-Siegel formula (ctx.rs_z) is attempted
    first; otherwise the definition via zeta and theta is used with extra
    working precision.
    """
    d = int(kwargs.get("derivative", 0))
    t = ctx.convert(t)
    t1 = ctx._re(t)
    t2 = ctx._im(t)
    prec = ctx.prec
    try:
        # Large |Re(t)| with small |Im(t)|: use the Riemann-Siegel formula.
        if abs(t1) > 500*prec and t2**2 < t1:
            v = ctx.rs_z(t, d)
            if ctx._is_real_type(t):
                return ctx._re(v)
            return v
    except NotImplementedError:
        pass
    # Guard digits for the cancellation-prone products below.
    ctx.prec += 21
    e1 = ctx.expj(ctx.siegeltheta(t))
    z = ctx.zeta(0.5+ctx.j*t)
    if d == 0:
        v = e1*z
        ctx.prec=prec
        if ctx._is_real_type(t):
            return ctx._re(v)
        return +v
    z1 = ctx.zeta(0.5+ctx.j*t, derivative=1)
    theta1 = ctx.siegeltheta(t, derivative=1)
    if d == 1:
        # Z'(t) = i*e^{i theta}(zeta' + zeta*theta')
        v = ctx.j*e1*(z1+z*theta1)
        ctx.prec=prec
        if ctx._is_real_type(t):
            return ctx._re(v)
        return +v
    z2 = ctx.zeta(0.5+ctx.j*t, derivative=2)
    theta2 = ctx.siegeltheta(t, derivative=2)
    comb1 = theta1**2-ctx.j*theta2
    if d == 2:
        def terms():
            return [2*z1*theta1, z2, z*comb1]
        # sum_accurately guards against catastrophic cancellation.
        v = ctx.sum_accurately(terms, 1)
        v = -e1*v
        ctx.prec = prec
        if ctx._is_real_type(t):
            return ctx._re(v)
        return +v
    ctx.prec += 10
    z3 = ctx.zeta(0.5+ctx.j*t, derivative=3)
    theta3 = ctx.siegeltheta(t, derivative=3)
    comb2 = theta1**3-3*ctx.j*theta1*theta2-theta3
    if d == 3:
        def terms():
            return [3*theta1*z2, 3*z1*comb1, z3+z*comb2]
        v = ctx.sum_accurately(terms, 1)
        v = -ctx.j*e1*v
        ctx.prec = prec
        if ctx._is_real_type(t):
            return ctx._re(v)
        return +v
    z4 = ctx.zeta(0.5+ctx.j*t, derivative=4)
    theta4 = ctx.siegeltheta(t, derivative=4)
    def terms():
        return [theta1**4, -6*ctx.j*theta1**2*theta2, -3*theta2**2,
            -4*theta1*theta3, ctx.j*theta4]
    comb3 = ctx.sum_accurately(terms, 1)
    if d == 4:
        def terms():
            return [6*theta1**2*z2, -6*ctx.j*z2*theta2, 4*theta1*z3,
                4*z1*comb2, z4, z*comb3]
        v = ctx.sum_accurately(terms, 1)
        v = e1*v
        ctx.prec = prec
        if ctx._is_real_type(t):
            return ctx._re(v)
        return +v
    if d > 4:
        # Higher derivatives: differentiate the 4th derivative numerically.
        h = lambda x: ctx.siegelz(x, derivative=4)
        return ctx.diff(h, t, n=d-4)
|
| 162 |
+
|
| 163 |
+
|
| 164 |
+
# Imaginary parts of the first 100 nontrivial zeros of the Riemann zeta
# function, to ~9 decimal places. Extended in place by _load_zeta_zeros().
_zeta_zeros = [
  14.134725142,21.022039639,25.010857580,30.424876126,32.935061588,
  37.586178159,40.918719012,43.327073281,48.005150881,49.773832478,
  52.970321478,56.446247697,59.347044003,60.831778525,65.112544048,
  67.079810529,69.546401711,72.067157674,75.704690699,77.144840069,
  79.337375020,82.910380854,84.735492981,87.425274613,88.809111208,
  92.491899271,94.651344041,95.870634228,98.831194218,101.317851006,
  103.725538040,105.446623052,107.168611184,111.029535543,111.874659177,
  114.320220915,116.226680321,118.790782866,121.370125002,122.946829294,
  124.256818554,127.516683880,129.578704200,131.087688531,133.497737203,
  134.756509753,138.116042055,139.736208952,141.123707404,143.111845808,
  146.000982487,147.422765343,150.053520421,150.925257612,153.024693811,
  156.112909294,157.597591818,158.849988171,161.188964138,163.030709687,
  165.537069188,167.184439978,169.094515416,169.911976479,173.411536520,
  174.754191523,176.441434298,178.377407776,179.916484020,182.207078484,
  184.874467848,185.598783678,187.228922584,189.416158656,192.026656361,
  193.079726604,195.265396680,196.876481841,198.015309676,201.264751944,
  202.493594514,204.189671803,205.394697202,207.906258888,209.576509717,
  211.690862595,213.347919360,214.547044783,216.169538508,219.067596349,
  220.714918839,221.430705555,224.007000255,224.983324670,227.421444280,
  229.337413306,231.250188700,231.987235253,233.693404179,236.524229666,
]
|
| 186 |
+
|
| 187 |
+
def _load_zeta_zeros(url):
    """
    Download a whitespace/line-separated table of imaginary parts of zeta
    zeros from *url* and replace the contents of the module-level
    ``_zeta_zeros`` list with it.

    Raises on network failure; asserts that the first zero is ~14.13
    as a sanity check on the downloaded data.
    """
    # Python 2's urllib.urlopen was removed in Python 3; fall back
    # appropriately so this works on both.
    try:
        from urllib.request import urlopen  # Python 3
    except ImportError:
        from urllib import urlopen  # Python 2
    d = urlopen(url)
    # Python 3 returns bytes; decode before parsing floats.
    L = [float(x) for x in d.read().decode('ascii').split()]
    # Sanity check
    assert round(L[0]) == 14
    _zeta_zeros[:] = L
|
| 194 |
+
|
| 195 |
+
@defun
def oldzetazero(ctx, n, url='http://www.dtc.umn.edu/~odlyzko/zeta_tables/zeros1'):
    """
    Return the n-th nontrivial zero of the Riemann zeta function as a
    point 1/2 + i*t, refining a tabulated imaginary part by root-finding
    on the Riemann-Siegel Z-function.

    Negative n gives the conjugate zero. For n beyond the built-in table
    (up to 100000), the table is downloaded from *url*.
    """
    n = int(n)
    if n < 0:
        return ctx.zetazero(-n).conjugate()
    if n == 0:
        raise ValueError("n must be nonzero")
    if n > len(_zeta_zeros) and n <= 100000:
        _load_zeta_zeros(url)
    if n > len(_zeta_zeros):
        raise NotImplementedError("n too large for zetazeros")
    # Zeros of Z(t) on the real line correspond to zeros of zeta on the
    # critical line.
    return ctx.mpc(0.5, ctx.findroot(ctx.siegelz, _zeta_zeros[n-1]))
|
| 207 |
+
|
| 208 |
+
@defun_wrapped
def riemannr(ctx, x):
    """
    Evaluate the Riemann R prime-counting function via Gram's series
    R(x) = 1 + sum_{k>=1} (ln x)^k / (k * k! * zeta(k+1)).
    """
    if x == 0:
        return ctx.zero
    # Check if a simple asymptotic estimate is accurate enough
    if abs(x) > 1000:
        a = ctx.li(x)
        b = 0.5*ctx.li(ctx.sqrt(x))
        if abs(b) < abs(a)*ctx.eps:
            return a
    if abs(x) < 0.01:
        # XXX
        # Small x: boost precision to compensate for cancellation in the
        # series; the increment grows with -log2|x|.
        ctx.prec += int(-ctx.log(abs(x),2))
    # Sum Gram's series
    s = t = ctx.one
    u = ctx.ln(x)
    k = 1
    while abs(t) > abs(s)*ctx.eps:
        # t carries (ln x)^k / k! incrementally.
        t = t * u / k
        s += t / (k * ctx._zeta_int(k+1))
        k += 1
    return s
|
| 230 |
+
|
| 231 |
+
@defun_static
def primepi(ctx, x):
    """Return pi(x), the exact number of primes p <= x."""
    limit = int(x)
    # There are no primes below 2.
    if limit < 2:
        return 0
    return len(ctx.list_primes(limit))
|
| 237 |
+
|
| 238 |
+
# TODO: fix the interface wrt contexts
|
| 239 |
+
@defun_wrapped
def primepi2(ctx, x):
    """
    Return an interval (iv.mpf) guaranteed to contain pi(x).

    Exact counting is used for x < 2657; beyond that, li(x) with
    Schoenfeld's RH-conditional error bound gives the enclosure.
    """
    x = int(x)
    if x < 2:
        return ctx._iv.zero
    if x < 2657:
        return ctx._iv.mpf(ctx.primepi(x))
    mid = ctx.li(x)
    # Schoenfeld's estimate for x >= 2657, assuming RH
    # Directed rounding keeps the error bound an overestimate.
    err = ctx.sqrt(x,rounding='u')*ctx.ln(x,rounding='u')/8/ctx.pi(rounding='d')
    a = ctx.floor((ctx._iv.mpf(mid)-err).a, rounding='d')
    b = ctx.ceil((ctx._iv.mpf(mid)+err).b, rounding='u')
    return ctx._iv.mpf([a,b])
|
| 252 |
+
|
| 253 |
+
@defun_wrapped
def primezeta(ctx, s):
    """
    Evaluate the prime zeta function P(s) = sum over primes p of p^(-s),
    for Re(s) > 0, using the Moebius inversion
    P(s) = sum_{k>=1} mu(k) ln(zeta(k*s)) / k.
    """
    if ctx.isnan(s):
        return s
    if ctx.re(s) <= 0:
        raise ValueError("prime zeta function defined only for re(s) > 0")
    if s == 1:
        return ctx.inf
    if s == 0.5:
        # Known logarithmic singularity at s = 1/2.
        return ctx.mpc(ctx.ninf, ctx.pi)
    r = ctx.re(s)
    if r > ctx.prec:
        # Only the first prime contributes at this precision.
        return 0.5**s
    else:
        wp = ctx.prec + int(r)
        def terms():
            orig = ctx.prec
            # zeta ~ 1+eps; need to set precision
            # to get logarithm accurately
            k = 0
            while 1:
                k += 1
                u = ctx.moebius(k)
                if not u:
                    continue
                ctx.prec = wp
                t = u*ctx.ln(ctx.zeta(k*s))/k
                if not t:
                    return
                #print ctx.prec, ctx.nstr(t)
                ctx.prec = orig
                yield t
        return ctx.sum_accurately(terms)
|
| 286 |
+
|
| 287 |
+
# TODO: for bernpoly and eulerpoly, ensure that all exact zeros are covered
|
| 288 |
+
|
| 289 |
+
@defun_wrapped
def bernpoly(ctx, n, z):
    """
    Evaluate the Bernoulli polynomial B_n(z).

    Small n and special points (0, 1/2, 1) are handled exactly; otherwise
    the binomial sum is evaluated with accurate summation, ordered by
    |z| to keep terms decreasing.
    """
    # Slow implementation:
    #return sum(ctx.binomial(n,k)*ctx.bernoulli(k)*z**(n-k) for k in xrange(0,n+1))
    n = int(n)
    if n < 0:
        raise ValueError("Bernoulli polynomials only defined for n >= 0")
    if z == 0 or (z == 1 and n > 1):
        return ctx.bernoulli(n)
    if z == 0.5:
        # B_n(1/2) = (2^(1-n)-1) B_n
        return (ctx.ldexp(1,1-n)-1)*ctx.bernoulli(n)
    if n <= 3:
        if n == 0: return z ** 0
        if n == 1: return z - 0.5
        if n == 2: return (6*z*(z-1)+1)/6
        if n == 3: return z*(z*(z-1.5)+0.5)
    if ctx.isinf(z):
        return z ** n
    if ctx.isnan(z):
        return z
    if abs(z) > 2:
        # Descending powers of z: factor out z^n.
        def terms():
            t = ctx.one
            yield t
            r = ctx.one/z
            k = 1
            while k <= n:
                t = t*(n+1-k)/k*r
                # Odd-index Bernoulli numbers (k > 2) vanish; skip them.
                if not (k > 2 and k & 1):
                    yield t*ctx.bernoulli(k)
                k += 1
        return ctx.sum_accurately(terms) * z**n
    else:
        # Ascending powers of z.
        def terms():
            yield ctx.bernoulli(n)
            t = ctx.one
            k = 1
            while k <= n:
                t = t*(n+1-k)/k * z
                m = n-k
                if not (m > 2 and m & 1):
                    yield t*ctx.bernoulli(m)
                k += 1
        return ctx.sum_accurately(terms)
|
| 333 |
+
|
| 334 |
+
@defun_wrapped
def eulerpoly(ctx, n, z):
    """
    Evaluate the Euler polynomial E_n(z).

    Small n and the points 0, 1, 1/2 are special-cased (the latter via
    Euler numbers when cheap); otherwise a Bernoulli-number series is
    summed accurately.
    """
    n = int(n)
    if n < 0:
        raise ValueError("Euler polynomials only defined for n >= 0")
    if n <= 2:
        if n == 0: return z ** 0
        if n == 1: return z - 0.5
        if n == 2: return z*(z-1)
    if ctx.isinf(z):
        return z**n
    if ctx.isnan(z):
        return z
    m = n+1
    if z == 0:
        # E_n(0) = -2(2^(n+1)-1) B_{n+1} / (n+1); z**0 preserves type.
        return -2*(ctx.ldexp(1,m)-1)*ctx.bernoulli(m)/m * z**0
    if z == 1:
        return 2*(ctx.ldexp(1,m)-1)*ctx.bernoulli(m)/m * z**0
    if z == 0.5:
        if n % 2:
            return ctx.zero
        # Use exact code for Euler numbers
        if n < 100 or n*ctx.mag(0.46839865*n) < ctx.prec*0.25:
            return ctx.ldexp(ctx._eulernum(n), -n)
    # http://functions.wolfram.com/Polynomials/EulerE2/06/01/02/01/0002/
    def terms():
        t = ctx.one
        k = 0
        w = ctx.ldexp(1,n+2)
        while 1:
            v = n-k+1
            # Odd-index Bernoulli numbers (v > 2) vanish; skip them.
            if not (v > 2 and v & 1):
                yield (2-w)*ctx.bernoulli(v)*t
            k += 1
            if k > n:
                break
            t = t*z*(n-k+2)/k
            w *= 0.5
    return ctx.sum_accurately(terms) / m
|
| 373 |
+
|
| 374 |
+
@defun
def eulernum(ctx, n, exact=False):
    """
    Return the Euler number E_n, exactly as an int when *exact* is true,
    otherwise as a context float.
    """
    index = int(n)
    if exact:
        return int(ctx._eulernum(index))
    # Small indices: the exact integer converts cheaply.
    if index < 100:
        return ctx.mpf(ctx._eulernum(index))
    # Odd-index Euler numbers vanish identically.
    if index & 1:
        return ctx.zero
    # E_n = 2^n * E_n(1/2), evaluated via the Euler polynomial.
    return ctx.ldexp(ctx.eulerpoly(index, 0.5), index)
|
| 384 |
+
|
| 385 |
+
# TODO: this should be implemented low-level
|
| 386 |
+
# TODO: this should be implemented low-level
def polylog_series(ctx, s, z):
    """Sum the defining series Li_s(z) = sum_{k>=1} z^k / k^s directly
    (intended for |z| sufficiently small that the series converges fast)."""
    tol = +ctx.eps
    total = ctx.zero
    power = z          # holds z^k
    k = 1
    while True:
        term = power / k**s
        total += term
        if abs(term) < tol:
            return total
        power *= z
        k += 1
|
| 399 |
+
|
| 400 |
+
def polylog_continuation(ctx, n, z):
    """
    Correction term relating Li_n(z) and Li_n(1/z) for integer n
    (the inversion/continuation formula); returns zero for n < 0.
    """
    if n < 0:
        return z*0
    twopij = 2j * ctx.pi
    a = -twopij**n/ctx.fac(n) * ctx.bernpoly(n, ctx.ln(z)/twopij)
    # Real negative z gives a real result; drop imaginary roundoff.
    if ctx._is_real_type(z) and z < 0:
        a = ctx._re(a)
    # Branch adjustment depending on which side of the cut z lies.
    if ctx._im(z) < 0 or (ctx._im(z) == 0 and ctx._re(z) >= 1):
        a -= twopij*ctx.ln(z)**(n-1)/ctx.fac(n-1)
    return a
|
| 410 |
+
|
| 411 |
+
def polylog_unitcircle(ctx, n, z):
    """
    Evaluate Li_n(z) for integer n != 1 when z lies near the unit circle,
    using log-power series expansions (different series for n > 1 and
    n < 1). Raises ValueError for n == 1 (handled by the caller).
    """
    tol = +ctx.eps
    if n > 1:
        l = ctx.zero
        logz = ctx.ln(z)
        logmz = ctx.one
        m = 0
        while 1:
            # The m = n-1 term is excluded from the zeta sum and added
            # separately below.
            if (n-m) != 1:
                term = ctx.zeta(n-m) * logmz / ctx.fac(m)
                if term and abs(term) < tol:
                    break
                l += term
            logmz *= logz
            m += 1
        l += ctx.ln(z)**(n-1)/ctx.fac(n-1)*(ctx.harmonic(n-1)-ctx.ln(-ctx.ln(z)))
    elif n < 1: # else
        l = ctx.fac(-n)*(-ctx.ln(z))**(n-1)
        logz = ctx.ln(z)
        logkz = ctx.one
        k = 0
        while 1:
            b = ctx.bernoulli(k-n+1)
            if b:
                term = b*logkz/(ctx.fac(k)*(k-n+1))
                if abs(term) < tol:
                    break
                l -= term
            logkz *= logz
            k += 1
    else:
        raise ValueError
    # Real negative z gives a real result; drop imaginary roundoff.
    if ctx._is_real_type(z) and z < 0:
        l = ctx._re(l)
    return l
|
| 446 |
+
|
| 447 |
+
def polylog_general(ctx, s, z):
    """
    Evaluate Li_s(z) for general (non-integer) s: a Hurwitz-zeta based
    formula when ln(z) is large, otherwise the expansion
    Li_s(z) = Gamma(1-s)(-ln z)^(s-1) + sum_k zeta(s-k) (ln z)^k / k!.
    """
    v = ctx.zero
    u = ctx.ln(z)
    if not abs(u) < 5: # theoretically |u| < 2*pi
        # Jonquiere/Hurwitz representation, valid without the |ln z|
        # restriction.
        j = ctx.j
        v = 1-s
        y = ctx.ln(-z)/(2*ctx.pi*j)
        return ctx.gamma(v)*(j**v*ctx.zeta(v,0.5+y) + j**-v*ctx.zeta(v,0.5-y))/(2*ctx.pi)**v
    t = 1
    k = 0
    while 1:
        term = ctx.zeta(s-k) * t
        if abs(term) < ctx.eps:
            break
        v += term
        k += 1
        # t carries (ln z)^k / k! incrementally.
        t *= u
        t /= k
    return ctx.gamma(1-s)*(-u)**(s-1) + v
|
| 466 |
+
|
| 467 |
+
@defun_wrapped
def polylog(ctx, s, z):
    """
    Evaluate the polylogarithm Li_s(z), dispatching among closed forms,
    the direct series, the inversion formula, and unit-circle/general
    expansions depending on s and |z|.
    """
    s = ctx.convert(s)
    z = ctx.convert(z)
    # Closed forms at special points / orders.
    if z == 1:
        return ctx.zeta(s)
    if z == -1:
        return -ctx.altzeta(s)
    if s == 0:
        return z/(1-z)
    if s == 1:
        return -ctx.ln(1-z)
    if s == -1:
        return z/(1-z)**2
    if abs(z) <= 0.75 or (not ctx.isint(s) and abs(z) < 0.9):
        return polylog_series(ctx, s, z)
    if abs(z) >= 1.4 and ctx.isint(s):
        # Inversion formula: series at 1/z plus the continuation term.
        return (-1)**(s+1)*polylog_series(ctx, s, 1/z) + polylog_continuation(ctx, int(ctx.re(s)), z)
    if ctx.isint(s):
        return polylog_unitcircle(ctx, int(ctx.re(s)), z)
    return polylog_general(ctx, s, z)
|
| 488 |
+
|
| 489 |
+
@defun_wrapped
def clsin(ctx, s, z, pi=False):
    """
    Clausen sine function: the imaginary part of Li_s(e^{iz})
    (of Li_s(e^{i*pi*z}) when *pi* is true).
    """
    # Identically zero when s is a negative odd integer.
    if ctx.isint(s) and s < 0 and int(s) % 2 == 1:
        return z*0
    w = ctx.expjpi(z) if pi else ctx.expj(z)
    # For real s and z the answer is just the imaginary part.
    if ctx._is_real_type(z) and ctx._is_real_type(s):
        return ctx.im(ctx.polylog(s,w))
    # Complex case: antisymmetric combination of Li_s at w and 1/w.
    return (-0.5j)*(ctx.polylog(s,w) - ctx.polylog(s,1/w))
|
| 501 |
+
|
| 502 |
+
@defun_wrapped
def clcos(ctx, s, z, pi=False):
    """
    Clausen cosine function: the real part of Li_s(e^{iz})
    (of Li_s(e^{i*pi*z}) when *pi* is true).
    """
    # Identically zero when s is a negative even integer.
    if ctx.isint(s) and s < 0 and int(s) % 2 == 0:
        return z*0
    w = ctx.expjpi(z) if pi else ctx.expj(z)
    # For real s and z the answer is just the real part.
    if ctx._is_real_type(z) and ctx._is_real_type(s):
        return ctx.re(ctx.polylog(s,w))
    # Complex case: symmetric combination of Li_s at w and 1/w.
    return 0.5*(ctx.polylog(s,w) + ctx.polylog(s,1/w))
|
| 514 |
+
|
| 515 |
+
@defun
def altzeta(ctx, s, **kwargs):
    """
    Evaluate the Dirichlet eta (alternating zeta) function, preferring
    the context's fast implementation and falling back to the generic
    formula when that is unavailable.
    """
    try:
        return ctx._altzeta(s, **kwargs)
    except NotImplementedError:
        return ctx._altzeta_generic(s)
|
| 521 |
+
|
| 522 |
+
@defun_wrapped
def _altzeta_generic(ctx, s):
    """Dirichlet eta via eta(s) = -(2^(1-s) - 1) * zeta(s); eta(1) = ln 2."""
    if s == 1:
        # Adding 0*s keeps the result's type consistent with s.
        return ctx.ln2 + 0*s
    return ctx.zeta(s) * (-ctx.powm1(2, 1-s))
|
| 527 |
+
|
| 528 |
+
@defun
def zeta(ctx, s, a=1, derivative=0, method=None, **kwargs):
    """
    Evaluate the (Hurwitz) zeta function zeta(s, a), optionally a
    derivative with respect to s.

    Dispatches among: the context's fast zeta (a == 1, no derivative),
    the Riemann-Siegel algorithm (large imaginary part), closed forms at
    special points, and Euler-Maclaurin summation (_hurwitz) otherwise.
    *method* may force 'riemann-siegel' or 'euler-maclaurin'.
    """
    d = int(derivative)
    if a == 1 and not (d or method):
        try:
            return ctx._zeta(s, **kwargs)
        except NotImplementedError:
            pass
    s = ctx.convert(s)
    prec = ctx.prec
    method = kwargs.get('method')
    verbose = kwargs.get('verbose')
    if (not s) and (not derivative):
        # zeta(0, a) = 1/2 - a
        return ctx.mpf(0.5) - ctx._convert_param(a)[0]
    if a == 1 and method != 'euler-maclaurin':
        im = abs(ctx._im(s))
        re = abs(ctx._re(s))
        #if (im < prec or method == 'borwein') and not derivative:
        #    try:
        #        if verbose:
        #            print "zeta: Attempting to use the Borwein algorithm"
        #        return ctx._zeta(s, **kwargs)
        #    except NotImplementedError:
        #        if verbose:
        #            print "zeta: Could not use the Borwein algorithm"
        #        pass
        if abs(im) > 500*prec and 10*re < prec and derivative <= 4 or \
            method == 'riemann-siegel':
            try:   # py2.4 compatible try block
                try:
                    if verbose:
                        print("zeta: Attempting to use the Riemann-Siegel algorithm")
                    return ctx.rs_zeta(s, derivative, **kwargs)
                except NotImplementedError:
                    if verbose:
                        print("zeta: Could not use the Riemann-Siegel algorithm")
                    pass
            finally:
                ctx.prec = prec
    if s == 1:
        return ctx.inf
    abss = abs(s)
    if abss == ctx.inf:
        if ctx.re(s) == ctx.inf:
            if d == 0:
                return ctx.one
            return ctx.zero
        return s*0
    elif ctx.isnan(abss):
        return 1/s
    if ctx.re(s) > 2*ctx.prec and a == 1 and not derivative:
        # Far right half-plane: only the first two terms matter.
        return ctx.one + ctx.power(2, -s)
    return +ctx._hurwitz(s, a, d, **kwargs)
|
| 581 |
+
|
| 582 |
+
@defun
def _hurwitz(ctx, s, a=1, d=0, **kwargs):
    """
    Evaluate the Hurwitz zeta function (or its d-th s-derivative) by the
    reflection formula when Re(s) < 0 (if applicable), otherwise by
    Euler-Maclaurin summation with adaptive extra precision to absorb
    cancellation between the two E-M terms.
    """
    prec = ctx.prec
    verbose = kwargs.get('verbose')
    try:
        extraprec = 10
        ctx.prec += extraprec
        # We strongly want to special-case rational a
        a, atype = ctx._convert_param(a)
        if ctx.re(s) < 0:
            if verbose:
                print("zeta: Attempting reflection formula")
            try:
                return _hurwitz_reflection(ctx, s, a, d, atype)
            except NotImplementedError:
                pass
            if verbose:
                print("zeta: Reflection formula failed")
        if verbose:
            print("zeta: Using the Euler-Maclaurin algorithm")
        while 1:
            ctx.prec = prec + extraprec
            T1, T2 = _hurwitz_em(ctx, s, a, d, prec+10, verbose)
            # Bits lost to cancellation between the two terms.
            cancellation = ctx.mag(T1) - ctx.mag(T1+T2)
            if verbose:
                print("Term 1:", T1)
                print("Term 2:", T2)
                print("Cancellation:", cancellation, "bits")
            if cancellation < extraprec:
                return T1 + T2
            else:
                # Retry with enough extra precision to cover the
                # observed cancellation (capped by maxprec).
                extraprec = max(2*extraprec, min(cancellation + 5, 100*prec))
                if extraprec > kwargs.get('maxprec', 100*prec):
                    raise ctx.NoConvergence("zeta: too much cancellation")
    finally:
        ctx.prec = prec
|
| 618 |
+
|
| 619 |
+
def _hurwitz_reflection(ctx, s, a, d, atype):
    """
    Evaluate zeta(s, a) for Re(s) < 0 via the Hurwitz reflection formula.

    Only implemented for d == 0 and for rational/integer *a* (atype 'Q'
    or 'Z'); raises NotImplementedError otherwise so the caller can fall
    back to Euler-Maclaurin. Nonpositive-integer s is handled exactly
    through Bernoulli polynomials.
    """
    # TODO: implement for derivatives
    if d != 0:
        raise NotImplementedError
    res = ctx.re(s)
    negs = -s
    # Integer reflection formula
    if ctx.isnpint(s):
        n = int(res)
        if n <= 0:
            return ctx.bernpoly(1-n, a) / (n-1)
    if not (atype == 'Q' or atype == 'Z'):
        raise NotImplementedError
    t = 1-s
    # We now require a to be standardized
    # Shift a into (0, 1], accumulating the corresponding partial sum in v.
    v = 0
    shift = 0
    b = a
    while ctx.re(b) > 1:
        b -= 1
        v -= b**negs
        shift -= 1
    while ctx.re(b) <= 0:
        v += b**negs
        b += 1
        shift += 1
    # Rational reflection formula
    # Rational a carries an exact numerator/denominator in _mpq_;
    # plain integers do not have the attribute.
    try:
        p, q = a._mpq_
    except AttributeError:
        assert a == int(a)
        p = int(a)
        q = 1
    p += shift*q
    assert 1 <= p <= q
    g = ctx.fsum(ctx.cospi(t/2-2*k*b)*ctx._hurwitz(t,(k,q)) \
        for k in range(1,q+1))
    g *= 2*ctx.gamma(t)/(2*ctx.pi*q)**t
    v += g
    return v
|
| 659 |
+
|
| 660 |
+
def _hurwitz_em(ctx, s, a, d, prec, verbose):
    """
    Euler-Maclaurin evaluation of zeta(s, a) (or its d-th s-derivative).

    Returns the pair (lsum, tail): a truncated L-series over n in
    [0, M2) plus the Euler-Maclaurin tail (integral, boundary and
    Bernoulli correction terms). The split point M2 is doubled until the
    correction terms fall below the tolerance.
    """
    # May not be converted at this point
    a = ctx.convert(a)
    tol = -prec
    # Estimate number of terms for Euler-Maclaurin summation; could be improved
    M1 = 0
    M2 = prec // 3
    N = M2
    lsum = 0
    # This speeds up the recurrence for derivatives
    if ctx.isint(s):
        s = int(ctx._re(s))
    s1 = s-1
    while 1:
        # Truncated L-series
        l = ctx._zetasum(s, M1+a, M2-M1-1, [d])[0][0]
        #if d:
        #    l = ctx.fsum((-ctx.ln(n+a))**d * (n+a)**negs for n in range(M1,M2))
        #else:
        #    l = ctx.fsum((n+a)**negs for n in range(M1,M2))
        lsum += l
        M2a = M2+a
        logM2a = ctx.ln(M2a)
        logM2ad = logM2a**d
        # logs holds powers of log needed by the derivative recurrence.
        logs = [logM2ad]
        logr = 1/logM2a
        rM2a = 1/M2a
        M2as = M2a**(-s)
        if d:
            # Integral term for derivatives, via the incomplete gamma.
            tailsum = ctx.gammainc(d+1, s1*logM2a) / s1**(d+1)
        else:
            tailsum = 1/((s1)*(M2a)**s1)
        # Boundary (one-half) term of Euler-Maclaurin.
        tailsum += 0.5 * logM2ad * M2as
        # U holds the polynomial coefficients of the derivative recurrence.
        U = [1]
        r = M2as
        fact = 2
        for j in range(1, N+1):
            # TODO: the following could perhaps be tidied a bit
            j2 = 2*j
            if j == 1:
                upds = [1]
            else:
                upds = [j2-2, j2-1]
            for m in upds:
                D = min(m,d+1)
                if m <= d:
                    logs.append(logs[-1] * logr)
                Un = [0]*(D+1)
                for i in xrange(D): Un[i] = (1-m-s)*U[i]
                for i in xrange(1,D+1): Un[i] += (d-(i-1))*U[i-1]
                U = Un
                r *= rM2a
            # Bernoulli correction term B_{2j}/(2j)! * f^(2j-1)(M2+a).
            t = ctx.fdot(U, logs) * r * ctx.bernoulli(j2)/(-fact)
            tailsum += t
            if ctx.mag(t) < tol:
                return lsum, (-1)**d * tailsum
            fact *= (j2+1)*(j2+2)
        if verbose:
            print("Sum range:", M1, M2, "term magnitude", ctx.mag(t), "tolerance", tol)
        # Not converged: extend the direct sum and retry.
        M1, M2 = M2, M2*2
        if ctx.re(s) < 0:
            N += N//2
|
| 722 |
+
|
| 723 |
+
|
| 724 |
+
|
| 725 |
+
@defun
def _zetasum(ctx, s, a, n, derivatives=[0], reflect=False):
    """
    Returns [xd0,xd1,...,xdr], [yd0,yd1,...ydr] where

    xdk = D^k ( 1/a^s + 1/(a+1)^s + ... + 1/(a+n)^s )
    ydk = D^k conj( 1/a^(1-s) + 1/(a+1)^(1-s) + ... + 1/(a+n)^(1-s) )

    D^k = kth derivative with respect to s, k ranges over the given list of
    derivatives (which should consist of either a single element
    or a range 0,1,...r). If reflect=False, the ydks are not computed.
    """
    #print "zetasum", s, a, n
    # don't use the fixed-point code if there are large exponentials
    if abs(ctx.re(s)) < 0.5 * ctx.prec:
        try:
            return ctx._zetasum_fast(s, a, n, derivatives, reflect)
        except NotImplementedError:
            pass
    negs = ctx.fneg(s, exact=True)
    have_derivatives = derivatives != [0]
    have_one_derivative = len(derivatives) == 1
    if not reflect:
        # Fast paths: plain sum, or a single derivative order.
        if not have_derivatives:
            return [ctx.fsum((a+k)**negs for k in xrange(n+1))], []
        if have_one_derivative:
            d = derivatives[0]
            x = ctx.fsum(ctx.ln(a+k)**d * (a+k)**negs for k in xrange(n+1))
            return [(-1)**d * x], []
    maxd = max(derivatives)
    if not have_one_derivative:
        derivatives = range(maxd+1)
    xs = [ctx.zero for d in derivatives]
    if reflect:
        ys = [ctx.zero for d in derivatives]
    else:
        ys = []
    for k in xrange(n+1):
        w = a + k
        xterm = w ** negs
        if reflect:
            # 1/w^(1-s) = 1/(w * w^(-s)), conjugated.
            yterm = ctx.conj(ctx.one / (w * xterm))
        if have_derivatives:
            logw = -ctx.ln(w)
            if have_one_derivative:
                logw = logw ** maxd
                xs[0] += xterm * logw
                if reflect:
                    ys[0] += yterm * logw
            else:
                # Accumulate all derivative orders, t = (-ln w)^d.
                t = ctx.one
                for d in derivatives:
                    xs[d] += xterm * t
                    if reflect:
                        ys[d] += yterm * t
                    t *= logw
        else:
            xs[0] += xterm
            if reflect:
                ys[0] += yterm
    return xs, ys
|
| 786 |
+
|
| 787 |
+
@defun
def dirichlet(ctx, s, chi=[1], derivative=0):
    """
    Evaluate the Dirichlet L-series L(s, chi) for a character given as a
    list of period-q values, via Hurwitz zeta:
    L(s) = q^(-s) * sum_p chi(p) zeta(s, p/q). Supports first
    derivatives only (d <= 1 effectively; d > 2 is rejected).
    """
    s = ctx.convert(s)
    q = len(chi)
    d = int(derivative)
    if d > 2:
        raise NotImplementedError("arbitrary order derivatives")
    prec = ctx.prec
    try:
        ctx.prec += 10
        if s == 1:
            # Principal-like characters (all nonzero values equal 1)
            # give a pole at s = 1; otherwise evaluate at s = 1 + eps.
            have_pole = True
            for x in chi:
                if x and x != 1:
                    have_pole = False
                    h = +ctx.eps
                    ctx.prec *= 2*(d+1)
                    s += h
            if have_pole:
                return +ctx.inf
        z = ctx.zero
        for p in range(1,q+1):
            if chi[p%q]:
                if d == 1:
                    # d/ds [q^(-s) zeta(s, p/q)] expanded by product rule.
                    z += chi[p%q] * (ctx.zeta(s, (p,q), 1) - \
                        ctx.zeta(s, (p,q))*ctx.log(q))
                else:
                    z += chi[p%q] * ctx.zeta(s, (p,q))
        z /= q**s
    finally:
        ctx.prec = prec
    return +z
|
| 819 |
+
|
| 820 |
+
|
| 821 |
+
def secondzeta_main_term(ctx, s, a, **kwargs):
    """
    Main term A(s) of the secondary zeta function: a sum over the
    imaginary parts of zeta zeros, smoothed by a regularized incomplete
    gamma factor. Returns (sum, error_estimate, terms_used).
    """
    tol = ctx.eps
    # NOTE(review): f ignores its argument and reads `gamm` from the
    # enclosing scope; it must only be called after gamm is updated
    # in the loop below.
    f = lambda n: ctx.gammainc(0.5*s, a*gamm**2, regularized=True)*gamm**(-s)
    totsum = term = ctx.zero
    mg = ctx.inf
    n = 0
    while mg > tol:
        # Add the previous term, then fetch the next zero.
        totsum += term
        n += 1
        gamm = ctx.im(ctx.zetazero_memoized(n))
        term = f(n)
        mg = abs(term)
    err = 0
    if kwargs.get("error"):
        # A-priori tail bound based on the last zero used.
        sg = ctx.re(s)
        err = 0.5*ctx.pi**(-1)*max(1,sg)*a**(sg-0.5)*ctx.log(gamm/(2*ctx.pi))*\
            ctx.gammainc(-0.5, a*gamm**2)/abs(ctx.gamma(s/2))
        err = abs(err)
    return +totsum, err, n
|
| 840 |
+
|
| 841 |
+
def secondzeta_prime_term(ctx, s, a, **kwargs):
    """
    Prime term P(s) of the secondary zeta function: a von Mangoldt
    weighted sum over integers n >= 2, smoothed by an incomplete gamma
    factor. Returns (sum, error_estimate, terms_used).
    """
    tol = ctx.eps
    f = lambda n: ctx.gammainc(0.5*(1-s),0.25*ctx.log(n)**2 * a**(-1))*\
        ((0.5*ctx.log(n))**(s-1))*ctx.mangoldt(n)/ctx.sqrt(n)/\
        (2*ctx.gamma(0.5*s)*ctx.sqrt(ctx.pi))
    totsum = term = ctx.zero
    mg = ctx.inf
    n = 1
    # Force at least a few terms, since Lambda(n) = 0 for many small n.
    while mg > tol or n < 9:
        totsum += term
        n += 1
        term = f(n)
        if term == 0:
            # A zero term (non-prime-power n) must not stop the loop.
            mg = ctx.inf
        else:
            mg = abs(term)
    if kwargs.get("error"):
        err = mg
    return +totsum, err, n
|
| 860 |
+
|
| 861 |
+
def secondzeta_exp_term(ctx, s, a):
    """
    Exponential term E(s) of the secondary zeta function, summed as a
    confluent-type power series in a. Nonpositive even integer s is
    evaluated in closed form.
    """
    if ctx.isint(s) and ctx.re(s) <= 0:
        m = int(round(ctx.re(s)))
        if not m & 1:
            # Exact dyadic rational value at nonpositive even integers.
            return ctx.mpf('-0.25')**(-m//2)
    tol = ctx.eps
    f = lambda n: (0.25*a)**n/((n+0.5*s)*ctx.fac(n))
    totsum = ctx.zero
    term = f(0)
    mg = ctx.inf
    n = 0
    while mg > tol:
        totsum += term
        n += 1
        term = f(n)
        mg = abs(term)
    v = a**(0.5*s)*totsum/ctx.gamma(0.5*s)
    return v
|
| 879 |
+
|
| 880 |
+
def secondzeta_singular_term(ctx, s, a, **kwargs):
    """
    Singular term S(s) of the secondary zeta function, responsible for
    its poles: a pole part at s = 1 plus a Bernoulli-polynomial series.
    The (asymptotic) series is summed while terms keep decreasing.
    Returns (value, error_estimate).
    """
    factor = a**(0.5*(s-1))/(4*ctx.sqrt(ctx.pi)*ctx.gamma(0.5*s))
    # Raise working precision to absorb the magnitude of the prefactor.
    extraprec = ctx.mag(factor)
    ctx.prec += extraprec
    factor = a**(0.5*(s-1))/(4*ctx.sqrt(ctx.pi)*ctx.gamma(0.5*s))
    tol = ctx.eps
    f = lambda n: ctx.bernpoly(n,0.75)*(4*ctx.sqrt(a))**n*\
        ctx.gamma(0.5*n)/((s+n-1)*ctx.fac(n))
    totsum = ctx.zero
    mg1 = ctx.inf
    n = 1
    term = f(n)
    mg2 = abs(term)
    # Asymptotic series: stop when terms stop shrinking (mg2 > mg1)
    # or drop below tolerance. Terms are consumed in pairs.
    while mg2 > tol and mg2 <= mg1:
        totsum += term
        n += 1
        term = f(n)
        totsum += term
        n +=1
        term = f(n)
        mg1 = mg2
        mg2 = abs(term)
    totsum += term
    # Double pole at s = 1 plus its logarithmic companion.
    pole = -2*(s-1)**(-2)+(ctx.euler+ctx.log(16*ctx.pi**2*a))*(s-1)**(-1)
    st = factor*(pole+totsum)
    err = 0
    if kwargs.get("error"):
        # Error estimate from the first neglected term of the
        # asymptotic series (never smaller than eps).
        if not ((mg2 > tol) and (mg2 <= mg1)):
            if mg2 <= tol:
                err = ctx.mpf(10)**int(ctx.log(abs(factor*tol),10))
            if mg2 > mg1:
                err = ctx.mpf(10)**int(ctx.log(abs(factor*mg1),10))
        err = max(err, ctx.eps*1.)
    ctx.prec -= extraprec
    return +st, err
|
| 915 |
+
|
| 916 |
+
@defun
|
| 917 |
+
def secondzeta(ctx, s, a = 0.015, **kwargs):
|
| 918 |
+
r"""
|
| 919 |
+
Evaluates the secondary zeta function `Z(s)`, defined for
|
| 920 |
+
`\mathrm{Re}(s)>1` by
|
| 921 |
+
|
| 922 |
+
.. math ::
|
| 923 |
+
|
| 924 |
+
Z(s) = \sum_{n=1}^{\infty} \frac{1}{\tau_n^s}
|
| 925 |
+
|
| 926 |
+
where `\frac12+i\tau_n` runs through the zeros of `\zeta(s)` with
|
| 927 |
+
imaginary part positive.
|
| 928 |
+
|
| 929 |
+
`Z(s)` extends to a meromorphic function on `\mathbb{C}` with a
|
| 930 |
+
double pole at `s=1` and simple poles at the points `-2n` for
|
| 931 |
+
`n=0`, 1, 2, ...
|
| 932 |
+
|
| 933 |
+
**Examples**
|
| 934 |
+
|
| 935 |
+
>>> from mpmath import *
|
| 936 |
+
>>> mp.pretty = True; mp.dps = 15
|
| 937 |
+
>>> secondzeta(2)
|
| 938 |
+
0.023104993115419
|
| 939 |
+
>>> xi = lambda s: 0.5*s*(s-1)*pi**(-0.5*s)*gamma(0.5*s)*zeta(s)
|
| 940 |
+
>>> Xi = lambda t: xi(0.5+t*j)
|
| 941 |
+
>>> chop(-0.5*diff(Xi,0,n=2)/Xi(0))
|
| 942 |
+
0.023104993115419
|
| 943 |
+
|
| 944 |
+
We may ask for an approximate error value::
|
| 945 |
+
|
| 946 |
+
>>> secondzeta(0.5+100j, error=True)
|
| 947 |
+
((-0.216272011276718 - 0.844952708937228j), 2.22044604925031e-16)
|
| 948 |
+
|
| 949 |
+
The function has poles at the negative odd integers,
|
| 950 |
+
and dyadic rational values at the negative even integers::
|
| 951 |
+
|
| 952 |
+
>>> mp.dps = 30
|
| 953 |
+
>>> secondzeta(-8)
|
| 954 |
+
-0.67236328125
|
| 955 |
+
>>> secondzeta(-7)
|
| 956 |
+
+inf
|
| 957 |
+
|
| 958 |
+
**Implementation notes**
|
| 959 |
+
|
| 960 |
+
The function is computed as sum of four terms `Z(s)=A(s)-P(s)+E(s)-S(s)`
|
| 961 |
+
respectively main, prime, exponential and singular terms.
|
| 962 |
+
The main term `A(s)` is computed from the zeros of zeta.
|
| 963 |
+
The prime term depends on the von Mangoldt function.
|
| 964 |
+
The singular term is responsible for the poles of the function.
|
| 965 |
+
|
| 966 |
+
The four terms depends on a small parameter `a`. We may change the
|
| 967 |
+
value of `a`. Theoretically this has no effect on the sum of the four
|
| 968 |
+
terms, but in practice may be important.
|
| 969 |
+
|
| 970 |
+
A smaller value of the parameter `a` makes `A(s)` depend on
|
| 971 |
+
a smaller number of zeros of zeta, but `P(s)` uses more values of
|
| 972 |
+
von Mangoldt function.
|
| 973 |
+
|
| 974 |
+
We may also add a verbose option to obtain data about the
|
| 975 |
+
values of the four terms.
|
| 976 |
+
|
| 977 |
+
>>> mp.dps = 10
|
| 978 |
+
>>> secondzeta(0.5 + 40j, error=True, verbose=True)
|
| 979 |
+
main term = (-30190318549.138656312556 - 13964804384.624622876523j)
|
| 980 |
+
computed using 19 zeros of zeta
|
| 981 |
+
prime term = (132717176.89212754625045 + 188980555.17563978290601j)
|
| 982 |
+
computed using 9 values of the von Mangoldt function
|
| 983 |
+
exponential term = (542447428666.07179812536 + 362434922978.80192435203j)
|
| 984 |
+
singular term = (512124392939.98154322355 + 348281138038.65531023921j)
|
| 985 |
+
((0.059471043 + 0.3463514534j), 1.455191523e-11)
|
| 986 |
+
|
| 987 |
+
>>> secondzeta(0.5 + 40j, a=0.04, error=True, verbose=True)
|
| 988 |
+
main term = (-151962888.19606243907725 - 217930683.90210294051982j)
|
| 989 |
+
computed using 9 zeros of zeta
|
| 990 |
+
prime term = (2476659342.3038722372461 + 28711581821.921627163136j)
|
| 991 |
+
computed using 37 values of the von Mangoldt function
|
| 992 |
+
exponential term = (178506047114.7838188264 + 819674143244.45677330576j)
|
| 993 |
+
singular term = (175877424884.22441310708 + 790744630738.28669174871j)
|
| 994 |
+
((0.059471043 + 0.3463514534j), 1.455191523e-11)
|
| 995 |
+
|
| 996 |
+
Notice the great cancellation between the four terms. Changing `a`, the
|
| 997 |
+
four terms are very different numbers but the cancellation gives
|
| 998 |
+
the good value of Z(s).
|
| 999 |
+
|
| 1000 |
+
**References**
|
| 1001 |
+
|
| 1002 |
+
A. Voros, Zeta functions for the Riemann zeros, Ann. Institute Fourier,
|
| 1003 |
+
53, (2003) 665--699.
|
| 1004 |
+
|
| 1005 |
+
A. Voros, Zeta functions over Zeros of Zeta Functions, Lecture Notes
|
| 1006 |
+
of the Unione Matematica Italiana, Springer, 2009.
|
| 1007 |
+
"""
|
| 1008 |
+
s = ctx.convert(s)
|
| 1009 |
+
a = ctx.convert(a)
|
| 1010 |
+
tol = ctx.eps
|
| 1011 |
+
if ctx.isint(s) and ctx.re(s) <= 1:
|
| 1012 |
+
if abs(s-1) < tol*1000:
|
| 1013 |
+
return ctx.inf
|
| 1014 |
+
m = int(round(ctx.re(s)))
|
| 1015 |
+
if m & 1:
|
| 1016 |
+
return ctx.inf
|
| 1017 |
+
else:
|
| 1018 |
+
return ((-1)**(-m//2)*\
|
| 1019 |
+
ctx.fraction(8-ctx.eulernum(-m,exact=True),2**(-m+3)))
|
| 1020 |
+
prec = ctx.prec
|
| 1021 |
+
try:
|
| 1022 |
+
t3 = secondzeta_exp_term(ctx, s, a)
|
| 1023 |
+
extraprec = max(ctx.mag(t3),0)
|
| 1024 |
+
ctx.prec += extraprec + 3
|
| 1025 |
+
t1, r1, gt = secondzeta_main_term(ctx,s,a,error='True', verbose='True')
|
| 1026 |
+
t2, r2, pt = secondzeta_prime_term(ctx,s,a,error='True', verbose='True')
|
| 1027 |
+
t4, r4 = secondzeta_singular_term(ctx,s,a,error='True')
|
| 1028 |
+
t3 = secondzeta_exp_term(ctx, s, a)
|
| 1029 |
+
err = r1+r2+r4
|
| 1030 |
+
t = t1-t2+t3-t4
|
| 1031 |
+
if kwargs.get("verbose"):
|
| 1032 |
+
print('main term =', t1)
|
| 1033 |
+
print(' computed using', gt, 'zeros of zeta')
|
| 1034 |
+
print('prime term =', t2)
|
| 1035 |
+
print(' computed using', pt, 'values of the von Mangoldt function')
|
| 1036 |
+
print('exponential term =', t3)
|
| 1037 |
+
print('singular term =', t4)
|
| 1038 |
+
finally:
|
| 1039 |
+
ctx.prec = prec
|
| 1040 |
+
if kwargs.get("error"):
|
| 1041 |
+
w = max(ctx.mag(abs(t)),0)
|
| 1042 |
+
err = max(err*2**w, ctx.eps*1.*2**w)
|
| 1043 |
+
return +t, err
|
| 1044 |
+
return +t
|
| 1045 |
+
|
| 1046 |
+
|
| 1047 |
+
@defun_wrapped
|
| 1048 |
+
def lerchphi(ctx, z, s, a):
|
| 1049 |
+
r"""
|
| 1050 |
+
Gives the Lerch transcendent, defined for `|z| < 1` and
|
| 1051 |
+
`\Re{a} > 0` by
|
| 1052 |
+
|
| 1053 |
+
.. math ::
|
| 1054 |
+
|
| 1055 |
+
\Phi(z,s,a) = \sum_{k=0}^{\infty} \frac{z^k}{(a+k)^s}
|
| 1056 |
+
|
| 1057 |
+
and generally by the recurrence `\Phi(z,s,a) = z \Phi(z,s,a+1) + a^{-s}`
|
| 1058 |
+
along with the integral representation valid for `\Re{a} > 0`
|
| 1059 |
+
|
| 1060 |
+
.. math ::
|
| 1061 |
+
|
| 1062 |
+
\Phi(z,s,a) = \frac{1}{2 a^s} +
|
| 1063 |
+
\int_0^{\infty} \frac{z^t}{(a+t)^s} dt -
|
| 1064 |
+
2 \int_0^{\infty} \frac{\sin(t \log z - s
|
| 1065 |
+
\operatorname{arctan}(t/a)}{(a^2 + t^2)^{s/2}
|
| 1066 |
+
(e^{2 \pi t}-1)} dt.
|
| 1067 |
+
|
| 1068 |
+
The Lerch transcendent generalizes the Hurwitz zeta function :func:`zeta`
|
| 1069 |
+
(`z = 1`) and the polylogarithm :func:`polylog` (`a = 1`).
|
| 1070 |
+
|
| 1071 |
+
**Examples**
|
| 1072 |
+
|
| 1073 |
+
Several evaluations in terms of simpler functions::
|
| 1074 |
+
|
| 1075 |
+
>>> from mpmath import *
|
| 1076 |
+
>>> mp.dps = 25; mp.pretty = True
|
| 1077 |
+
>>> lerchphi(-1,2,0.5); 4*catalan
|
| 1078 |
+
3.663862376708876060218414
|
| 1079 |
+
3.663862376708876060218414
|
| 1080 |
+
>>> diff(lerchphi, (-1,-2,1), (0,1,0)); 7*zeta(3)/(4*pi**2)
|
| 1081 |
+
0.2131391994087528954617607
|
| 1082 |
+
0.2131391994087528954617607
|
| 1083 |
+
>>> lerchphi(-4,1,1); log(5)/4
|
| 1084 |
+
0.4023594781085250936501898
|
| 1085 |
+
0.4023594781085250936501898
|
| 1086 |
+
>>> lerchphi(-3+2j,1,0.5); 2*atanh(sqrt(-3+2j))/sqrt(-3+2j)
|
| 1087 |
+
(1.142423447120257137774002 + 0.2118232380980201350495795j)
|
| 1088 |
+
(1.142423447120257137774002 + 0.2118232380980201350495795j)
|
| 1089 |
+
|
| 1090 |
+
Evaluation works for complex arguments and `|z| \ge 1`::
|
| 1091 |
+
|
| 1092 |
+
>>> lerchphi(1+2j, 3-j, 4+2j)
|
| 1093 |
+
(0.002025009957009908600539469 + 0.003327897536813558807438089j)
|
| 1094 |
+
>>> lerchphi(-2,2,-2.5)
|
| 1095 |
+
-12.28676272353094275265944
|
| 1096 |
+
>>> lerchphi(10,10,10)
|
| 1097 |
+
(-4.462130727102185701817349e-11 - 1.575172198981096218823481e-12j)
|
| 1098 |
+
>>> lerchphi(10,10,-10.5)
|
| 1099 |
+
(112658784011940.5605789002 - 498113185.5756221777743631j)
|
| 1100 |
+
|
| 1101 |
+
Some degenerate cases::
|
| 1102 |
+
|
| 1103 |
+
>>> lerchphi(0,1,2)
|
| 1104 |
+
0.5
|
| 1105 |
+
>>> lerchphi(0,1,-2)
|
| 1106 |
+
-0.5
|
| 1107 |
+
|
| 1108 |
+
Reduction to simpler functions::
|
| 1109 |
+
|
| 1110 |
+
>>> lerchphi(1, 4.25+1j, 1)
|
| 1111 |
+
(1.044674457556746668033975 - 0.04674508654012658932271226j)
|
| 1112 |
+
>>> zeta(4.25+1j)
|
| 1113 |
+
(1.044674457556746668033975 - 0.04674508654012658932271226j)
|
| 1114 |
+
>>> lerchphi(1 - 0.5**10, 4.25+1j, 1)
|
| 1115 |
+
(1.044629338021507546737197 - 0.04667768813963388181708101j)
|
| 1116 |
+
>>> lerchphi(3, 4, 1)
|
| 1117 |
+
(1.249503297023366545192592 - 0.2314252413375664776474462j)
|
| 1118 |
+
>>> polylog(4, 3) / 3
|
| 1119 |
+
(1.249503297023366545192592 - 0.2314252413375664776474462j)
|
| 1120 |
+
>>> lerchphi(3, 4, 1 - 0.5**10)
|
| 1121 |
+
(1.253978063946663945672674 - 0.2316736622836535468765376j)
|
| 1122 |
+
|
| 1123 |
+
**References**
|
| 1124 |
+
|
| 1125 |
+
1. [DLMF]_ section 25.14
|
| 1126 |
+
|
| 1127 |
+
"""
|
| 1128 |
+
if z == 0:
|
| 1129 |
+
return a ** (-s)
|
| 1130 |
+
# Faster, but these cases are useful for testing right now
|
| 1131 |
+
if z == 1:
|
| 1132 |
+
return ctx.zeta(s, a)
|
| 1133 |
+
if a == 1:
|
| 1134 |
+
return ctx.polylog(s, z) / z
|
| 1135 |
+
if ctx.re(a) < 1:
|
| 1136 |
+
if ctx.isnpint(a):
|
| 1137 |
+
raise ValueError("Lerch transcendent complex infinity")
|
| 1138 |
+
m = int(ctx.ceil(1-ctx.re(a)))
|
| 1139 |
+
v = ctx.zero
|
| 1140 |
+
zpow = ctx.one
|
| 1141 |
+
for n in xrange(m):
|
| 1142 |
+
v += zpow / (a+n)**s
|
| 1143 |
+
zpow *= z
|
| 1144 |
+
return zpow * ctx.lerchphi(z,s, a+m) + v
|
| 1145 |
+
g = ctx.ln(z)
|
| 1146 |
+
v = 1/(2*a**s) + ctx.gammainc(1-s, -a*g) * (-g)**(s-1) / z**a
|
| 1147 |
+
h = s / 2
|
| 1148 |
+
r = 2*ctx.pi
|
| 1149 |
+
f = lambda t: ctx.sin(s*ctx.atan(t/a)-t*g) / \
|
| 1150 |
+
((a**2+t**2)**h * ctx.expm1(r*t))
|
| 1151 |
+
v += 2*ctx.quad(f, [0, ctx.inf])
|
| 1152 |
+
if not ctx.im(z) and not ctx.im(s) and not ctx.im(a) and ctx.re(z) < 1:
|
| 1153 |
+
v = ctx.chop(v)
|
| 1154 |
+
return v
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (10.4 kB). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/backend.cpython-311.pyc
ADDED
|
Binary file (2.85 kB). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/gammazeta.cpython-311.pyc
ADDED
|
Binary file (89 kB). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/libintmath.cpython-311.pyc
ADDED
|
Binary file (22.9 kB). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/libmpf.cpython-311.pyc
ADDED
|
Binary file (52.6 kB). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/libmpi.cpython-311.pyc
ADDED
|
Binary file (43.4 kB). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/libmp/backend.py
ADDED
|
@@ -0,0 +1,115 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import os
|
| 2 |
+
import sys
|
| 3 |
+
|
| 4 |
+
#----------------------------------------------------------------------------#
|
| 5 |
+
# Support GMPY for high-speed large integer arithmetic. #
|
| 6 |
+
# #
|
| 7 |
+
# To allow an external module to handle arithmetic, we need to make sure #
|
| 8 |
+
# that all high-precision variables are declared of the correct type. MPZ #
|
| 9 |
+
# is the constructor for the high-precision type. It defaults to Python's #
|
| 10 |
+
# long type but can be assinged another type, typically gmpy.mpz. #
|
| 11 |
+
# #
|
| 12 |
+
# MPZ must be used for the mantissa component of an mpf and must be used #
|
| 13 |
+
# for internal fixed-point operations. #
|
| 14 |
+
# #
|
| 15 |
+
# Side-effects #
|
| 16 |
+
# 1) "is" cannot be used to test for special values. Must use "==". #
|
| 17 |
+
# 2) There are bugs in GMPY prior to v1.02 so we must use v1.03 or later. #
|
| 18 |
+
#----------------------------------------------------------------------------#
|
| 19 |
+
|
| 20 |
+
# So we can import it from this module
|
| 21 |
+
gmpy = None
|
| 22 |
+
sage = None
|
| 23 |
+
sage_utils = None
|
| 24 |
+
|
| 25 |
+
if sys.version_info[0] < 3:
|
| 26 |
+
python3 = False
|
| 27 |
+
else:
|
| 28 |
+
python3 = True
|
| 29 |
+
|
| 30 |
+
BACKEND = 'python'
|
| 31 |
+
|
| 32 |
+
if not python3:
|
| 33 |
+
MPZ = long
|
| 34 |
+
xrange = xrange
|
| 35 |
+
basestring = basestring
|
| 36 |
+
|
| 37 |
+
def exec_(_code_, _globs_=None, _locs_=None):
|
| 38 |
+
"""Execute code in a namespace."""
|
| 39 |
+
if _globs_ is None:
|
| 40 |
+
frame = sys._getframe(1)
|
| 41 |
+
_globs_ = frame.f_globals
|
| 42 |
+
if _locs_ is None:
|
| 43 |
+
_locs_ = frame.f_locals
|
| 44 |
+
del frame
|
| 45 |
+
elif _locs_ is None:
|
| 46 |
+
_locs_ = _globs_
|
| 47 |
+
exec("""exec _code_ in _globs_, _locs_""")
|
| 48 |
+
else:
|
| 49 |
+
MPZ = int
|
| 50 |
+
xrange = range
|
| 51 |
+
basestring = str
|
| 52 |
+
|
| 53 |
+
import builtins
|
| 54 |
+
exec_ = getattr(builtins, "exec")
|
| 55 |
+
|
| 56 |
+
# Define constants for calculating hash on Python 3.2.
|
| 57 |
+
if sys.version_info >= (3, 2):
|
| 58 |
+
HASH_MODULUS = sys.hash_info.modulus
|
| 59 |
+
if sys.hash_info.width == 32:
|
| 60 |
+
HASH_BITS = 31
|
| 61 |
+
else:
|
| 62 |
+
HASH_BITS = 61
|
| 63 |
+
else:
|
| 64 |
+
HASH_MODULUS = None
|
| 65 |
+
HASH_BITS = None
|
| 66 |
+
|
| 67 |
+
if 'MPMATH_NOGMPY' not in os.environ:
|
| 68 |
+
try:
|
| 69 |
+
try:
|
| 70 |
+
import gmpy2 as gmpy
|
| 71 |
+
except ImportError:
|
| 72 |
+
try:
|
| 73 |
+
import gmpy
|
| 74 |
+
except ImportError:
|
| 75 |
+
raise ImportError
|
| 76 |
+
if gmpy.version() >= '1.03':
|
| 77 |
+
BACKEND = 'gmpy'
|
| 78 |
+
MPZ = gmpy.mpz
|
| 79 |
+
except:
|
| 80 |
+
pass
|
| 81 |
+
|
| 82 |
+
if ('MPMATH_NOSAGE' not in os.environ and 'SAGE_ROOT' in os.environ or
|
| 83 |
+
'MPMATH_SAGE' in os.environ):
|
| 84 |
+
try:
|
| 85 |
+
import sage.all
|
| 86 |
+
import sage.libs.mpmath.utils as _sage_utils
|
| 87 |
+
sage = sage.all
|
| 88 |
+
sage_utils = _sage_utils
|
| 89 |
+
BACKEND = 'sage'
|
| 90 |
+
MPZ = sage.Integer
|
| 91 |
+
except:
|
| 92 |
+
pass
|
| 93 |
+
|
| 94 |
+
if 'MPMATH_STRICT' in os.environ:
|
| 95 |
+
STRICT = True
|
| 96 |
+
else:
|
| 97 |
+
STRICT = False
|
| 98 |
+
|
| 99 |
+
MPZ_TYPE = type(MPZ(0))
|
| 100 |
+
MPZ_ZERO = MPZ(0)
|
| 101 |
+
MPZ_ONE = MPZ(1)
|
| 102 |
+
MPZ_TWO = MPZ(2)
|
| 103 |
+
MPZ_THREE = MPZ(3)
|
| 104 |
+
MPZ_FIVE = MPZ(5)
|
| 105 |
+
|
| 106 |
+
try:
|
| 107 |
+
if BACKEND == 'python':
|
| 108 |
+
int_types = (int, long)
|
| 109 |
+
else:
|
| 110 |
+
int_types = (int, long, MPZ_TYPE)
|
| 111 |
+
except NameError:
|
| 112 |
+
if BACKEND == 'python':
|
| 113 |
+
int_types = (int,)
|
| 114 |
+
else:
|
| 115 |
+
int_types = (int, MPZ_TYPE)
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/libmp/gammazeta.py
ADDED
|
@@ -0,0 +1,2167 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
-----------------------------------------------------------------------
|
| 3 |
+
This module implements gamma- and zeta-related functions:
|
| 4 |
+
|
| 5 |
+
* Bernoulli numbers
|
| 6 |
+
* Factorials
|
| 7 |
+
* The gamma function
|
| 8 |
+
* Polygamma functions
|
| 9 |
+
* Harmonic numbers
|
| 10 |
+
* The Riemann zeta function
|
| 11 |
+
* Constants related to these functions
|
| 12 |
+
|
| 13 |
+
-----------------------------------------------------------------------
|
| 14 |
+
"""
|
| 15 |
+
|
| 16 |
+
import math
|
| 17 |
+
import sys
|
| 18 |
+
|
| 19 |
+
from .backend import xrange
|
| 20 |
+
from .backend import MPZ, MPZ_ZERO, MPZ_ONE, MPZ_THREE, gmpy
|
| 21 |
+
|
| 22 |
+
from .libintmath import list_primes, ifac, ifac2, moebius
|
| 23 |
+
|
| 24 |
+
from .libmpf import (\
|
| 25 |
+
round_floor, round_ceiling, round_down, round_up,
|
| 26 |
+
round_nearest, round_fast,
|
| 27 |
+
lshift, sqrt_fixed, isqrt_fast,
|
| 28 |
+
fzero, fone, fnone, fhalf, ftwo, finf, fninf, fnan,
|
| 29 |
+
from_int, to_int, to_fixed, from_man_exp, from_rational,
|
| 30 |
+
mpf_pos, mpf_neg, mpf_abs, mpf_add, mpf_sub,
|
| 31 |
+
mpf_mul, mpf_mul_int, mpf_div, mpf_sqrt, mpf_pow_int,
|
| 32 |
+
mpf_rdiv_int,
|
| 33 |
+
mpf_perturb, mpf_le, mpf_lt, mpf_gt, mpf_shift,
|
| 34 |
+
negative_rnd, reciprocal_rnd,
|
| 35 |
+
bitcount, to_float, mpf_floor, mpf_sign, ComplexResult
|
| 36 |
+
)
|
| 37 |
+
|
| 38 |
+
from .libelefun import (\
|
| 39 |
+
constant_memo,
|
| 40 |
+
def_mpf_constant,
|
| 41 |
+
mpf_pi, pi_fixed, ln2_fixed, log_int_fixed, mpf_ln2,
|
| 42 |
+
mpf_exp, mpf_log, mpf_pow, mpf_cosh,
|
| 43 |
+
mpf_cos_sin, mpf_cosh_sinh, mpf_cos_sin_pi, mpf_cos_pi, mpf_sin_pi,
|
| 44 |
+
ln_sqrt2pi_fixed, mpf_ln_sqrt2pi, sqrtpi_fixed, mpf_sqrtpi,
|
| 45 |
+
cos_sin_fixed, exp_fixed
|
| 46 |
+
)
|
| 47 |
+
|
| 48 |
+
from .libmpc import (\
|
| 49 |
+
mpc_zero, mpc_one, mpc_half, mpc_two,
|
| 50 |
+
mpc_abs, mpc_shift, mpc_pos, mpc_neg,
|
| 51 |
+
mpc_add, mpc_sub, mpc_mul, mpc_div,
|
| 52 |
+
mpc_add_mpf, mpc_mul_mpf, mpc_div_mpf, mpc_mpf_div,
|
| 53 |
+
mpc_mul_int, mpc_pow_int,
|
| 54 |
+
mpc_log, mpc_exp, mpc_pow,
|
| 55 |
+
mpc_cos_pi, mpc_sin_pi,
|
| 56 |
+
mpc_reciprocal, mpc_square,
|
| 57 |
+
mpc_sub_mpf
|
| 58 |
+
)
|
| 59 |
+
|
| 60 |
+
|
| 61 |
+
|
| 62 |
+
# Catalan's constant is computed using Lupas's rapidly convergent series
|
| 63 |
+
# (listed on http://mathworld.wolfram.com/CatalansConstant.html)
|
| 64 |
+
# oo
|
| 65 |
+
# ___ n-1 8n 2 3 2
|
| 66 |
+
# 1 \ (-1) 2 (40n - 24n + 3) [(2n)!] (n!)
|
| 67 |
+
# K = --- ) -----------------------------------------
|
| 68 |
+
# 64 /___ 3 2
|
| 69 |
+
# n (2n-1) [(4n)!]
|
| 70 |
+
# n = 1
|
| 71 |
+
|
| 72 |
+
@constant_memo
def catalan_fixed(prec):
    """Return Catalan's constant as a fixed-point integer with *prec*
    fractional bits.

    Uses Lupas's rapidly convergent series (see the comment above);
    20 guard bits are used and the loop stops when a term underflows
    to zero in fixed point.  (Also removes a previously unused local
    alias ``one``.)
    """
    prec = prec + 20
    # Fixed-point running prefactor of the hypergeometric-style series
    a = MPZ_ONE << prec
    s, t, n = 0, 1, 1
    while t:
        # Advance the term ratio a(n)/a(n-1)
        a *= 32 * n**3 * (2*n-1)
        a //= (3-16*n+16*n**2)**2
        # Full n-th term of Lupas's series
        t = a * (-1)**(n-1) * (40*n**2-24*n+3) // (n**3 * (2*n-1))
        s += t
        n += 1
    # Drop the 20 guard bits and apply the 1/64 prefactor (>> 6)
    return s >> (20 + 6)
|
| 84 |
+
|
| 85 |
+
# Khinchin's constant is relatively difficult to compute. Here
|
| 86 |
+
# we use the rational zeta series
|
| 87 |
+
|
| 88 |
+
# oo 2*n-1
|
| 89 |
+
# ___ ___
|
| 90 |
+
# \ ` zeta(2*n)-1 \ ` (-1)^(k+1)
|
| 91 |
+
# log(K)*log(2) = ) ------------ ) ----------
|
| 92 |
+
# /___. n /___. k
|
| 93 |
+
# n = 1 k = 1
|
| 94 |
+
|
| 95 |
+
# which adds half a digit per term. The essential trick for achieving
|
| 96 |
+
# reasonable efficiency is to recycle both the values of the zeta
|
| 97 |
+
# function (essentially Bernoulli numbers) and the partial terms of
|
| 98 |
+
# the inner sum.
|
| 99 |
+
|
| 100 |
+
# An alternative might be to use K = 2*exp[1/log(2) X] where
|
| 101 |
+
|
| 102 |
+
# / 1 1 [ pi*x*(1-x^2) ]
|
| 103 |
+
# X = | ------ log [ ------------ ].
|
| 104 |
+
# / 0 x(1+x) [ sin(pi*x) ]
|
| 105 |
+
|
| 106 |
+
# and integrate numerically. In practice, this seems to be slightly
|
| 107 |
+
# slower than the zeta series at high precision.
|
| 108 |
+
|
| 109 |
+
@constant_memo
def khinchin_fixed(prec):
    """Return Khinchin's constant K as a fixed-point integer with *prec*
    fractional bits, using the rational zeta series in the comment above
    (roughly half a digit per term)."""
    # Working precision; the extra sqrt(prec) absorbs accumulated error
    wp = int(prec + prec**0.5 + 15)
    s = MPZ_ZERO
    # fac = 2*(2n)!, used to recover zeta(2n) from the Bernoulli number:
    # zeta(2n) = |B_2n| * (2*pi)**(2n) / (2*(2n)!)
    fac = from_int(4)
    # t holds the inner alternating partial sum 1 - 1/2 + ... +- 1/(2n-1),
    # as a fixed-point integer scaled by ONE
    t = ONE = MPZ_ONE << wp
    pi = mpf_pi(wp)
    # pipow = (2*pi)**(2n); twopi2 = (2*pi)**2
    pipow = twopi2 = mpf_shift(mpf_mul(pi, pi, wp), 2)
    n = 1
    while 1:
        # zeta(2n) computed from B_2n (cheap: Bernoulli numbers are cached)
        zeta2n = mpf_abs(mpf_bernoulli(2*n, wp))
        zeta2n = mpf_mul(zeta2n, pipow, wp)
        zeta2n = mpf_div(zeta2n, fac, wp)
        zeta2n = to_fixed(zeta2n, wp)
        # Outer term: (zeta(2n)-1)/n times the inner partial sum
        term = (((zeta2n - ONE) * t) // n) >> wp
        # Terms have shrunk below the guard bits: converged
        if term < 100:
            break
        #if not n % 10:
        #    print n, math.log(int(abs(term)))
        s += term
        # Extend the inner alternating sum by the next two terms
        t += ONE//(2*n+1) - ONE//(2*n)
        n += 1
        fac = mpf_mul_int(fac, (2*n)*(2*n-1), wp)
        pipow = mpf_mul(pipow, twopi2, wp)
    # s = log(K)*log(2); divide by log(2), then exponentiate to get K
    s = (s << wp) // ln2_fixed(wp)
    K = mpf_exp(from_man_exp(s, -wp), wp)
    K = to_fixed(K, prec)
    return K
|
| 137 |
+
|
| 138 |
+
|
| 139 |
+
# Glaisher's constant is defined as A = exp(1/2 - zeta'(-1)).
|
| 140 |
+
# One way to compute it would be to perform direct numerical
|
| 141 |
+
# differentiation, but computing arbitrary Riemann zeta function
|
| 142 |
+
# values at high precision is expensive. We instead use the formula
|
| 143 |
+
|
| 144 |
+
# A = exp((6 (-zeta'(2))/pi^2 + log 2 pi + gamma)/12)
|
| 145 |
+
|
| 146 |
+
# and compute zeta'(2) from the series representation
|
| 147 |
+
|
| 148 |
+
# oo
|
| 149 |
+
# ___
|
| 150 |
+
# \ log k
|
| 151 |
+
# -zeta'(2) = ) -----
|
| 152 |
+
# /___ 2
|
| 153 |
+
# k
|
| 154 |
+
# k = 2
|
| 155 |
+
|
| 156 |
+
# This series converges exceptionally slowly, but can be accelerated
|
| 157 |
+
# using Euler-Maclaurin formula. The important insight is that the
|
| 158 |
+
# E-M integral can be done in closed form and that the high order
|
| 159 |
+
# are given by
|
| 160 |
+
|
| 161 |
+
# n / \
|
| 162 |
+
# d | log x | a + b log x
|
| 163 |
+
# --- | ----- | = -----------
|
| 164 |
+
# n | 2 | 2 + n
|
| 165 |
+
# dx \ x / x
|
| 166 |
+
|
| 167 |
+
# where a and b are integers given by a simple recurrence. Note
|
| 168 |
+
# that just one logarithm is needed. However, lots of integer
|
| 169 |
+
# logarithms are required for the initial summation.
|
| 170 |
+
|
| 171 |
+
# This algorithm could possibly be turned into a faster algorithm
|
| 172 |
+
# for general evaluation of zeta(s) or zeta'(s); this should be
|
| 173 |
+
# looked into.
|
| 174 |
+
|
| 175 |
+
@constant_memo
def glaisher_fixed(prec):
    """Return Glaisher's constant A as a fixed-point integer with *prec*
    fractional bits, via A = exp((6*(-zeta'(2))/pi^2 + log(2*pi) + euler)/12)
    with -zeta'(2) summed by Euler-Maclaurin (see the comment above)."""
    wp = prec + 30
    # Number of direct terms to sum before applying the Euler-Maclaurin
    # formula to the tail. TODO: choose more intelligently
    N = int(0.33*prec + 5)
    ONE = MPZ_ONE << wp
    # Euler-Maclaurin, step 1: sum log(k)/k**2 for k from 2 to N-1
    s = MPZ_ZERO
    for k in range(2, N):
        #print k, N
        s += log_int_fixed(k, wp) // k**2
    logN = log_int_fixed(N, wp)
    #logN = to_fixed(mpf_log(from_int(N), wp+20), wp)
    # E-M step 2: integral of log(x)/x**2 from N to inf
    s += (ONE + logN) // N
    # E-M step 3: endpoint correction term f(N)/2
    s += logN // (N**2 * 2)
    # E-M step 4: the series of derivatives.  The n-th derivative of
    # log(x)/x**2 is (a + b*log(x))/x**(2+n) with integer a, b (see above);
    # pN tracks N**(2+n), (a, b) the coefficients, j the next derivative order.
    pN = N**3
    a = 1
    b = -2
    j = 3
    fac = from_int(2)   # (2k)! in the Bernoulli weights
    k = 1
    while 1:
        # D(2*k-1) * B(2*k) / fac(2*k) [D(n) = nth derivative]
        D = ((a << wp) + b*logN) // pN
        D = from_man_exp(D, -wp)
        B = mpf_bernoulli(2*k, wp)
        term = mpf_mul(B, D, wp)
        term = mpf_div(term, fac, wp)
        term = to_fixed(term, wp)
        # Term below the guard bits: converged
        if abs(term) < 100:
            break
        #if not k % 10:
        #    print k, math.log(int(abs(term)), 10)
        s -= term
        # Advance derivative twice
        a, b, pN, j = b-a*j, -j*b, pN*N, j+1
        a, b, pN, j = b-a*j, -j*b, pN*N, j+1
        k += 1
        fac = mpf_mul_int(fac, (2*k)*(2*k-1), wp)
    # A = exp((6*s/pi**2 + log(2*pi) + euler)/12)
    pi = pi_fixed(wp)
    s *= 6
    s = (s << wp) // (pi**2 >> wp)
    s += euler_fixed(wp)
    s += to_fixed(mpf_log(from_man_exp(2*pi, -wp), wp), wp)
    s //= 12
    A = mpf_exp(from_man_exp(s, -wp), wp)
    return to_fixed(A, prec)
|
| 227 |
+
|
| 228 |
+
# Apery's constant can be computed using the very rapidly convergent
|
| 229 |
+
# series
|
| 230 |
+
# oo
|
| 231 |
+
# ___ 2 10
|
| 232 |
+
# \ n 205 n + 250 n + 77 (n!)
|
| 233 |
+
# zeta(3) = ) (-1) ------------------- ----------
|
| 234 |
+
# /___ 64 5
|
| 235 |
+
# n = 0 ((2n+1)!)
|
| 236 |
+
|
| 237 |
+
@constant_memo
def apery_fixed(prec):
    """Return Apery's constant zeta(3) as a fixed-point integer with
    *prec* fractional bits, summed with the rapidly convergent series
    shown in the comment above."""
    wp = prec + 20
    # Fixed-point factorial ratio (n!)**10 / ((2n+1)!)**5 appearing in
    # the series, advanced incrementally
    ratio = MPZ_ONE << wp
    total = MPZ_ZERO
    # n = 0 term: the polynomial 205 n^2 + 250 n + 77 evaluates to 77
    term = MPZ(77) << wp
    k = 1
    while term:
        total += term
        # Advance the factorial ratio from n = k-1 to n = k
        ratio = ratio * k**10 // ((2*k+1)**5 * (2*k)**5)
        sign = -1 if k & 1 else 1
        term = sign * (205*k*k + 250*k + 77) * ratio
        k += 1
    # Remove guard bits and the 1/64 prefactor
    return total >> (20 + 6)
|
| 251 |
+
|
| 252 |
+
"""
|
| 253 |
+
Euler's constant (gamma) is computed using the Brent-McMillan formula,
|
| 254 |
+
gamma ~= I(n)/J(n) - log(n), where
|
| 255 |
+
|
| 256 |
+
I(n) = sum_{k=0,1,2,...} (n**k / k!)**2 * H(k)
|
| 257 |
+
J(n) = sum_{k=0,1,2,...} (n**k / k!)**2
|
| 258 |
+
H(k) = 1 + 1/2 + 1/3 + ... + 1/k
|
| 259 |
+
|
| 260 |
+
The error is bounded by O(exp(-4n)). Choosing n to be a power
|
| 261 |
+
of two, 2**p, the logarithm becomes particularly easy to calculate.[1]
|
| 262 |
+
|
| 263 |
+
We use the formulation of Algorithm 3.9 in [2] to make the summation
|
| 264 |
+
more efficient.
|
| 265 |
+
|
| 266 |
+
Reference:
|
| 267 |
+
[1] Xavier Gourdon & Pascal Sebah, The Euler constant: gamma
|
| 268 |
+
http://numbers.computation.free.fr/Constants/Gamma/gamma.pdf
|
| 269 |
+
|
| 270 |
+
[2] [BorweinBailey]_
|
| 271 |
+
"""
|
| 272 |
+
|
| 273 |
+
@constant_memo
def euler_fixed(prec):
    """Return Euler's constant gamma as a fixed-point integer with *prec*
    fractional bits, using the Brent-McMillan formula gamma ~= I(n)/J(n)
    - log(n) described in the comment above."""
    guard = 30
    prec += guard
    # Choose p so that the truncation error exp(-4*(2**p)) < 2**-prec;
    # with n = 2**p the log(n) term is simply p*log(2)
    p = int(math.log((prec/4) * math.log(2), 2)) + 1
    n = 2**p
    # A, B are the current terms of I(n) and J(n); U, V their running sums.
    # U starts at -log(n) so the quotient directly gives gamma.
    A = U = -p*ln2_fixed(prec)
    B = V = MPZ_ONE << prec
    k = 1
    while True:
        B = B*n**2//k**2
        A = (A*n**2//k + B)//k
        U += A
        V += B
        # Both series terms negligible: converged
        if max(abs(A), abs(B)) < 100:
            break
        k += 1
    return (U<<(prec-guard))//V
|
| 292 |
+
|
| 293 |
+
# Use zeta accelerated formulas for the Mertens and twin
|
| 294 |
+
# prime constants; see
|
| 295 |
+
# http://mathworld.wolfram.com/MertensConstant.html
|
| 296 |
+
# http://mathworld.wolfram.com/TwinPrimesConstant.html
|
| 297 |
+
|
| 298 |
+
@constant_memo
def mertens_fixed(prec):
    """Return the Mertens constant as a fixed-point integer with *prec*
    fractional bits, via the zeta-accelerated series
    B1 = euler + sum_{m>=2} moebius(m)*log(zeta(m))/m (see link above)."""
    wp = prec + 20
    total = mpf_euler(wp)
    m = 2
    while True:
        z = mpf_zeta_int(m, wp)
        # Once zeta(m) rounds to exactly 1, log(zeta(m)) underflows: done
        if z == fone:
            break
        contrib = mpf_log(z, wp)
        contrib = mpf_mul_int(contrib, moebius(m), wp)
        contrib = mpf_div(contrib, from_int(m), wp)
        # Exact (unrounded) addition, matching the accumulation style
        total = mpf_add(total, contrib)
        m += 1
    return to_fixed(total, prec)
|
| 313 |
+
|
| 314 |
+
@constant_memo
def twinprime_fixed(prec):
    """Return the twin prime constant as a fixed-point integer with *prec*
    fractional bits, using the zeta-accelerated product (see link above)."""
    # I(n) = (1/n) * sum_{d | n} moebius(d) * 2**(n/d) -- the exponent
    # applied to the corrected zeta(n) factor below
    def I(n):
        return sum(moebius(d)<<(n//d) for d in xrange(1,n+1) if not n%d)//n
    wp = 2*prec + 30
    res = fone
    # primes[i] = 1/p for the small primes p factored out of zeta(n)
    primes = [from_rational(1,p,wp) for p in [2,3,5,7]]
    # ppowers[i] = p**(-n), maintained incrementally as n grows (n starts at 2)
    ppowers = [mpf_mul(p,p,wp) for p in primes]
    n = 2
    while 1:
        # zeta(n) with the Euler factors for p = 2, 3, 5, 7 removed
        a = mpf_zeta_int(n, wp)
        for i in range(4):
            a = mpf_mul(a, mpf_sub(fone, ppowers[i]), wp)
            ppowers[i] = mpf_mul(ppowers[i], primes[i], wp)
        a = mpf_pow_int(a, -I(n), wp)
        # Stop when the factor is indistinguishable from 1 at target precision
        if mpf_pos(a, prec+10, 'n') == fone:
            break
        #from libmpf import to_str
        #print n, to_str(mpf_sub(fone, a), 6)
        res = mpf_mul(res, a, wp)
        n += 1
    # Multiply in the leading factors p*(p-2)/(p-1)**2 for p = 3, 5, 7:
    # (3*1/4) * (5*3/16) * (7*5/36)
    res = mpf_mul(res, from_int(3*15*35), wp)
    res = mpf_div(res, from_int(4*16*36), wp)
    return to_fixed(res, prec)
|
| 338 |
+
|
| 339 |
+
|
| 340 |
+
# Wrap each fixed-point routine as an mpf-valued constant function
# taking (prec, rnd); def_mpf_constant handles guard bits and rounding.
mpf_euler = def_mpf_constant(euler_fixed)
mpf_apery = def_mpf_constant(apery_fixed)
mpf_khinchin = def_mpf_constant(khinchin_fixed)
mpf_glaisher = def_mpf_constant(glaisher_fixed)
mpf_catalan = def_mpf_constant(catalan_fixed)
mpf_mertens = def_mpf_constant(mertens_fixed)
mpf_twinprime = def_mpf_constant(twinprime_fixed)
|
| 347 |
+
|
| 348 |
+
|
| 349 |
+
#-----------------------------------------------------------------------#
|
| 350 |
+
# #
|
| 351 |
+
# Bernoulli numbers #
|
| 352 |
+
# #
|
| 353 |
+
#-----------------------------------------------------------------------#
|
| 354 |
+
|
| 355 |
+
# Largest index n for which B_n is computed via the cached recurrence in
# mpf_bernoulli; larger n fall through to mpf_bernoulli_huge.
MAX_BERNOULLI_CACHE = 3000
|
| 356 |
+
|
| 357 |
+
|
| 358 |
+
r"""
|
| 359 |
+
Small Bernoulli numbers and factorials are used in numerous summations,
|
| 360 |
+
so it is critical for speed that sequential computation is fast and that
|
| 361 |
+
values are cached up to a fairly high threshold.
|
| 362 |
+
|
| 363 |
+
On the other hand, we also want to support fast computation of isolated
|
| 364 |
+
large numbers. Currently, no such acceleration is provided for integer
|
| 365 |
+
factorials (though it is for large floating-point factorials, which are
|
| 366 |
+
computed via gamma if the precision is low enough).
|
| 367 |
+
|
| 368 |
+
For sequential computation of Bernoulli numbers, we use Ramanujan's formula
|
| 369 |
+
|
| 370 |
+
/ n + 3 \
|
| 371 |
+
B = (A(n) - S(n)) / | |
|
| 372 |
+
n \ n /
|
| 373 |
+
|
| 374 |
+
where A(n) = (n+3)/3 when n = 0 or 2 (mod 6), A(n) = -(n+3)/6
|
| 375 |
+
when n = 4 (mod 6), and
|
| 376 |
+
|
| 377 |
+
[n/6]
|
| 378 |
+
___
|
| 379 |
+
\ / n + 3 \
|
| 380 |
+
S(n) = ) | | * B
|
| 381 |
+
/___ \ n - 6*k / n-6*k
|
| 382 |
+
k = 1
|
| 383 |
+
|
| 384 |
+
For isolated large Bernoulli numbers, we use the Riemann zeta function
|
| 385 |
+
to calculate a numerical value for B_n. The von Staudt-Clausen theorem
|
| 386 |
+
can then be used to optionally find the exact value of the
|
| 387 |
+
numerator and denominator.
|
| 388 |
+
"""
|
| 389 |
+
|
| 390 |
+
# Maps working precision -> (dict {n: B_n as mpf}, recurrence state
# [m, bin, bin1]); see mpf_bernoulli.
bernoulli_cache = {}
f3 = from_int(3)  # constant 3 as an mpf, reused by the recurrence
f6 = from_int(6)  # constant 6 as an mpf, reused by the recurrence
|
| 393 |
+
|
| 394 |
+
def bernoulli_size(n):
    """Accurately estimate the size of B_n (even n > 2 only).

    Returns an integer estimate of the bit size (base-2 logarithm of the
    magnitude) of B_n, used to choose working precisions elsewhere.
    """
    log2n = math.log(n, 2)
    return int(2.326 + 0.5 * log2n + n * (log2n - 4.094))
|
| 398 |
+
|
| 399 |
+
# Estimated bit size of the largest cacheable Bernoulli number; above
# this precision mpf_bernoulli reconstructs the exact fraction instead.
BERNOULLI_PREC_CUTOFF = bernoulli_size(MAX_BERNOULLI_CACHE)
|
| 400 |
+
|
| 401 |
+
def mpf_bernoulli(n, prec, rnd=None):
    """Computation of Bernoulli numbers (numerically)

    Returns B_n as an mpf value rounded to *prec* bits.  Moderate n are
    computed sequentially with Ramanujan's recurrence (see the module
    comment above) and cached per working precision; isolated large n
    are dispatched to mpf_bernoulli_huge.  Raises ValueError for n < 0.
    """
    if n < 2:
        if n < 0:
            raise ValueError("Bernoulli numbers only defined for n >= 0")
        if n == 0:
            return fone
        if n == 1:
            return mpf_neg(fhalf)
    # For odd n > 1, the Bernoulli numbers are zero
    if n & 1:
        return fzero
    # If precision is extremely high, we can save time by computing
    # the Bernoulli number at a lower precision that is sufficient to
    # obtain the exact fraction, round to the exact fraction, and
    # convert the fraction back to an mpf value at the original precision
    if prec > BERNOULLI_PREC_CUTOFF and prec > bernoulli_size(n)*1.1 + 1000:
        p, q = bernfrac(n)
        return from_rational(p, q, prec, rnd or round_floor)
    if n > MAX_BERNOULLI_CACHE:
        return mpf_bernoulli_huge(n, prec, rnd)
    wp = prec + 30
    # Reuse nearby precisions (quantize wp so sequential calls at slightly
    # different precisions hit the same cache entry)
    wp += 32 - (prec & 31)
    cached = bernoulli_cache.get(wp)
    if cached:
        numbers, state = cached
        if n in numbers:
            if not rnd:
                return numbers[n]
            return mpf_pos(numbers[n], prec, rnd)
        m, bin, bin1 = state
        # Far beyond the cached recurrence front: cheaper to go direct
        if n - m > 10:
            return mpf_bernoulli_huge(n, prec, rnd)
    else:
        if n > 10:
            return mpf_bernoulli_huge(n, prec, rnd)
        numbers = {0:fone}
        # state = [next index m, binomial(m+3, m), binomial(m+3, m-6)]
        m, bin, bin1 = state = [2, MPZ(10), MPZ_ONE]
        bernoulli_cache[wp] = (numbers, state)
    # Ramanujan's recurrence: B_m = (A(m) - S(m)) / binomial(m+3, m)
    while m <= n:
        #print m
        case = m % 6
        # Accurately estimate size of B_m so we can use
        # fixed point math without using too much precision
        szbm = bernoulli_size(m)
        s = 0
        sexp = max(0, szbm) - wp
        # a = inner binomial coefficient binomial(m+3, m-6*j), j = 1
        if m < 6:
            a = MPZ_ZERO
        else:
            a = bin1
        # S(m) = sum_j binomial(m+3, m-6j) * B_{m-6j}, accumulated in
        # fixed point at scale 2**sexp
        for j in xrange(1, m//6+1):
            usign, uman, uexp, ubc = u = numbers[m-6*j]
            if usign:
                uman = -uman
            s += lshift(a*uman, uexp-sexp)
            # Update inner binomial coefficient
            j6 = 6*j
            a *= ((m-5-j6)*(m-4-j6)*(m-3-j6)*(m-2-j6)*(m-1-j6)*(m-j6))
            a //= ((4+j6)*(5+j6)*(6+j6)*(7+j6)*(8+j6)*(9+j6))
        # A(m): (m+3)/3 when m = 0, 2 (mod 6); -(m+3)/6 when m = 4 (mod 6)
        if case == 0: b = mpf_rdiv_int(m+3, f3, wp)
        if case == 2: b = mpf_rdiv_int(m+3, f3, wp)
        if case == 4: b = mpf_rdiv_int(-m-3, f6, wp)
        s = from_man_exp(s, sexp, wp)
        b = mpf_div(mpf_sub(b, s, wp), from_int(bin), wp)
        numbers[m] = b
        m += 2
        # Update outer binomial coefficient
        bin = bin * ((m+2)*(m+3)) // (m*(m-1))
        if m > 6:
            bin1 = bin1 * ((2+m)*(3+m)) // ((m-7)*(m-6))
        state[:] = [m, bin, bin1]
    return numbers[n]
|
| 475 |
+
|
| 476 |
+
def mpf_bernoulli_huge(n, prec, rnd=None):
    """Compute B_n for large even n via the zeta function:
    B_n = +/- 2 * n! * zeta(n) / (2*pi)**n."""
    wp = prec + 10
    # pi needs extra bits since it is raised to the n-th power
    piprec = wp + int(math.log(n,2))
    v = mpf_gamma_int(n+1, wp)
    v = mpf_mul(v, mpf_zeta_int(n, wp), wp)
    v = mpf_mul(v, mpf_pow_int(mpf_pi(piprec), -n, wp))
    # Multiply by 2 / 2**n (the shift completes 2*n!*zeta(n)/(2*pi)**n)
    v = mpf_shift(v, 1-n)
    # Sign: B_n < 0 when n = 0 (mod 4), B_n > 0 when n = 2 (mod 4)
    if not n & 3:
        v = mpf_neg(v)
    return mpf_pos(v, prec, rnd or round_fast)
|
| 486 |
+
|
| 487 |
+
def bernfrac(n):
    r"""
    Returns a tuple of integers `(p, q)` such that `p/q = B_n` exactly,
    where `B_n` denotes the `n`-th Bernoulli number. The fraction is
    always reduced to lowest terms. Note that for `n > 1` and `n` odd,
    `B_n = 0`, and `(0, 1)` is returned.

    **Examples**

    The first few Bernoulli numbers are exactly::

        >>> from mpmath import *
        >>> for n in range(15):
        ...     p, q = bernfrac(n)
        ...     print("%s %s/%s" % (n, p, q))
        ...
        0 1/1
        1 -1/2
        2 1/6
        3 0/1
        4 -1/30
        5 0/1
        6 1/42
        7 0/1
        8 -1/30
        9 0/1
        10 5/66
        11 0/1
        12 -691/2730
        13 0/1
        14 7/6

    This function works for arbitrarily large `n`::

        >>> p, q = bernfrac(10**4)
        >>> print(q)
        2338224387510
        >>> print(len(str(p)))
        27692
        >>> mp.dps = 15
        >>> print(mpf(p) / q)
        -9.04942396360948e+27677
        >>> print(bernoulli(10**4))
        -9.04942396360948e+27677

    .. note ::

        :func:`~mpmath.bernoulli` computes a floating-point approximation
        directly, without computing the exact fraction first.
        This is much faster for large `n`.

    **Algorithm**

    :func:`~mpmath.bernfrac` works by computing the value of `B_n` numerically
    and then using the von Staudt-Clausen theorem [1] to reconstruct
    the exact fraction. For large `n`, this is significantly faster than
    computing `B_1, B_2, \ldots, B_n` recursively with exact arithmetic.
    The implementation has been tested for `n = 10^m` up to `m = 6`.

    In practice, :func:`~mpmath.bernfrac` appears to be about three times
    slower than the specialized program calcbn.exe [2]

    **References**

    1. MathWorld, von Staudt-Clausen Theorem:
       http://mathworld.wolfram.com/vonStaudt-ClausenTheorem.html

    2. The Bernoulli Number Page:
       http://www.bernoulli.org/

    """
    n = int(n)
    if n < 3:
        return [(1, 1), (-1, 2), (1, 6)][n]
    if n & 1:
        return (0, 1)
    # von Staudt-Clausen: the exact denominator of B_n is the product of
    # all primes p with (p-1) | n
    q = 1
    for k in list_primes(n+1):
        if not (n % (k-1)):
            q *= k
    # Enough precision that q*B_n rounds to the exact integer numerator
    prec = bernoulli_size(n) + int(math.log(q,2)) + 20
    b = mpf_bernoulli(n, prec)
    p = mpf_mul(b, from_int(q))
    pint = to_int(p, round_nearest)
    return (pint, q)
|
| 572 |
+
|
| 573 |
+
|
| 574 |
+
#-----------------------------------------------------------------------#
|
| 575 |
+
# #
|
| 576 |
+
# Polygamma functions #
|
| 577 |
+
# #
|
| 578 |
+
#-----------------------------------------------------------------------#
|
| 579 |
+
|
| 580 |
+
r"""
|
| 581 |
+
For all polygamma (psi) functions, we use the Euler-Maclaurin summation
|
| 582 |
+
formula. It looks slightly different in the m = 0 and m > 0 cases.
|
| 583 |
+
|
| 584 |
+
For m = 0, we have
|
| 585 |
+
oo
|
| 586 |
+
___ B
|
| 587 |
+
(0) 1 \ 2 k -2 k
|
| 588 |
+
psi (z) ~ log z + --- - ) ------ z
|
| 589 |
+
2 z /___ (2 k)!
|
| 590 |
+
k = 1
|
| 591 |
+
|
| 592 |
+
Experiment shows that the minimum term of the asymptotic series
|
| 593 |
+
reaches 2^(-p) when Re(z) > 0.11*p. So we simply use the recurrence
|
| 594 |
+
for psi (equivalent, in fact, to summing to the first few terms
|
| 595 |
+
directly before applying E-M) to obtain z large enough.
|
| 596 |
+
|
| 597 |
+
Since, very crudely, log z ~= 1 for Re(z) > 1, we can use
|
| 598 |
+
fixed-point arithmetic (if z is extremely large, log(z) itself
|
| 599 |
+
is a sufficient approximation, so we can stop there already).
|
| 600 |
+
|
| 601 |
+
For Re(z) << 0, we could use recurrence, but this is of course
|
| 602 |
+
inefficient for large negative z, so there we use the
|
| 603 |
+
reflection formula instead.
|
| 604 |
+
|
| 605 |
+
For m > 0, we have
|
| 606 |
+
|
| 607 |
+
N - 1
|
| 608 |
+
___
|
| 609 |
+
~~~(m) [ \ 1 ] 1 1
|
| 610 |
+
psi (z) ~ [ ) -------- ] + ---------- + -------- +
|
| 611 |
+
[ /___ m+1 ] m+1 m
|
| 612 |
+
k = 1 (z+k) ] 2 (z+N) m (z+N)
|
| 613 |
+
|
| 614 |
+
oo
|
| 615 |
+
___ B
|
| 616 |
+
\ 2 k (m+1) (m+2) ... (m+2k-1)
|
| 617 |
+
+ ) ------ ------------------------
|
| 618 |
+
/___ (2 k)! m + 2 k
|
| 619 |
+
k = 1 (z+N)
|
| 620 |
+
|
| 621 |
+
where ~~~ denotes the function rescaled by 1/((-1)^(m+1) m!).
|
| 622 |
+
|
| 623 |
+
Here again N is chosen to make z+N large enough for the minimum
|
| 624 |
+
term in the last series to become smaller than eps.
|
| 625 |
+
|
| 626 |
+
TODO: the current estimation of N for m > 0 is *very suboptimal*.
|
| 627 |
+
|
| 628 |
+
TODO: implement the reflection formula for m > 0, Re(z) << 0.
|
| 629 |
+
It is generally a combination of multiple cotangents. Need to
|
| 630 |
+
figure out a reasonably simple way to generate these formulas
|
| 631 |
+
on the fly.
|
| 632 |
+
|
| 633 |
+
TODO: maybe use exact algorithms to compute psi for integral
|
| 634 |
+
and certain rational arguments, as this can be much more
|
| 635 |
+
efficient. (On the other hand, the availability of these
|
| 636 |
+
special values provides a convenient way to test the general
|
| 637 |
+
algorithm.)
|
| 638 |
+
"""
|
| 639 |
+
|
| 640 |
+
# Harmonic numbers are just shifted digamma functions
|
| 641 |
+
# We should calculate these exactly when x is an integer
|
| 642 |
+
# and when doing so is faster.
|
| 643 |
+
|
| 644 |
+
def mpf_harmonic(x, prec, rnd):
    """Harmonic number H(x) = psi(x+1) + euler for a real argument."""
    # 0, nan and +inf all map to themselves
    if x in (fzero, fnan, finf):
        return x
    psi_val = mpf_psi0(mpf_add(fone, x, prec+5), prec)
    return mpf_add(psi_val, mpf_euler(prec+5, rnd), prec, rnd)
|
| 649 |
+
|
| 650 |
+
def mpc_harmonic(z, prec, rnd):
    """Harmonic number H(z) = psi(z+1) + euler for a complex argument."""
    # Purely real arguments fall back to the real implementation
    if z[1] == fzero:
        return (mpf_harmonic(z[0], prec, rnd), fzero)
    psi_val = mpc_psi0(mpc_add_mpf(z, fone, prec+5), prec)
    return mpc_add_mpf(psi_val, mpf_euler(prec+5, rnd), prec, rnd)
|
| 655 |
+
|
| 656 |
+
def mpf_psi0(x, prec, rnd=round_fast):
    """
    Computation of the digamma function (psi function of order 0)
    of a real argument.

    Raises ValueError at the poles (zero and the nonpositive integers).
    """
    sign, man, exp, bc = x
    wp = prec + 10
    if not man:
        if x == finf: return x
        if x == fninf or x == fnan: return fnan
    if x == fzero or (exp >= 0 and sign):
        raise ValueError("polygamma pole")
    # Near 0 -- fixed-point arithmetic becomes bad; use psi(x) = psi(x+1) - 1/x
    if exp+bc < -5:
        v = mpf_psi0(mpf_add(x, fone, prec, rnd), prec, rnd)
        return mpf_sub(v, mpf_div(fone, x, wp, rnd), prec, rnd)
    # Reflection formula: psi(x) = psi(1-x) - pi*cot(pi*x)
    if sign and exp+bc > 3:
        c, s = mpf_cos_sin_pi(x, wp)
        q = mpf_mul(mpf_div(c, s, wp), mpf_pi(wp), wp)
        p = mpf_psi0(mpf_sub(fone, x, wp), wp)
        return mpf_sub(p, q, prec, rnd)
    # The logarithmic term is accurate enough
    if (not sign) and bc + exp > wp:
        return mpf_log(mpf_sub(x, fone, wp), prec, rnd)
    # Initial recurrence to obtain a large enough x
    # (Re(x) > 0.11*wp makes the asymptotic series reach 2**-wp; see above)
    m = to_int(x)
    n = int(0.11*wp) + 2
    s = MPZ_ZERO
    x = to_fixed(x, wp)
    one = MPZ_ONE << wp
    if m < n:
        for k in xrange(m, n):
            # psi(x) = psi(x+1) - 1/x
            s -= (one << wp) // x
            x += one
    # Shift so the expansion psi(x+1) = log(x) + 1/(2x) - sum ... applies
    x -= one
    # Logarithmic term
    s += to_fixed(mpf_log(from_man_exp(x, -wp, wp), wp), wp)
    # Endpoint term in Euler-Maclaurin expansion
    s += (one << wp) // (2*x)
    # Euler-Maclaurin remainder sum: B_2k / (2k x**(2k)) with alternating
    # contribution (mpf_bernoulli returns a signed value; bman is |B_2k|)
    x2 = (x*x) >> wp
    t = one
    prev = 0
    k = 1
    while 1:
        t = (t*x2) >> wp
        bsign, bman, bexp, bbc = mpf_bernoulli(2*k, wp)
        offset = (bexp + 2*wp)
        if offset >= 0: term = (bman << offset) // (t*(2*k))
        else: term = (bman >> (-offset)) // (t*(2*k))
        if k & 1: s -= term
        else: s += term
        # Divergent asymptotic series: stop at the smallest term
        if k > 2 and term >= prev:
            break
        prev = term
        k += 1
    return from_man_exp(s, -wp, wp, rnd)
|
| 714 |
+
|
| 715 |
+
def mpc_psi0(z, prec, rnd=round_fast):
    """
    Computation of the digamma function (psi function of order 0)
    of a complex argument.

    Uses the reflection formula for arguments far to the left of the
    origin, then recurrence plus the Euler-Maclaurin asymptotic series.
    (Removes the dead local ``prev``, which was assigned but never read;
    the loop's termination test only uses ``szprev``.)
    """
    re, im = z
    # Fall back to the real case
    if im == fzero:
        return (mpf_psi0(re, prec, rnd), fzero)
    wp = prec + 20
    sign, man, exp, bc = re
    # Reflection formula: psi(z) = psi(1-z) - pi*cot(pi*z)
    if sign and exp+bc > 3:
        c = mpc_cos_pi(z, wp)
        s = mpc_sin_pi(z, wp)
        q = mpc_mul_mpf(mpc_div(c, s, wp), mpf_pi(wp), wp)
        p = mpc_psi0(mpc_sub(mpc_one, z, wp), wp)
        return mpc_sub(p, q, prec, rnd)
    # Just the logarithmic term
    if (not sign) and bc + exp > wp:
        return mpc_log(mpc_sub(z, mpc_one, wp), prec, rnd)
    # Initial recurrence to obtain a large enough z:
    # psi(z) = psi(z+1) - 1/z
    w = to_int(re)
    n = int(0.11*wp) + 2
    s = mpc_zero
    if w < n:
        for k in xrange(w, n):
            s = mpc_sub(s, mpc_reciprocal(z, wp), wp)
            z = mpc_add_mpf(z, fone, wp)
    # Shift so the expansion psi(z+1) = log(z) + 1/(2z) - ... applies
    z = mpc_sub(z, mpc_one, wp)
    # Logarithmic and endpoint term
    s = mpc_add(s, mpc_log(z, wp), wp)
    s = mpc_add(s, mpc_div(mpc_half, z, wp), wp)
    # Euler-Maclaurin remainder sum
    z2 = mpc_square(z, wp)
    t = mpc_one
    szprev = fzero
    k = 1
    eps = mpf_shift(fone, -wp+2)
    while 1:
        t = mpc_mul(t, z2, wp)
        bern = mpf_bernoulli(2*k, wp)
        term = mpc_mpf_div(bern, mpc_mul_int(t, 2*k, wp), wp)
        s = mpc_sub(s, term, wp)
        szterm = mpc_abs(term, 10)
        # Stop when terms are negligible, or start growing again
        # (the asymptotic series eventually diverges)
        if k > 2 and (mpf_le(szterm, eps) or mpf_le(szprev, szterm)):
            break
        szprev = szterm
        k += 1
    return s
|
| 767 |
+
|
| 768 |
+
# Currently unoptimized
|
| 769 |
+
def mpf_psi(m, x, prec, rnd=round_fast):
|
| 770 |
+
"""
|
| 771 |
+
Computation of the polygamma function of arbitrary integer order
|
| 772 |
+
m >= 0, for a real argument x.
|
| 773 |
+
"""
|
| 774 |
+
if m == 0:
|
| 775 |
+
return mpf_psi0(x, prec, rnd=round_fast)
|
| 776 |
+
return mpc_psi(m, (x, fzero), prec, rnd)[0]
|
| 777 |
+
|
| 778 |
+
def mpc_psi(m, z, prec, rnd=round_fast):
    """
    Computation of the polygamma function of arbitrary integer order
    m >= 0, for a complex argument z.

    Implements the Euler-Maclaurin expansion described in the module
    comment above (recurrence to shift z right, then the asymptotic
    series, finally rescaled by (-1)**(m+1) * m!).
    """
    if m == 0:
        return mpc_psi0(z, prec, rnd)
    re, im = z
    wp = prec + 20
    sign, man, exp, bc = re
    # Special values for arguments on the real line
    if not im[1]:
        if im in (finf, fninf, fnan):
            return (fnan, fnan)
        if not man:
            if re == finf and im == fzero:
                return (fzero, fzero)
            if re == fnan:
                return (fnan, fnan)
    # Recurrence: accumulate sum 1/(z+k)**(m+1) while shifting z right.
    # TODO (per module comment): this choice of n is very suboptimal.
    w = to_int(re)
    n = int(0.4*wp + 4*m)
    s = mpc_zero
    if w < n:
        for k in xrange(w, n):
            t = mpc_pow_int(z, -m-1, wp)
            s = mpc_add(s, t, wp)
            z = mpc_add_mpf(z, fone, wp)
    zm = mpc_pow_int(z, -m, wp)
    z2 = mpc_pow_int(z, -2, wp)
    # Integral term 1/(m*(z+N)**m)
    integral_term = mpc_div_mpf(zm, from_int(m), wp)
    s = mpc_add(s, integral_term, wp)
    # Endpoint term 1/(2*(z+N)**(m+1))
    s = mpc_add(s, mpc_mul_mpf(mpc_div(zm, z, wp), fhalf, wp), wp)
    # a/b tracks the weight (m+1)(m+2)...(m+2k-1) / (2k)! of the
    # Bernoulli series terms
    a = m + 1
    b = 2
    k = 1
    # Important: we want to sum up to the *relative* error,
    # not the absolute error, because psi^(m)(z) might be tiny
    magn = mpc_abs(s, 10)
    magn = magn[2]+magn[3]
    eps = mpf_shift(fone, magn-wp+2)
    while 1:
        zm = mpc_mul(zm, z2, wp)
        bern = mpf_bernoulli(2*k, wp)
        scal = mpf_mul_int(bern, a, wp)
        scal = mpf_div(scal, from_int(b), wp)
        term = mpc_mul_mpf(zm, scal, wp)
        s = mpc_add(s, term, wp)
        szterm = mpc_abs(term, 10)
        if k > 2 and mpf_le(szterm, eps):
            break
        #print k, to_str(szterm, 10), to_str(eps, 10)
        a *= (m+2*k)*(m+2*k+1)
        b *= (2*k+1)*(2*k+2)
        k += 1
    # Scale and sign factor: multiply the rescaled sum by (-1)**(m+1) m!
    v = mpc_mul_mpf(s, mpf_gamma(from_int(m+1), wp), prec, rnd)
    if not (m & 1):
        v = mpf_neg(v[0]), mpf_neg(v[1])
    return v
|
| 839 |
+
|
| 840 |
+
|
| 841 |
+
#-----------------------------------------------------------------------#
|
| 842 |
+
# #
|
| 843 |
+
# Riemann zeta function #
|
| 844 |
+
# #
|
| 845 |
+
#-----------------------------------------------------------------------#
|
| 846 |
+
|
| 847 |
+
r"""
|
| 848 |
+
We use zeta(s) = eta(s) / (1 - 2**(1-s)) and Borwein's approximation
|
| 849 |
+
|
| 850 |
+
n-1
|
| 851 |
+
___ k
|
| 852 |
+
-1 \ (-1) (d_k - d_n)
|
| 853 |
+
eta(s) ~= ---- ) ------------------
|
| 854 |
+
d_n /___ s
|
| 855 |
+
k = 0 (k + 1)
|
| 856 |
+
where
|
| 857 |
+
k
|
| 858 |
+
___ i
|
| 859 |
+
\ (n + i - 1)! 4
|
| 860 |
+
d_k = n ) ---------------.
|
| 861 |
+
/___ (n - i)! (2i)!
|
| 862 |
+
i = 0
|
| 863 |
+
|
| 864 |
+
If s = a + b*I, the absolute error for eta(s) is bounded by
|
| 865 |
+
|
| 866 |
+
3 (1 + 2|b|)
|
| 867 |
+
------------ * exp(|b| pi/2)
|
| 868 |
+
n
|
| 869 |
+
(3+sqrt(8))
|
| 870 |
+
|
| 871 |
+
Disregarding the linear term, we have approximately,
|
| 872 |
+
|
| 873 |
+
log(err) ~= log(exp(1.58*|b|)) - log(5.8**n)
|
| 874 |
+
log(err) ~= 1.58*|b| - log(5.8)*n
|
| 875 |
+
log(err) ~= 1.58*|b| - 1.76*n
|
| 876 |
+
log2(err) ~= 2.28*|b| - 2.54*n
|
| 877 |
+
|
| 878 |
+
So for p bits, we should choose n > (p + 2.28*|b|) / 2.54.
|
| 879 |
+
|
| 880 |
+
References:
|
| 881 |
+
-----------
|
| 882 |
+
|
| 883 |
+
Peter Borwein, "An Efficient Algorithm for the Riemann Zeta Function"
|
| 884 |
+
http://www.cecm.sfu.ca/personal/pborwein/PAPERS/P117.ps
|
| 885 |
+
|
| 886 |
+
http://en.wikipedia.org/wiki/Dirichlet_eta_function
|
| 887 |
+
"""
|
| 888 |
+
|
| 889 |
+
# Memoized Borwein coefficient lists [d_0, ..., d_n], keyed by n
borwein_cache = {}
|
| 890 |
+
|
| 891 |
+
def borwein_coefficients(n):
    """Return the list [d_0, ..., d_n] of coefficients for Borwein's
    eta/zeta algorithm (see the formula in the comment above); results
    are memoized in borwein_cache."""
    cached = borwein_cache.get(n)
    if cached is not None:
        return cached
    coeffs = [MPZ_ZERO] * (n+1)
    running = term = coeffs[0] = MPZ_ONE
    for i in range(1, n+1):
        # Consecutive-term ratio: 4*(n+i-1)*(n-i+1) / ((2i)*(2i-1))
        term = term * 4 * (n+i-1) * (n-i+1) // ((2*i) * (2*i - 1))
        running += term
        coeffs[i] = running
    borwein_cache[n] = coeffs
    return coeffs
|
| 904 |
+
|
| 905 |
+
# Cutoff for memoizing integer zeta values; zeta_int_cache maps
# s -> (working precision, value).
ZETA_INT_CACHE_MAX_PREC = 1000
zeta_int_cache = {}
|
| 907 |
+
|
| 908 |
+
def mpf_zeta_int(s, prec, rnd=round_fast):
    """
    Optimized computation of zeta(s) for an integer s.

    Uses, in order of preference: the memoized cache; Bernoulli numbers
    for s <= 0; truncated direct expansions when s is large enough that
    most terms vanish; the Euler product; and finally Borwein's
    algorithm.  Raises ValueError for the pole at s = 1.

    Fix: results are now only memoized when wp < ZETA_INT_CACHE_MAX_PREC;
    the cutoff constant existed but was never consulted, so the cache
    could grow without bound at very high precision.
    """
    wp = prec + 20
    s = int(s)
    if s in zeta_int_cache and zeta_int_cache[s][0] >= wp:
        return mpf_pos(zeta_int_cache[s][1], prec, rnd)
    if s < 2:
        if s == 1:
            raise ValueError("zeta(1) pole")
        if not s:
            return mpf_neg(fhalf)
        # Negative integers: zeta(-n) = -B_{n+1}/(n+1)
        return mpf_div(mpf_bernoulli(-s+1, wp), from_int(s-1), prec, rnd)
    # 2^-s term vanishes?
    if s >= wp:
        return mpf_perturb(fone, 0, prec, rnd)
    # 5^-s term vanishes?
    elif s >= wp*0.431:
        # Only 1 + 2^-s + 3^-s + 4^-s contribute at this precision
        t = one = 1 << wp
        t += 1 << (wp - s)
        t += one // (MPZ_THREE ** s)
        t += 1 << max(0, wp - s*2)
        return from_man_exp(t, -wp, prec, rnd)
    else:
        # Fast enough to sum directly?
        # Even better, we use the Euler product (idea stolen from pari)
        m = (float(wp)/(s-1) + 1)
        if m < 30:
            needed_terms = int(2.0**m + 1)
            if needed_terms < int(wp/2.54 + 5) / 10:
                t = fone
                for k in list_primes(needed_terms):
                    #print k, needed_terms
                    # Precision needed for this Euler factor shrinks with k
                    powprec = int(wp - s*math.log(k,2))
                    if powprec < 2:
                        break
                    a = mpf_sub(fone, mpf_pow_int(from_int(k), -s, powprec), wp)
                    t = mpf_mul(t, a, wp)
                return mpf_div(fone, t, wp)
        # Use Borwein's algorithm
        n = int(wp/2.54 + 5)
        d = borwein_coefficients(n)
        t = MPZ_ZERO
        s = MPZ(s)
        for k in xrange(n):
            t += (((-1)**k * (d[k] - d[n])) << wp) // (k+1)**s
        # eta(s) -> zeta(s) conversion: divide by -d_n and (1 - 2^(1-s))
        t = (t << wp) // (-d[n])
        t = (t << wp) // ((1 << wp) - (1 << (wp+1-s)))
        # Memoize, but only at moderate precision so the cache stays bounded
        if wp < ZETA_INT_CACHE_MAX_PREC and \
                ((s not in zeta_int_cache) or zeta_int_cache[s][0] < wp):
            zeta_int_cache[s] = (wp, from_man_exp(t, -wp-wp))
        return from_man_exp(t, -wp-wp, prec, rnd)
|
| 960 |
+
|
| 961 |
+
def mpf_zeta(s, prec, rnd=round_fast, alt=0):
    """
    Compute the Riemann zeta function of a real mpf value s.

    With alt=1, computes the Dirichlet eta (alternating zeta) function
    instead.  Dispatches to mpf_zeta_int for integer arguments, uses the
    functional (reflection) equation for negative s, handles proximity
    to the pole at s = 1, and otherwise sums Borwein's accelerated
    alternating series in fixed point.
    """
    sign, man, exp, bc = s
    if not man:
        # Special values: 0, +inf, nan.
        if s == fzero:
            if alt:
                return fhalf
            else:
                return mpf_neg(fhalf)
        if s == finf:
            return fone
        return fnan
    wp = prec + 20
    # First term vanishes?
    if (not sign) and (exp + bc > (math.log(wp,2) + 2)):
        # s is so large that zeta(s) = 1 + eps (eta likewise, with sign
        # of the perturbation controlled by alt).
        return mpf_perturb(fone, alt, prec, rnd)
    # Optimize for integer arguments
    elif exp >= 0:
        if alt:
            if s == fone:
                # eta(1) = log(2).
                return mpf_ln2(prec, rnd)
            # eta(s) = zeta(s) * (1 - 2^(1-s)).
            z = mpf_zeta_int(to_int(s), wp, negative_rnd[rnd])
            q = mpf_sub(fone, mpf_pow(ftwo, mpf_sub(fone, s, wp), wp), wp)
            return mpf_mul(z, q, prec, rnd)
        else:
            return mpf_zeta_int(to_int(s), prec, rnd)
    # Negative: use the reflection formula
    # Borwein only proves the accuracy bound for x >= 1/2. However, based on
    # tests, the accuracy without reflection is quite good even some distance
    # to the left of 1/2. XXX: verify this.
    if sign:
        # XXX: could use the separate refl. formula for Dirichlet eta
        if alt:
            q = mpf_sub(fone, mpf_pow(ftwo, mpf_sub(fone, s, wp), wp), wp)
            return mpf_mul(mpf_zeta(s, wp), q, prec, rnd)
        # XXX: -1 should be done exactly
        # zeta(s) = 2^s pi^(s-1) sin(pi s/2) gamma(1-s) zeta(1-s).
        y = mpf_sub(fone, s, 10*wp)
        a = mpf_gamma(y, wp)
        b = mpf_zeta(y, wp)
        c = mpf_sin_pi(mpf_shift(s, -1), wp)
        wp2 = wp + max(0,exp+bc)
        pi = mpf_pi(wp+wp2)
        d = mpf_div(mpf_pow(mpf_shift(pi, 1), s, wp2), pi, wp2)
        return mpf_mul(a,mpf_mul(b,mpf_mul(c,d,wp),wp),prec,rnd)

    # Near pole
    r = mpf_sub(fone, s, wp)
    asign, aman, aexp, abc = mpf_abs(r)
    # Roughly -2*log2|1-s|: how many bits of cancellation to expect.
    pole_dist = -2*(aexp+abc)
    if pole_dist > wp:
        # Close enough to use the Laurent expansion at s = 1:
        # zeta(s) ~ 1/(s-1) + euler; eta(s) ~ log(2).
        if alt:
            return mpf_ln2(prec, rnd)
        else:
            q = mpf_neg(mpf_div(fone, r, wp))
            return mpf_add(q, mpf_euler(wp), prec, rnd)
    else:
        # Compensate for cancellation near the pole.
        wp += max(0, pole_dist)

    t = MPZ_ZERO
    #wp += 16 - (prec & 15)
    # Use Borwein's algorithm
    n = int(wp/2.54 + 5)
    d = borwein_coefficients(n)
    t = MPZ_ZERO
    sf = to_fixed(s, wp)
    ln2 = ln2_fixed(wp)
    for k in xrange(n):
        # (k+1)^(-s) computed as exp(-s*log(k+1)) in fixed point.
        u = (-sf*log_int_fixed(k+1, wp, ln2)) >> wp
        #esign, eman, eexp, ebc = mpf_exp(u, wp)
        #offset = eexp + wp
        #if offset >= 0:
        #    w = ((d[k] - d[n]) * eman) << offset
        #else:
        #    w = ((d[k] - d[n]) * eman) >> (-offset)
        eman = exp_fixed(u, wp, ln2)
        w = (d[k] - d[n]) * eman
        if k & 1:
            t -= w
        else:
            t += w
    t = t // (-d[n])
    t = from_man_exp(t, -wp, wp)
    if alt:
        # The alternating sum directly gives eta(s).
        return mpf_pos(t, prec, rnd)
    else:
        # Convert eta(s) to zeta(s).
        q = mpf_sub(fone, mpf_pow(ftwo, mpf_sub(fone, s, wp), wp), wp)
        return mpf_div(t, q, prec, rnd)
|
| 1047 |
+
|
| 1048 |
+
def mpc_zeta(s, prec, rnd=round_fast, alt=0, force=False):
    """
    Compute the Riemann zeta function of a complex mpc value s.

    With alt=1, computes the Dirichlet eta function instead.  Real
    arguments delegate to mpf_zeta.  Unless force=True, raises
    NotImplementedError for |s| > prec since the Borwein sum becomes
    too slow there.  Uses the Laurent expansion near the pole s = 1,
    the reflection formula for Re(s) < 0, and otherwise Borwein's
    accelerated alternating series in fixed point.
    """
    re, im = s
    if im == fzero:
        return mpf_zeta(re, prec, rnd, alt), fzero

    # slow for large s
    if (not force) and mpf_gt(mpc_abs(s, 10), from_int(prec)):
        raise NotImplementedError

    wp = prec + 20

    # Near pole
    r = mpc_sub(mpc_one, s, wp)
    asign, aman, aexp, abc = mpc_abs(r, 10)
    # Roughly -2*log2|1-s|: expected bits of cancellation near s = 1.
    pole_dist = -2*(aexp+abc)
    if pole_dist > wp:
        if alt:
            # Taylor expansion of eta about s = 1:
            # eta(s) ~ log(2) + (1-s)*(euler*log(2) - log(2)^2/2).
            q = mpf_ln2(wp)
            y = mpf_mul(q, mpf_euler(wp), wp)
            g = mpf_shift(mpf_mul(q, q, wp), -1)
            g = mpf_sub(y, g)
            z = mpc_mul_mpf(r, mpf_neg(g), wp)
            z = mpc_add_mpf(z, q, wp)
            return mpc_pos(z, prec, rnd)
        else:
            # Laurent expansion: zeta(s) ~ 1/(s-1) + euler.
            q = mpc_neg(mpc_div(mpc_one, r, wp))
            q = mpc_add_mpf(q, mpf_euler(wp), wp)
            return mpc_pos(q, prec, rnd)
    else:
        # Compensate for cancellation near the pole.
        wp += max(0, pole_dist)

    # Reflection formula. To be rigorous, we should reflect to the left of
    # re = 1/2 (see comments for mpf_zeta), but this leads to unnecessary
    # slowdown for interesting values of s
    if mpf_lt(re, fzero):
        # XXX: could use the separate refl. formula for Dirichlet eta
        if alt:
            q = mpc_sub(mpc_one, mpc_pow(mpc_two, mpc_sub(mpc_one, s, wp),
                wp), wp)
            return mpc_mul(mpc_zeta(s, wp), q, prec, rnd)
        # XXX: -1 should be done exactly
        # zeta(s) = 2^s pi^(s-1) sin(pi s/2) gamma(1-s) zeta(1-s).
        y = mpc_sub(mpc_one, s, 10*wp)
        a = mpc_gamma(y, wp)
        b = mpc_zeta(y, wp)
        c = mpc_sin_pi(mpc_shift(s, -1), wp)
        rsign, rman, rexp, rbc = re
        isign, iman, iexp, ibc = im
        mag = max(rexp+rbc, iexp+ibc)
        wp2 = wp + max(0, mag)
        pi = mpf_pi(wp+wp2)
        pi2 = (mpf_shift(pi, 1), fzero)
        d = mpc_div_mpf(mpc_pow(pi2, s, wp2), pi, wp2)
        return mpc_mul(a,mpc_mul(b,mpc_mul(c,d,wp),wp),prec,rnd)
    # Borwein summation: more terms are needed as |Im(s)| grows.
    n = int(wp/2.54 + 5)
    n += int(0.9*abs(to_int(im)))
    d = borwein_coefficients(n)
    ref = to_fixed(re, wp)
    imf = to_fixed(im, wp)
    tre = MPZ_ZERO
    tim = MPZ_ZERO
    one = MPZ_ONE << wp
    one_2wp = MPZ_ONE << (2*wp)
    critical_line = re == fhalf
    ln2 = ln2_fixed(wp)
    pi2 = pi_fixed(wp-1)
    wp2 = wp+wp
    for k in xrange(n):
        log = log_int_fixed(k+1, wp, ln2)
        # A square root is much cheaper than an exp
        if critical_line:
            # On Re(s) = 1/2, |(k+1)^-s| = 1/sqrt(k+1).
            w = one_2wp // isqrt_fast((k+1) << wp2)
        else:
            w = exp_fixed((-ref*log) >> wp, wp)
        # Sign-folded weight (d[k]-d[n]) alternates with k.
        if k & 1:
            w *= (d[n] - d[k])
        else:
            w *= (d[k] - d[n])
        # exp(-i*Im(s)*log(k+1)) gives the oscillating factor.
        wre, wim = cos_sin_fixed((-imf*log)>>wp, wp, pi2)
        tre += (w * wre) >> wp
        tim += (w * wim) >> wp
    tre //= (-d[n])
    tim //= (-d[n])
    tre = from_man_exp(tre, -wp, wp)
    tim = from_man_exp(tim, -wp, wp)
    if alt:
        return mpc_pos((tre, tim), prec, rnd)
    else:
        # Convert eta(s) to zeta(s): divide by (1 - 2^(1-s)); note r = 1-s.
        q = mpc_sub(mpc_one, mpc_pow(mpc_two, r, wp), wp)
        return mpc_div((tre, tim), q, prec, rnd)
|
| 1137 |
+
|
| 1138 |
+
def mpf_altzeta(s, prec, rnd=round_fast):
    """Dirichlet eta function (alternating zeta) of a real mpf s.

    Thin wrapper delegating to mpf_zeta with alt=1.
    """
    return mpf_zeta(s, prec, rnd, alt=1)
|
| 1140 |
+
|
| 1141 |
+
def mpc_altzeta(s, prec, rnd=round_fast):
    """Dirichlet eta function (alternating zeta) of a complex mpc s.

    Thin wrapper delegating to mpc_zeta with alt=1.
    """
    return mpc_zeta(s, prec, rnd, alt=1)

# Not optimized currently; callers fall back to the generic summation.
mpf_zetasum = None
|
| 1146 |
+
|
| 1147 |
+
|
| 1148 |
+
def pow_fixed(x, n, wp):
    """Return x**n for a fixed-point number x with wp fractional bits.

    The result carries the same wp-bit fixed-point scaling.  Uses plain
    binary (square-and-multiply) exponentiation; n is assumed to be a
    non-negative integer.
    """
    if n == 1:
        return x
    result = MPZ_ONE << wp   # fixed-point 1
    while n:
        n, odd = divmod(n, 2)
        if odd:
            result = (result * x) >> wp
        x = (x * x) >> wp
    return result
|
| 1159 |
+
|
| 1160 |
+
# TODO: optimize / cleanup interface / unify with list_primes
# Module-level caches shared by all primesieve calls; grown monotonically.
sieve_cache = []
primes_cache = []
mult_cache = []

def primesieve(n):
    """
    Return (sieve, primes, mult) for the integers up to n.

    sieve[k] is the largest prime factor of k (0 for k < 2),
    primes is the list of primes <= n, and mult[k] is the multiplicity
    of sieve[k] in k.  Results are cached; a call with n smaller than a
    previous call reuses the larger cached sieve (the full cached lists
    are returned for sieve/mult, trimmed only for primes).
    """
    global sieve_cache, primes_cache, mult_cache
    if n < len(sieve_cache):
        sieve = sieve_cache#[:n+1]
        # Trim the prime list at the largest prime actually in the sieve.
        primes = primes_cache[:primes_cache.index(max(sieve))+1]
        mult = mult_cache#[:n+1]
        return sieve, primes, mult
    sieve = [0] * (n+1)
    mult = [0] * (n+1)
    primes = list_primes(n)
    for p in primes:
        #sieve[p::p] = p
        # Mark every multiple of p; later (larger) primes overwrite, so
        # sieve[k] ends up as the largest prime factor of k.
        for k in xrange(p,n+1,p):
            sieve[k] = p
    for i, p in enumerate(sieve):
        if i >= 2:
            # Count how many times p divides i.  NOTE: reuses the name n
            # locally; the original argument is no longer needed here.
            m = 1
            n = i // p
            while not n % p:
                n //= p
                m += 1
            mult[i] = m
    sieve_cache = sieve
    primes_cache = primes
    mult_cache = mult
    return sieve, primes, mult
|
| 1191 |
+
|
| 1192 |
+
def zetasum_sieved(critical_line, sre, sim, a, n, wp):
    """
    Fixed-point evaluation of sum_{k=a}^{a+n} k^(-s) for complex
    s = sre + i*sim (both wp-bit fixed-point values).

    Returns (xre, xim) as fixed-point integers.  k^(-s) for smooth k is
    assembled multiplicatively from precomputed prime powers (via the
    largest-prime-factor sieve), which is cheaper than an exp per term;
    on the critical line Re(s) = 1/2 the modulus is 1/sqrt(k), computed
    with a fast integer square root.
    """
    if a < 1:
        raise ValueError("a cannot be less than 1")
    sieve, primes, mult = primesieve(a+n)
    basic_powers = {}
    one = MPZ_ONE << wp
    one_2wp = MPZ_ONE << (2*wp)
    wp2 = wp+wp
    ln2 = ln2_fixed(wp)
    pi2 = pi_fixed(wp-1)
    # Precompute p^(-s), p^(-2s), ... for small primes (p*2 <= a+n);
    # larger primes can only appear to the first power and are handled
    # in the main loop directly.
    for p in primes:
        if p*2 > a+n:
            break
        log = log_int_fixed(p, wp, ln2)
        cos, sin = cos_sin_fixed((-sim*log)>>wp, wp, pi2)
        if critical_line:
            u = one_2wp // isqrt_fast(p<<wp2)
        else:
            u = exp_fixed((-sre*log)>>wp, wp)
        pre = (u*cos) >> wp
        pim = (u*sin) >> wp
        basic_powers[p] = [(pre, pim)]
        tre, tim = pre, pim
        # Highest power of p that can divide a+n.
        for m in range(1,int(math.log(a+n,p)+0.01)+1):
            tre, tim = ((pre*tre-pim*tim)>>wp), ((pim*tre+pre*tim)>>wp)
            basic_powers[p].append((tre,tim))
    xre = MPZ_ZERO
    xim = MPZ_ZERO
    if a == 1:
        # 1^(-s) = 1 contributes exactly one fixed-point unit.
        xre += one
    aa = max(a,2)
    for k in xrange(aa, a+n+1):
        p = sieve[k]
        if p in basic_powers:
            # k is smooth: peel off prime powers and multiply the
            # precomputed factors.  NOTE: k is deliberately reassigned
            # inside the while loop; this does not affect the xrange.
            m = mult[k]
            tre, tim = basic_powers[p][m-1]
            while 1:
                k //= p**m
                if k == 1:
                    break
                p = sieve[k]
                m = mult[k]
                pre, pim = basic_powers[p][m-1]
                tre, tim = ((pre*tre-pim*tim)>>wp), ((pim*tre+pre*tim)>>wp)
        else:
            # k has a large prime factor: compute k^(-s) directly.
            log = log_int_fixed(k, wp, ln2)
            cos, sin = cos_sin_fixed((-sim*log)>>wp, wp, pi2)
            if critical_line:
                u = one_2wp // isqrt_fast(k<<wp2)
            else:
                u = exp_fixed((-sre*log)>>wp, wp)
            tre = (u*cos) >> wp
            tim = (u*sin) >> wp
        xre += tre
        xim += tim
    return xre, xim
|
| 1248 |
+
|
| 1249 |
+
# Set to something large to disable
ZETASUM_SIEVE_CUTOFF = 10

def mpc_zetasum(s, a, n, derivatives, reflect, prec):
    """
    Fast version of mp._zetasum, assuming s = complex, a = integer.

    Computes sums of k^(-s) (times powers of log k for derivatives)
    for k = a .. a+n, returning (xs, ys) where each entry is an
    (mpf_re, mpf_im) pair; ys holds the reflected sums k^(s-1) and is
    empty when reflect is false.  Delegates to zetasum_sieved for the
    plain no-derivative, no-reflection case when n is large enough.
    """

    wp = prec + 10
    derivatives = list(derivatives)
    have_derivatives = derivatives != [0]
    have_one_derivative = len(derivatives) == 1

    # parse s
    sre, sim = s
    critical_line = (sre == fhalf)
    sre = to_fixed(sre, wp)
    sim = to_fixed(sim, wp)

    # Sieved path: only for plain sums; the n < 4e7 guard avoids
    # excessive memory use on 32-bit builds.
    if a > 0 and n > ZETASUM_SIEVE_CUTOFF and not have_derivatives \
            and not reflect and (n < 4e7 or sys.maxsize > 2**32):
        re, im = zetasum_sieved(critical_line, sre, sim, a, n, wp)
        xs = [(from_man_exp(re, -wp, prec, 'n'), from_man_exp(im, -wp, prec, 'n'))]
        return xs, []

    maxd = max(derivatives)
    if not have_one_derivative:
        # Accumulate every order 0..maxd so powers of log can be built
        # incrementally.
        derivatives = range(maxd+1)

    # x_d = 0, y_d = 0
    xre = [MPZ_ZERO for d in derivatives]
    xim = [MPZ_ZERO for d in derivatives]
    if reflect:
        yre = [MPZ_ZERO for d in derivatives]
        yim = [MPZ_ZERO for d in derivatives]
    else:
        yre = yim = []

    one = MPZ_ONE << wp
    one_2wp = MPZ_ONE << (2*wp)

    ln2 = ln2_fixed(wp)
    pi2 = pi_fixed(wp-1)
    wp2 = wp+wp

    for w in xrange(a, a+n+1):
        log = log_int_fixed(w, wp, ln2)
        cos, sin = cos_sin_fixed((-sim*log)>>wp, wp, pi2)
        if critical_line:
            # |w^-s| = 1/sqrt(w) on Re(s) = 1/2; sqrt beats exp.
            u = one_2wp // isqrt_fast(w<<wp2)
        else:
            u = exp_fixed((-sre*log)>>wp, wp)
        xterm_re = (u * cos) >> wp
        xterm_im = (u * sin) >> wp
        if reflect:
            # w^(s-1) = conj-style reflected term: 1/(w * w^-s) modulus.
            reciprocal = (one_2wp // (u*w))
            yterm_re = (reciprocal * cos) >> wp
            yterm_im = (reciprocal * sin) >> wp

        if have_derivatives:
            if have_one_derivative:
                # Single derivative order: weight by log(w)^maxd.
                log = pow_fixed(log, maxd, wp)
                xre[0] += (xterm_re * log) >> wp
                xim[0] += (xterm_im * log) >> wp
                if reflect:
                    yre[0] += (yterm_re * log) >> wp
                    yim[0] += (yterm_im * log) >> wp
            else:
                # All orders: t runs through log(w)^d incrementally.
                t = MPZ_ONE << wp
                for d in derivatives:
                    xre[d] += (xterm_re * t) >> wp
                    xim[d] += (xterm_im * t) >> wp
                    if reflect:
                        yre[d] += (yterm_re * t) >> wp
                        yim[d] += (yterm_im * t) >> wp
                    t = (t * log) >> wp
        else:
            xre[0] += xterm_re
            xim[0] += xterm_im
            if reflect:
                yre[0] += yterm_re
                yim[0] += yterm_im
    if have_derivatives:
        # d-th derivative of w^-s carries a factor (-1)^d.
        if have_one_derivative:
            if maxd % 2:
                xre[0] = -xre[0]
                xim[0] = -xim[0]
                if reflect:
                    yre[0] = -yre[0]
                    yim[0] = -yim[0]
        else:
            xre = [(-1)**d * xre[d] for d in derivatives]
            xim = [(-1)**d * xim[d] for d in derivatives]
            if reflect:
                yre = [(-1)**d * yre[d] for d in derivatives]
                yim = [(-1)**d * yim[d] for d in derivatives]
    xs = [(from_man_exp(xa, -wp, prec, 'n'), from_man_exp(xb, -wp, prec, 'n'))
        for (xa, xb) in zip(xre, xim)]
    ys = [(from_man_exp(ya, -wp, prec, 'n'), from_man_exp(yb, -wp, prec, 'n'))
        for (ya, yb) in zip(yre, yim)]
    return xs, ys
|
| 1350 |
+
|
| 1351 |
+
|
| 1352 |
+
#-----------------------------------------------------------------------#
|
| 1353 |
+
# #
|
| 1354 |
+
# The gamma function (NEW IMPLEMENTATION) #
|
| 1355 |
+
# #
|
| 1356 |
+
#-----------------------------------------------------------------------#
|
| 1357 |
+
|
| 1358 |
+
# Higher means faster, but more precomputation time
MAX_GAMMA_TAYLOR_PREC = 5000
# Need to derive higher bounds for Taylor series to go higher
assert MAX_GAMMA_TAYLOR_PREC < 15000

# Use Stirling's series if abs(x) > beta*prec
# Important: must be large enough for convergence!
GAMMA_STIRLING_BETA = 0.2

# Factorials up to this value are precomputed and served from a table.
SMALL_FACTORIAL_CACHE_SIZE = 150

# prec -> list of fixed-point Taylor coefficients of 1/gamma(1+x).
gamma_taylor_cache = {}
# n -> (p, q, bitcount(p), bitcount(q)) for Stirling series terms.
gamma_stirling_cache = {}

small_factorial_cache = [from_int(ifac(n)) for \
        n in range(SMALL_FACTORIAL_CACHE_SIZE+1)]
|
| 1374 |
+
|
| 1375 |
+
def zeta_array(N, prec):
    """
    zeta(n) = A * pi**n / n! + B

    where A is a rational number (A = Bernoulli number
    for n even) and B is an infinite sum over powers of exp(2*pi).
    (B = 0 for n even).

    Returns fixed-point values of zeta(0), zeta(2), ..., zeta(N+1) at
    the given precision (index 1 is left at zero).

    TODO: this is currently only used for gamma, but could
    be very useful elsewhere.
    """
    extra = 30
    wp = prec+extra
    zeta_values = [MPZ_ZERO] * (N+2)
    pi = pi_fixed(wp)
    # STEP 1:
    one = MPZ_ONE << wp
    # zeta(0) = -1/2 exactly.
    zeta_values[0] = -one//2
    f_2pi = mpf_shift(mpf_pi(wp),1)
    exp_2pi_k = exp_2pi = mpf_exp(f_2pi, wp)
    # Compute exponential series
    # Store values of 1/(exp(2*pi*k)-1),
    # exp(2*pi*k)/(exp(2*pi*k)-1)**2, 1/(exp(2*pi*k)-1)**2
    # pi*k*exp(2*pi*k)/(exp(2*pi*k)-1)**2
    exps3 = []
    k = 1
    while 1:
        # Each successive k contributes ~9 fewer significant bits.
        tp = wp - 9*k
        if tp < 1:
            break
        # 1/(exp(2*pi*k-1)
        q1 = mpf_div(fone, mpf_sub(exp_2pi_k, fone, tp), tp)
        # pi*k*exp(2*pi*k)/(exp(2*pi*k)-1)**2
        q2 = mpf_mul(exp_2pi_k, mpf_mul(q1,q1,tp), tp)
        q1 = to_fixed(q1, wp)
        q2 = to_fixed(q2, wp)
        q2 = (k * q2 * pi) >> wp
        exps3.append((q1, q2))
        # Multiply for next round
        exp_2pi_k = mpf_mul(exp_2pi_k, exp_2pi, wp)
        k += 1
    # Exponential sum
    for n in xrange(3, N+1, 2):
        s = MPZ_ZERO
        k = 1
        for e1, e2 in exps3:
            if n%4 == 3:
                t = e1 // k**n
            else:
                U = (n-1)//4
                t = (e1 + e2//U) // k**n
            if not t:
                break
            s += t
            k += 1
        zeta_values[n] = -2*s
    # Even zeta values
    B = [mpf_abs(mpf_bernoulli(k,wp)) for k in xrange(N+2)]
    # pi_pow tracks (2*pi)^n / (2*(n)!) as n advances by 2.
    pi_pow = fpi = mpf_pow_int(mpf_shift(mpf_pi(wp), 1), 2, wp)
    pi_pow = mpf_div(pi_pow, from_int(4), wp)
    for n in xrange(2,N+2,2):
        z = mpf_mul(B[n], pi_pow, wp)
        zeta_values[n] = to_fixed(z, wp)
        pi_pow = mpf_mul(pi_pow, fpi, wp)
        pi_pow = mpf_div(pi_pow, from_int((n+1)*(n+2)), wp)
    # Zeta sum
    reciprocal_pi = (one << wp) // pi
    for n in xrange(3, N+1, 4):
        U = (n-3)//4
        s = zeta_values[4*U+4]*(4*U+7)//4
        for k in xrange(1, U+1):
            s -= (zeta_values[4*k] * zeta_values[4*U+4-4*k]) >> wp
        zeta_values[n] += (2*s*reciprocal_pi) >> wp
    for n in xrange(5, N+1, 4):
        U = (n-1)//4
        s = zeta_values[4*U+2]*(2*U+1)
        for k in xrange(1, 2*U+1):
            s += ((-1)**k*2*k* zeta_values[2*k] * zeta_values[4*U+2-2*k])>>wp
        zeta_values[n] += ((s*reciprocal_pi)>>wp)//(2*U)
    # Drop the guard bits before returning.
    return [x>>extra for x in zeta_values]
|
| 1455 |
+
|
| 1456 |
+
def gamma_taylor_coefficients(inprec):
    """
    Gives the Taylor coefficients of 1/gamma(1+x) as
    a list of fixed-point numbers. Enough coefficients are returned
    to ensure that the series converges to the given precision
    when x is in [0.5, 1.5].

    Returns (coeffs, prec) where coeffs is ordered highest power first
    (ready for Horner evaluation) and prec is the fixed-point precision
    the coefficients are stored at (>= inprec).
    """
    # Reuse nearby cache values (small case)
    if inprec < 400:
        prec = inprec + (10-(inprec%10))
    elif inprec < 1000:
        prec = inprec + (30-(inprec%30))
    else:
        prec = inprec
    if prec in gamma_taylor_cache:
        return gamma_taylor_cache[prec], prec

    # Experimentally determined bounds
    if prec < 1000:
        N = int(prec**0.76 + 2)
    else:
        # Valid to at least 15000 bits
        N = int(prec**0.787 + 2)

    # Reuse higher precision values
    for cprec in gamma_taylor_cache:
        if cprec > prec:
            # Truncate and shift a higher-precision table down to prec.
            coeffs = [x>>(cprec-prec) for x in gamma_taylor_cache[cprec][-N:]]
            if inprec < 1000:
                gamma_taylor_cache[prec] = coeffs
            return coeffs, prec

    # Cache at a higher precision (large case)
    if prec > 1000:
        prec = int(prec * 1.2)

    wp = prec + 20
    A = [0] * N
    A[0] = MPZ_ZERO
    A[1] = MPZ_ONE << wp
    A[2] = euler_fixed(wp)
    # SLOW, reference implementation
    #zeta_values = [0,0]+[to_fixed(mpf_zeta_int(k,wp),wp) for k in xrange(2,N)]
    zeta_values = zeta_array(N, wp)
    # Standard recurrence for the Maclaurin coefficients of 1/gamma(1+x)
    # in terms of euler and zeta values.
    for k in xrange(3, N):
        a = (-A[2]*A[k-1])>>wp
        for j in xrange(2,k):
            a += ((-1)**j * zeta_values[j] * A[k-j]) >> wp
        a //= (1-k)
        A[k] = a
    A = [a>>20 for a in A]
    # Reverse for Horner order and drop the (zero) constant term.
    A = A[::-1]
    A = A[:-1]
    gamma_taylor_cache[prec] = A
    #return A, prec
    # Recurse so the freshly cached table is served through the normal path.
    return gamma_taylor_coefficients(inprec)
|
| 1512 |
+
|
| 1513 |
+
def gamma_fixed_taylor(xmpf, x, wp, prec, rnd, type):
    """
    Evaluate the gamma function via the Taylor series of 1/gamma(1+x),
    after shifting the argument into the convergence interval with the
    recurrence gamma(x+1) = x*gamma(x).

    xmpf is the argument as an mpf; x is the same value in wp-bit fixed
    point.  type selects the variant (0: gamma, 2: 1/gamma(x),
    3: log|gamma(x)|) consistent with mpf_gamma.
    """
    # Determine nearest multiple of N/2
    #n = int(x >> (wp-1))
    #steps = (n-1)>>1
    nearest_int = ((x >> (wp-1)) + MPZ_ONE) >> 1
    one = MPZ_ONE << wp
    coeffs, cwp = gamma_taylor_coefficients(wp)
    if nearest_int > 0:
        # Shift argument down by nearest_int, accumulating the product r
        # of the removed factors: gamma(x) = r * gamma(x - nearest_int).
        r = one
        for i in xrange(nearest_int-1):
            x -= one
            r = (r*x) >> wp
        x -= one
        # Horner evaluation of 1/gamma(1+x); coeffs are highest-first.
        p = MPZ_ZERO
        for c in coeffs:
            p = c + ((x*p)>>wp)
        p >>= (cwp-wp)
        if type == 0:
            return from_man_exp((r<<wp)//p, -wp, prec, rnd)
        if type == 2:
            return mpf_shift(from_rational(p, (r<<wp), prec, rnd), wp)
        if type == 3:
            return mpf_log(mpf_abs(from_man_exp((r<<wp)//p, -wp)), prec, rnd)
    else:
        # Shift argument up: gamma(x) = gamma(x + k) / (x*(x+1)*...).
        r = one
        for i in xrange(-nearest_int):
            r = (r*x) >> wp
            x += one
        p = MPZ_ZERO
        for c in coeffs:
            p = c + ((x*p)>>wp)
        p >>= (cwp-wp)
        if wp - bitcount(abs(x)) > 10:
            # pass very close to 0, so do floating-point multiply
            g = mpf_add(xmpf, from_int(-nearest_int)) # exact
            r = from_man_exp(p*r,-wp-wp)
            r = mpf_mul(r, g, wp)
            if type == 0:
                return mpf_div(fone, r, prec, rnd)
            if type == 2:
                return mpf_pos(r, prec, rnd)
            if type == 3:
                return mpf_log(mpf_abs(mpf_div(fone, r, wp)), prec, rnd)
        else:
            r = from_man_exp(x*p*r,-3*wp)
            if type == 0: return mpf_div(fone, r, prec, rnd)
            if type == 2: return mpf_pos(r, prec, rnd)
            if type == 3: return mpf_neg(mpf_log(mpf_abs(r), prec, rnd))
|
| 1561 |
+
|
| 1562 |
+
def stirling_coefficient(n):
    """Return the n-th Stirling-series coefficient B_n/(n*(n-1)) as a
    tuple (p, q, bitcount(|p|), bitcount(q)), memoized in
    gamma_stirling_cache."""
    try:
        return gamma_stirling_cache[n]
    except KeyError:
        pass
    p, q = bernfrac(n)
    q = q * MPZ(n*(n-1))
    entry = (p, q, bitcount(abs(p)), bitcount(q))
    gamma_stirling_cache[n] = entry
    return entry
|
| 1569 |
+
|
| 1570 |
+
def real_stirling_series(x, prec):
    """
    Sums the rational part of Stirling's expansion,

    log(sqrt(2*pi)) - z + 1/(12*z) - 1/(360*z^3) + ...

    where x is z as a fixed-point number with prec fractional bits.
    The first terms use hardcoded coefficients B_{2k}/(2k*(2k-1));
    from k = 22 on, coefficients come from stirling_coefficient() and
    the running power t is rescaled (tracked via texp) to keep the
    intermediate integers roughly constant in size.
    """
    t = (MPZ_ONE<<(prec+prec)) // x   # t = 1/x
    u = (t*t)>>prec                   # u = 1/x**2
    s = ln_sqrt2pi_fixed(prec) - x
    # Add initial terms of Stirling's series
    s += t//12;            t = (t*u)>>prec
    s -= t//360;           t = (t*u)>>prec
    s += t//1260;          t = (t*u)>>prec
    s -= t//1680;          t = (t*u)>>prec
    if not t: return s
    s += t//1188;          t = (t*u)>>prec
    s -= 691*t//360360;    t = (t*u)>>prec
    s += t//156;           t = (t*u)>>prec
    if not t: return s
    s -= 3617*t//122400;   t = (t*u)>>prec
    s += 43867*t//244188;  t = (t*u)>>prec
    s -= 174611*t//125400; t = (t*u)>>prec
    if not t: return s
    k = 22
    # From here on, the coefficients are growing, so we
    # have to keep t at a roughly constant size
    usize = bitcount(abs(u))
    tsize = bitcount(abs(t))
    texp = 0
    while 1:
        p, q, pb, qb = stirling_coefficient(k)
        term_mag = tsize + pb + texp
        shift = -texp
        m = pb - term_mag
        if m > 0 and shift < m:
            p >>= m
            shift -= m
        m = tsize - term_mag
        if m > 0 and shift < m:
            w = t >> m
            shift -= m
        else:
            w = t
        # Use the down-shifted copy w here: shift was already reduced by m
        # to compensate, so multiplying with the full t would make the
        # term 2^m times too large.
        term = (w*p//q) >> shift
        if not term:
            break
        s += term
        t = (t*u) >> usize
        texp -= (prec - usize)
        k += 2
    return s
|
| 1622 |
+
|
| 1623 |
+
def complex_stirling_series(x, y, prec):
    """
    Complex version of real_stirling_series: sums the rational part of
    Stirling's expansion log(sqrt(2*pi)) - z + 1/(12*z) - ... for
    z = x + i*y given in prec-bit fixed point, returning (sre, sim).
    """
    # t = 1/z
    _m = (x*x + y*y) >> prec
    tre = (x << prec) // _m
    tim = (-y << prec) // _m
    # u = 1/z**2
    ure = (tre*tre - tim*tim) >> prec
    uim = tim*tre >> (prec-1)
    # s = log(sqrt(2*pi)) - z
    sre = ln_sqrt2pi_fixed(prec) - x
    sim = -y

    # Add initial terms of Stirling's series
    sre += tre//12; sim += tim//12;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    sre -= tre//360; sim -= tim//360;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    sre += tre//1260; sim += tim//1260;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    sre -= tre//1680; sim -= tim//1680;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    if abs(tre) + abs(tim) < 5: return sre, sim
    sre += tre//1188; sim += tim//1188;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    sre -= 691*tre//360360; sim -= 691*tim//360360;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    sre += tre//156; sim += tim//156;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    if abs(tre) + abs(tim) < 5: return sre, sim
    sre -= 3617*tre//122400; sim -= 3617*tim//122400;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    sre += 43867*tre//244188; sim += 43867*tim//244188;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    sre -= 174611*tre//125400; sim -= 174611*tim//125400;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    if abs(tre) + abs(tim) < 5: return sre, sim

    k = 22
    # From here on, the coefficients are growing, so we
    # have to keep t at a roughly constant size
    usize = bitcount(max(abs(ure), abs(uim)))
    tsize = bitcount(max(abs(tre), abs(tim)))
    texp = 0
    while 1:
        p, q, pb, qb = stirling_coefficient(k)
        term_mag = tsize + pb + texp
        shift = -texp
        m = pb - term_mag
        if m > 0 and shift < m:
            p >>= m
            shift -= m
        m = tsize - term_mag
        if m > 0 and shift < m:
            wre = tre >> m
            wim = tim >> m
            shift -= m
        else:
            wre = tre
            wim = tim
        # Use the down-shifted copies wre/wim here: shift was already
        # reduced by m to compensate, so multiplying with the full
        # tre/tim would make the term 2^m times too large.
        termre = (wre*p//q) >> shift
        termim = (wim*p//q) >> shift
        if abs(termre) + abs(termim) < 5:
            break
        sre += termre
        sim += termim
        tre, tim = ((tre*ure - tim*uim)>>usize), \
            ((tre*uim + tim*ure)>>usize)
        texp -= (prec - usize)
        k += 2
    return sre, sim
|
| 1693 |
+
|
| 1694 |
+
|
| 1695 |
+
def mpf_gamma(x, prec, rnd='d', type=0):
|
| 1696 |
+
"""
|
| 1697 |
+
This function implements multipurpose evaluation of the gamma
|
| 1698 |
+
function, G(x), as well as the following versions of the same:
|
| 1699 |
+
|
| 1700 |
+
type = 0 -- G(x) [standard gamma function]
|
| 1701 |
+
type = 1 -- G(x+1) = x*G(x+1) = x! [factorial]
|
| 1702 |
+
type = 2 -- 1/G(x) [reciprocal gamma function]
|
| 1703 |
+
type = 3 -- log(|G(x)|) [log-gamma function, real part]
|
| 1704 |
+
"""
|
| 1705 |
+
|
| 1706 |
+
# Specal values
|
| 1707 |
+
sign, man, exp, bc = x
|
| 1708 |
+
if not man:
|
| 1709 |
+
if x == fzero:
|
| 1710 |
+
if type == 1: return fone
|
| 1711 |
+
if type == 2: return fzero
|
| 1712 |
+
raise ValueError("gamma function pole")
|
| 1713 |
+
if x == finf:
|
| 1714 |
+
if type == 2: return fzero
|
| 1715 |
+
return finf
|
| 1716 |
+
return fnan
|
| 1717 |
+
|
| 1718 |
+
# First of all, for log gamma, numbers can be well beyond the fixed-point
|
| 1719 |
+
# range, so we must take care of huge numbers before e.g. trying
|
| 1720 |
+
# to convert x to the nearest integer
|
| 1721 |
+
if type == 3:
|
| 1722 |
+
wp = prec+20
|
| 1723 |
+
if exp+bc > wp and not sign:
|
| 1724 |
+
return mpf_sub(mpf_mul(x, mpf_log(x, wp), wp), x, prec, rnd)
|
| 1725 |
+
|
| 1726 |
+
# We strongly want to special-case small integers
|
| 1727 |
+
is_integer = exp >= 0
|
| 1728 |
+
if is_integer:
|
| 1729 |
+
# Poles
|
| 1730 |
+
if sign:
|
| 1731 |
+
if type == 2:
|
| 1732 |
+
return fzero
|
| 1733 |
+
raise ValueError("gamma function pole")
|
| 1734 |
+
# n = x
|
| 1735 |
+
n = man << exp
|
| 1736 |
+
if n < SMALL_FACTORIAL_CACHE_SIZE:
|
| 1737 |
+
if type == 0:
|
| 1738 |
+
return mpf_pos(small_factorial_cache[n-1], prec, rnd)
|
| 1739 |
+
if type == 1:
|
| 1740 |
+
return mpf_pos(small_factorial_cache[n], prec, rnd)
|
| 1741 |
+
if type == 2:
|
| 1742 |
+
return mpf_div(fone, small_factorial_cache[n-1], prec, rnd)
|
| 1743 |
+
if type == 3:
|
| 1744 |
+
return mpf_log(small_factorial_cache[n-1], prec, rnd)
|
| 1745 |
+
else:
|
| 1746 |
+
# floor(abs(x))
|
| 1747 |
+
n = int(man >> (-exp))
|
| 1748 |
+
|
| 1749 |
+
# Estimate size and precision
|
| 1750 |
+
# Estimate log(gamma(|x|),2) as x*log(x,2)
|
| 1751 |
+
mag = exp + bc
|
| 1752 |
+
gamma_size = n*mag
|
| 1753 |
+
|
| 1754 |
+
if type == 3:
|
| 1755 |
+
wp = prec + 20
|
| 1756 |
+
else:
|
| 1757 |
+
wp = prec + bitcount(gamma_size) + 20
|
| 1758 |
+
|
| 1759 |
+
# Very close to 0, pole
|
| 1760 |
+
if mag < -wp:
|
| 1761 |
+
if type == 0:
|
| 1762 |
+
return mpf_sub(mpf_div(fone,x, wp),mpf_shift(fone,-wp),prec,rnd)
|
| 1763 |
+
if type == 1: return mpf_sub(fone, x, prec, rnd)
|
| 1764 |
+
if type == 2: return mpf_add(x, mpf_shift(fone,mag-wp), prec, rnd)
|
| 1765 |
+
if type == 3: return mpf_neg(mpf_log(mpf_abs(x), prec, rnd))
|
| 1766 |
+
|
| 1767 |
+
# From now on, we assume having a gamma function
|
| 1768 |
+
if type == 1:
|
| 1769 |
+
return mpf_gamma(mpf_add(x, fone), prec, rnd, 0)
|
| 1770 |
+
|
| 1771 |
+
# Special case integers (those not small enough to be caught above,
|
| 1772 |
+
# but still small enough for an exact factorial to be faster
|
| 1773 |
+
# than an approximate algorithm), and half-integers
|
| 1774 |
+
if exp >= -1:
|
| 1775 |
+
if is_integer:
|
| 1776 |
+
if gamma_size < 10*wp:
|
| 1777 |
+
if type == 0:
|
| 1778 |
+
return from_int(ifac(n-1), prec, rnd)
|
| 1779 |
+
if type == 2:
|
| 1780 |
+
return from_rational(MPZ_ONE, ifac(n-1), prec, rnd)
|
| 1781 |
+
if type == 3:
|
| 1782 |
+
return mpf_log(from_int(ifac(n-1)), prec, rnd)
|
| 1783 |
+
# half-integer
|
| 1784 |
+
if n < 100 or gamma_size < 10*wp:
|
| 1785 |
+
if sign:
|
| 1786 |
+
w = sqrtpi_fixed(wp)
|
| 1787 |
+
if n % 2: f = ifac2(2*n+1)
|
| 1788 |
+
else: f = -ifac2(2*n+1)
|
| 1789 |
+
if type == 0:
|
| 1790 |
+
return mpf_shift(from_rational(w, f, prec, rnd), -wp+n+1)
|
| 1791 |
+
if type == 2:
|
| 1792 |
+
return mpf_shift(from_rational(f, w, prec, rnd), wp-n-1)
|
| 1793 |
+
if type == 3:
|
| 1794 |
+
return mpf_log(mpf_shift(from_rational(w, abs(f),
|
| 1795 |
+
prec, rnd), -wp+n+1), prec, rnd)
|
| 1796 |
+
elif n == 0:
|
| 1797 |
+
if type == 0: return mpf_sqrtpi(prec, rnd)
|
| 1798 |
+
if type == 2: return mpf_div(fone, mpf_sqrtpi(wp), prec, rnd)
|
| 1799 |
+
if type == 3: return mpf_log(mpf_sqrtpi(wp), prec, rnd)
|
| 1800 |
+
else:
|
| 1801 |
+
w = sqrtpi_fixed(wp)
|
| 1802 |
+
w = from_man_exp(w * ifac2(2*n-1), -wp-n)
|
| 1803 |
+
if type == 0: return mpf_pos(w, prec, rnd)
|
| 1804 |
+
if type == 2: return mpf_div(fone, w, prec, rnd)
|
| 1805 |
+
if type == 3: return mpf_log(mpf_abs(w), prec, rnd)
|
| 1806 |
+
|
| 1807 |
+
# Convert to fixed point
|
| 1808 |
+
offset = exp + wp
|
| 1809 |
+
if offset >= 0: absxman = man << offset
|
| 1810 |
+
else: absxman = man >> (-offset)
|
| 1811 |
+
|
| 1812 |
+
# For log gamma, provide accurate evaluation for x = 1+eps and 2+eps
|
| 1813 |
+
if type == 3 and not sign:
|
| 1814 |
+
one = MPZ_ONE << wp
|
| 1815 |
+
one_dist = abs(absxman-one)
|
| 1816 |
+
two_dist = abs(absxman-2*one)
|
| 1817 |
+
cancellation = (wp - bitcount(min(one_dist, two_dist)))
|
| 1818 |
+
if cancellation > 10:
|
| 1819 |
+
xsub1 = mpf_sub(fone, x)
|
| 1820 |
+
xsub2 = mpf_sub(ftwo, x)
|
| 1821 |
+
xsub1mag = xsub1[2]+xsub1[3]
|
| 1822 |
+
xsub2mag = xsub2[2]+xsub2[3]
|
| 1823 |
+
if xsub1mag < -wp:
|
| 1824 |
+
return mpf_mul(mpf_euler(wp), mpf_sub(fone, x), prec, rnd)
|
| 1825 |
+
if xsub2mag < -wp:
|
| 1826 |
+
return mpf_mul(mpf_sub(fone, mpf_euler(wp)),
|
| 1827 |
+
mpf_sub(x, ftwo), prec, rnd)
|
| 1828 |
+
# Proceed but increase precision
|
| 1829 |
+
wp += max(-xsub1mag, -xsub2mag)
|
| 1830 |
+
offset = exp + wp
|
| 1831 |
+
if offset >= 0: absxman = man << offset
|
| 1832 |
+
else: absxman = man >> (-offset)
|
| 1833 |
+
|
| 1834 |
+
# Use Taylor series if appropriate
|
| 1835 |
+
n_for_stirling = int(GAMMA_STIRLING_BETA*wp)
|
| 1836 |
+
if n < max(100, n_for_stirling) and wp < MAX_GAMMA_TAYLOR_PREC:
|
| 1837 |
+
if sign:
|
| 1838 |
+
absxman = -absxman
|
| 1839 |
+
return gamma_fixed_taylor(x, absxman, wp, prec, rnd, type)
|
| 1840 |
+
|
| 1841 |
+
# Use Stirling's series
|
| 1842 |
+
# First ensure that |x| is large enough for rapid convergence
|
| 1843 |
+
xorig = x
|
| 1844 |
+
|
| 1845 |
+
# Argument reduction
|
| 1846 |
+
r = 0
|
| 1847 |
+
if n < n_for_stirling:
|
| 1848 |
+
r = one = MPZ_ONE << wp
|
| 1849 |
+
d = n_for_stirling - n
|
| 1850 |
+
for k in xrange(d):
|
| 1851 |
+
r = (r * absxman) >> wp
|
| 1852 |
+
absxman += one
|
| 1853 |
+
x = xabs = from_man_exp(absxman, -wp)
|
| 1854 |
+
if sign:
|
| 1855 |
+
x = mpf_neg(x)
|
| 1856 |
+
else:
|
| 1857 |
+
xabs = mpf_abs(x)
|
| 1858 |
+
|
| 1859 |
+
# Asymptotic series
|
| 1860 |
+
y = real_stirling_series(absxman, wp)
|
| 1861 |
+
u = to_fixed(mpf_log(xabs, wp), wp)
|
| 1862 |
+
u = ((absxman - (MPZ_ONE<<(wp-1))) * u) >> wp
|
| 1863 |
+
y += u
|
| 1864 |
+
w = from_man_exp(y, -wp)
|
| 1865 |
+
|
| 1866 |
+
# Compute final value
|
| 1867 |
+
if sign:
|
| 1868 |
+
# Reflection formula
|
| 1869 |
+
A = mpf_mul(mpf_sin_pi(xorig, wp), xorig, wp)
|
| 1870 |
+
B = mpf_neg(mpf_pi(wp))
|
| 1871 |
+
if type == 0 or type == 2:
|
| 1872 |
+
A = mpf_mul(A, mpf_exp(w, wp))
|
| 1873 |
+
if r:
|
| 1874 |
+
B = mpf_mul(B, from_man_exp(r, -wp), wp)
|
| 1875 |
+
if type == 0:
|
| 1876 |
+
return mpf_div(B, A, prec, rnd)
|
| 1877 |
+
if type == 2:
|
| 1878 |
+
return mpf_div(A, B, prec, rnd)
|
| 1879 |
+
if type == 3:
|
| 1880 |
+
if r:
|
| 1881 |
+
B = mpf_mul(B, from_man_exp(r, -wp), wp)
|
| 1882 |
+
A = mpf_add(mpf_log(mpf_abs(A), wp), w, wp)
|
| 1883 |
+
return mpf_sub(mpf_log(mpf_abs(B), wp), A, prec, rnd)
|
| 1884 |
+
else:
|
| 1885 |
+
if type == 0:
|
| 1886 |
+
if r:
|
| 1887 |
+
return mpf_div(mpf_exp(w, wp),
|
| 1888 |
+
from_man_exp(r, -wp), prec, rnd)
|
| 1889 |
+
return mpf_exp(w, prec, rnd)
|
| 1890 |
+
if type == 2:
|
| 1891 |
+
if r:
|
| 1892 |
+
return mpf_div(from_man_exp(r, -wp),
|
| 1893 |
+
mpf_exp(w, wp), prec, rnd)
|
| 1894 |
+
return mpf_exp(mpf_neg(w), prec, rnd)
|
| 1895 |
+
if type == 3:
|
| 1896 |
+
if r:
|
| 1897 |
+
return mpf_sub(w, mpf_log(from_man_exp(r,-wp), wp), prec, rnd)
|
| 1898 |
+
return mpf_pos(w, prec, rnd)
|
| 1899 |
+
|
| 1900 |
+
|
| 1901 |
+
def mpc_gamma(z, prec, rnd='d', type=0):
|
| 1902 |
+
a, b = z
|
| 1903 |
+
asign, aman, aexp, abc = a
|
| 1904 |
+
bsign, bman, bexp, bbc = b
|
| 1905 |
+
|
| 1906 |
+
if b == fzero:
|
| 1907 |
+
# Imaginary part on negative half-axis for log-gamma function
|
| 1908 |
+
if type == 3 and asign:
|
| 1909 |
+
re = mpf_gamma(a, prec, rnd, 3)
|
| 1910 |
+
n = (-aman) >> (-aexp)
|
| 1911 |
+
im = mpf_mul_int(mpf_pi(prec+10), n, prec, rnd)
|
| 1912 |
+
return re, im
|
| 1913 |
+
return mpf_gamma(a, prec, rnd, type), fzero
|
| 1914 |
+
|
| 1915 |
+
# Some kind of complex inf/nan
|
| 1916 |
+
if (not aman and aexp) or (not bman and bexp):
|
| 1917 |
+
return (fnan, fnan)
|
| 1918 |
+
|
| 1919 |
+
# Initial working precision
|
| 1920 |
+
wp = prec + 20
|
| 1921 |
+
|
| 1922 |
+
amag = aexp+abc
|
| 1923 |
+
bmag = bexp+bbc
|
| 1924 |
+
if aman:
|
| 1925 |
+
mag = max(amag, bmag)
|
| 1926 |
+
else:
|
| 1927 |
+
mag = bmag
|
| 1928 |
+
|
| 1929 |
+
# Close to 0
|
| 1930 |
+
if mag < -8:
|
| 1931 |
+
if mag < -wp:
|
| 1932 |
+
# 1/gamma(z) = z + euler*z^2 + O(z^3)
|
| 1933 |
+
v = mpc_add(z, mpc_mul_mpf(mpc_mul(z,z,wp),mpf_euler(wp),wp), wp)
|
| 1934 |
+
if type == 0: return mpc_reciprocal(v, prec, rnd)
|
| 1935 |
+
if type == 1: return mpc_div(z, v, prec, rnd)
|
| 1936 |
+
if type == 2: return mpc_pos(v, prec, rnd)
|
| 1937 |
+
if type == 3: return mpc_log(mpc_reciprocal(v, prec), prec, rnd)
|
| 1938 |
+
elif type != 1:
|
| 1939 |
+
wp += (-mag)
|
| 1940 |
+
|
| 1941 |
+
# Handle huge log-gamma values; must do this before converting to
|
| 1942 |
+
# a fixed-point value. TODO: determine a precise cutoff of validity
|
| 1943 |
+
# depending on amag and bmag
|
| 1944 |
+
if type == 3 and mag > wp and ((not asign) or (bmag >= amag)):
|
| 1945 |
+
return mpc_sub(mpc_mul(z, mpc_log(z, wp), wp), z, prec, rnd)
|
| 1946 |
+
|
| 1947 |
+
# From now on, we assume having a gamma function
|
| 1948 |
+
if type == 1:
|
| 1949 |
+
return mpc_gamma((mpf_add(a, fone), b), prec, rnd, 0)
|
| 1950 |
+
|
| 1951 |
+
an = abs(to_int(a))
|
| 1952 |
+
bn = abs(to_int(b))
|
| 1953 |
+
absn = max(an, bn)
|
| 1954 |
+
gamma_size = absn*mag
|
| 1955 |
+
if type == 3:
|
| 1956 |
+
pass
|
| 1957 |
+
else:
|
| 1958 |
+
wp += bitcount(gamma_size)
|
| 1959 |
+
|
| 1960 |
+
# Reflect to the right half-plane. Note that Stirling's expansion
|
| 1961 |
+
# is valid in the left half-plane too, as long as we're not too close
|
| 1962 |
+
# to the real axis, but in order to use this argument reduction
|
| 1963 |
+
# in the negative direction must be implemented.
|
| 1964 |
+
#need_reflection = asign and ((bmag < 0) or (amag-bmag > 4))
|
| 1965 |
+
need_reflection = asign
|
| 1966 |
+
zorig = z
|
| 1967 |
+
if need_reflection:
|
| 1968 |
+
z = mpc_neg(z)
|
| 1969 |
+
asign, aman, aexp, abc = a = z[0]
|
| 1970 |
+
bsign, bman, bexp, bbc = b = z[1]
|
| 1971 |
+
|
| 1972 |
+
# Imaginary part very small compared to real one?
|
| 1973 |
+
yfinal = 0
|
| 1974 |
+
balance_prec = 0
|
| 1975 |
+
if bmag < -10:
|
| 1976 |
+
# Check z ~= 1 and z ~= 2 for loggamma
|
| 1977 |
+
if type == 3:
|
| 1978 |
+
zsub1 = mpc_sub_mpf(z, fone)
|
| 1979 |
+
if zsub1[0] == fzero:
|
| 1980 |
+
cancel1 = -bmag
|
| 1981 |
+
else:
|
| 1982 |
+
cancel1 = -max(zsub1[0][2]+zsub1[0][3], bmag)
|
| 1983 |
+
if cancel1 > wp:
|
| 1984 |
+
pi = mpf_pi(wp)
|
| 1985 |
+
x = mpc_mul_mpf(zsub1, pi, wp)
|
| 1986 |
+
x = mpc_mul(x, x, wp)
|
| 1987 |
+
x = mpc_div_mpf(x, from_int(12), wp)
|
| 1988 |
+
y = mpc_mul_mpf(zsub1, mpf_neg(mpf_euler(wp)), wp)
|
| 1989 |
+
yfinal = mpc_add(x, y, wp)
|
| 1990 |
+
if not need_reflection:
|
| 1991 |
+
return mpc_pos(yfinal, prec, rnd)
|
| 1992 |
+
elif cancel1 > 0:
|
| 1993 |
+
wp += cancel1
|
| 1994 |
+
zsub2 = mpc_sub_mpf(z, ftwo)
|
| 1995 |
+
if zsub2[0] == fzero:
|
| 1996 |
+
cancel2 = -bmag
|
| 1997 |
+
else:
|
| 1998 |
+
cancel2 = -max(zsub2[0][2]+zsub2[0][3], bmag)
|
| 1999 |
+
if cancel2 > wp:
|
| 2000 |
+
pi = mpf_pi(wp)
|
| 2001 |
+
t = mpf_sub(mpf_mul(pi, pi), from_int(6))
|
| 2002 |
+
x = mpc_mul_mpf(mpc_mul(zsub2, zsub2, wp), t, wp)
|
| 2003 |
+
x = mpc_div_mpf(x, from_int(12), wp)
|
| 2004 |
+
y = mpc_mul_mpf(zsub2, mpf_sub(fone, mpf_euler(wp)), wp)
|
| 2005 |
+
yfinal = mpc_add(x, y, wp)
|
| 2006 |
+
if not need_reflection:
|
| 2007 |
+
return mpc_pos(yfinal, prec, rnd)
|
| 2008 |
+
elif cancel2 > 0:
|
| 2009 |
+
wp += cancel2
|
| 2010 |
+
if bmag < -wp:
|
| 2011 |
+
# Compute directly from the real gamma function.
|
| 2012 |
+
pp = 2*(wp+10)
|
| 2013 |
+
aabs = mpf_abs(a)
|
| 2014 |
+
eps = mpf_shift(fone, amag-wp)
|
| 2015 |
+
x1 = mpf_gamma(aabs, pp, type=type)
|
| 2016 |
+
x2 = mpf_gamma(mpf_add(aabs, eps), pp, type=type)
|
| 2017 |
+
xprime = mpf_div(mpf_sub(x2, x1, pp), eps, pp)
|
| 2018 |
+
y = mpf_mul(b, xprime, prec, rnd)
|
| 2019 |
+
yfinal = (x1, y)
|
| 2020 |
+
# Note: we still need to use the reflection formula for
|
| 2021 |
+
# near-poles, and the correct branch of the log-gamma function
|
| 2022 |
+
if not need_reflection:
|
| 2023 |
+
return mpc_pos(yfinal, prec, rnd)
|
| 2024 |
+
else:
|
| 2025 |
+
balance_prec += (-bmag)
|
| 2026 |
+
|
| 2027 |
+
wp += balance_prec
|
| 2028 |
+
n_for_stirling = int(GAMMA_STIRLING_BETA*wp)
|
| 2029 |
+
need_reduction = absn < n_for_stirling
|
| 2030 |
+
|
| 2031 |
+
afix = to_fixed(a, wp)
|
| 2032 |
+
bfix = to_fixed(b, wp)
|
| 2033 |
+
|
| 2034 |
+
r = 0
|
| 2035 |
+
if not yfinal:
|
| 2036 |
+
zprered = z
|
| 2037 |
+
# Argument reduction
|
| 2038 |
+
if absn < n_for_stirling:
|
| 2039 |
+
absn = complex(an, bn)
|
| 2040 |
+
d = int((1 + n_for_stirling**2 - bn**2)**0.5 - an)
|
| 2041 |
+
rre = one = MPZ_ONE << wp
|
| 2042 |
+
rim = MPZ_ZERO
|
| 2043 |
+
for k in xrange(d):
|
| 2044 |
+
rre, rim = ((afix*rre-bfix*rim)>>wp), ((afix*rim + bfix*rre)>>wp)
|
| 2045 |
+
afix += one
|
| 2046 |
+
r = from_man_exp(rre, -wp), from_man_exp(rim, -wp)
|
| 2047 |
+
a = from_man_exp(afix, -wp)
|
| 2048 |
+
z = a, b
|
| 2049 |
+
|
| 2050 |
+
yre, yim = complex_stirling_series(afix, bfix, wp)
|
| 2051 |
+
# (z-1/2)*log(z) + S
|
| 2052 |
+
lre, lim = mpc_log(z, wp)
|
| 2053 |
+
lre = to_fixed(lre, wp)
|
| 2054 |
+
lim = to_fixed(lim, wp)
|
| 2055 |
+
yre = ((lre*afix - lim*bfix)>>wp) - (lre>>1) + yre
|
| 2056 |
+
yim = ((lre*bfix + lim*afix)>>wp) - (lim>>1) + yim
|
| 2057 |
+
y = from_man_exp(yre, -wp), from_man_exp(yim, -wp)
|
| 2058 |
+
|
| 2059 |
+
if r and type == 3:
|
| 2060 |
+
# If re(z) > 0 and abs(z) <= 4, the branches of loggamma(z)
|
| 2061 |
+
# and log(gamma(z)) coincide. Otherwise, use the zeroth order
|
| 2062 |
+
# Stirling expansion to compute the correct imaginary part.
|
| 2063 |
+
y = mpc_sub(y, mpc_log(r, wp), wp)
|
| 2064 |
+
zfa = to_float(zprered[0])
|
| 2065 |
+
zfb = to_float(zprered[1])
|
| 2066 |
+
zfabs = math.hypot(zfa,zfb)
|
| 2067 |
+
#if not (zfa > 0.0 and zfabs <= 4):
|
| 2068 |
+
yfb = to_float(y[1])
|
| 2069 |
+
u = math.atan2(zfb, zfa)
|
| 2070 |
+
if zfabs <= 0.5:
|
| 2071 |
+
gi = 0.577216*zfb - u
|
| 2072 |
+
else:
|
| 2073 |
+
gi = -zfb - 0.5*u + zfa*u + zfb*math.log(zfabs)
|
| 2074 |
+
n = int(math.floor((gi-yfb)/(2*math.pi)+0.5))
|
| 2075 |
+
y = (y[0], mpf_add(y[1], mpf_mul_int(mpf_pi(wp), 2*n, wp), wp))
|
| 2076 |
+
|
| 2077 |
+
if need_reflection:
|
| 2078 |
+
if type == 0 or type == 2:
|
| 2079 |
+
A = mpc_mul(mpc_sin_pi(zorig, wp), zorig, wp)
|
| 2080 |
+
B = (mpf_neg(mpf_pi(wp)), fzero)
|
| 2081 |
+
if yfinal:
|
| 2082 |
+
if type == 2:
|
| 2083 |
+
A = mpc_div(A, yfinal, wp)
|
| 2084 |
+
else:
|
| 2085 |
+
A = mpc_mul(A, yfinal, wp)
|
| 2086 |
+
else:
|
| 2087 |
+
A = mpc_mul(A, mpc_exp(y, wp), wp)
|
| 2088 |
+
if r:
|
| 2089 |
+
B = mpc_mul(B, r, wp)
|
| 2090 |
+
if type == 0: return mpc_div(B, A, prec, rnd)
|
| 2091 |
+
if type == 2: return mpc_div(A, B, prec, rnd)
|
| 2092 |
+
|
| 2093 |
+
# Reflection formula for the log-gamma function with correct branch
|
| 2094 |
+
# http://functions.wolfram.com/GammaBetaErf/LogGamma/16/01/01/0006/
|
| 2095 |
+
# LogGamma[z] == -LogGamma[-z] - Log[-z] +
|
| 2096 |
+
# Sign[Im[z]] Floor[Re[z]] Pi I + Log[Pi] -
|
| 2097 |
+
# Log[Sin[Pi (z - Floor[Re[z]])]] -
|
| 2098 |
+
# Pi I (1 - Abs[Sign[Im[z]]]) Abs[Floor[Re[z]]]
|
| 2099 |
+
if type == 3:
|
| 2100 |
+
if yfinal:
|
| 2101 |
+
s1 = mpc_neg(yfinal)
|
| 2102 |
+
else:
|
| 2103 |
+
s1 = mpc_neg(y)
|
| 2104 |
+
# s -= log(-z)
|
| 2105 |
+
s1 = mpc_sub(s1, mpc_log(mpc_neg(zorig), wp), wp)
|
| 2106 |
+
# floor(re(z))
|
| 2107 |
+
rezfloor = mpf_floor(zorig[0])
|
| 2108 |
+
imzsign = mpf_sign(zorig[1])
|
| 2109 |
+
pi = mpf_pi(wp)
|
| 2110 |
+
t = mpf_mul(pi, rezfloor)
|
| 2111 |
+
t = mpf_mul_int(t, imzsign, wp)
|
| 2112 |
+
s1 = (s1[0], mpf_add(s1[1], t, wp))
|
| 2113 |
+
s1 = mpc_add_mpf(s1, mpf_log(pi, wp), wp)
|
| 2114 |
+
t = mpc_sin_pi(mpc_sub_mpf(zorig, rezfloor), wp)
|
| 2115 |
+
t = mpc_log(t, wp)
|
| 2116 |
+
s1 = mpc_sub(s1, t, wp)
|
| 2117 |
+
# Note: may actually be unused, because we fall back
|
| 2118 |
+
# to the mpf_ function for real arguments
|
| 2119 |
+
if not imzsign:
|
| 2120 |
+
t = mpf_mul(pi, mpf_floor(rezfloor), wp)
|
| 2121 |
+
s1 = (s1[0], mpf_sub(s1[1], t, wp))
|
| 2122 |
+
return mpc_pos(s1, prec, rnd)
|
| 2123 |
+
else:
|
| 2124 |
+
if type == 0:
|
| 2125 |
+
if r:
|
| 2126 |
+
return mpc_div(mpc_exp(y, wp), r, prec, rnd)
|
| 2127 |
+
return mpc_exp(y, prec, rnd)
|
| 2128 |
+
if type == 2:
|
| 2129 |
+
if r:
|
| 2130 |
+
return mpc_div(r, mpc_exp(y, wp), prec, rnd)
|
| 2131 |
+
return mpc_exp(mpc_neg(y), prec, rnd)
|
| 2132 |
+
if type == 3:
|
| 2133 |
+
return mpc_pos(y, prec, rnd)
|
| 2134 |
+
|
| 2135 |
+
def mpf_factorial(x, prec, rnd='d'):
|
| 2136 |
+
return mpf_gamma(x, prec, rnd, 1)
|
| 2137 |
+
|
| 2138 |
+
def mpc_factorial(x, prec, rnd='d'):
|
| 2139 |
+
return mpc_gamma(x, prec, rnd, 1)
|
| 2140 |
+
|
| 2141 |
+
def mpf_rgamma(x, prec, rnd='d'):
|
| 2142 |
+
return mpf_gamma(x, prec, rnd, 2)
|
| 2143 |
+
|
| 2144 |
+
def mpc_rgamma(x, prec, rnd='d'):
|
| 2145 |
+
return mpc_gamma(x, prec, rnd, 2)
|
| 2146 |
+
|
| 2147 |
+
def mpf_loggamma(x, prec, rnd='d'):
|
| 2148 |
+
sign, man, exp, bc = x
|
| 2149 |
+
if sign:
|
| 2150 |
+
raise ComplexResult
|
| 2151 |
+
return mpf_gamma(x, prec, rnd, 3)
|
| 2152 |
+
|
| 2153 |
+
def mpc_loggamma(z, prec, rnd='d'):
|
| 2154 |
+
a, b = z
|
| 2155 |
+
asign, aman, aexp, abc = a
|
| 2156 |
+
bsign, bman, bexp, bbc = b
|
| 2157 |
+
if b == fzero and asign:
|
| 2158 |
+
re = mpf_gamma(a, prec, rnd, 3)
|
| 2159 |
+
n = (-aman) >> (-aexp)
|
| 2160 |
+
im = mpf_mul_int(mpf_pi(prec+10), n, prec, rnd)
|
| 2161 |
+
return re, im
|
| 2162 |
+
return mpc_gamma(z, prec, rnd, 3)
|
| 2163 |
+
|
| 2164 |
+
def mpf_gamma_int(n, prec, rnd=round_fast):
|
| 2165 |
+
if n < SMALL_FACTORIAL_CACHE_SIZE:
|
| 2166 |
+
return mpf_pos(small_factorial_cache[n-1], prec, rnd)
|
| 2167 |
+
return mpf_gamma(from_int(n), prec, rnd)
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/libmp/libmpf.py
ADDED
|
@@ -0,0 +1,1414 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Low-level functions for arbitrary-precision floating-point arithmetic.
|
| 3 |
+
"""
|
| 4 |
+
|
| 5 |
+
__docformat__ = 'plaintext'
|
| 6 |
+
|
| 7 |
+
import math
|
| 8 |
+
|
| 9 |
+
from bisect import bisect
|
| 10 |
+
|
| 11 |
+
import sys
|
| 12 |
+
|
| 13 |
+
# Importing random is slow
|
| 14 |
+
#from random import getrandbits
|
| 15 |
+
getrandbits = None
|
| 16 |
+
|
| 17 |
+
from .backend import (MPZ, MPZ_TYPE, MPZ_ZERO, MPZ_ONE, MPZ_TWO, MPZ_FIVE,
|
| 18 |
+
BACKEND, STRICT, HASH_MODULUS, HASH_BITS, gmpy, sage, sage_utils)
|
| 19 |
+
|
| 20 |
+
from .libintmath import (giant_steps,
|
| 21 |
+
trailtable, bctable, lshift, rshift, bitcount, trailing,
|
| 22 |
+
sqrt_fixed, numeral, isqrt, isqrt_fast, sqrtrem,
|
| 23 |
+
bin_to_radix)
|
| 24 |
+
|
| 25 |
+
# We don't pickle tuples directly for the following reasons:
|
| 26 |
+
# 1: pickle uses str() for ints, which is inefficient when they are large
|
| 27 |
+
# 2: pickle doesn't work for gmpy mpzs
|
| 28 |
+
# Both problems are solved by using hex()
|
| 29 |
+
|
| 30 |
+
if BACKEND == 'sage':
|
| 31 |
+
def to_pickable(x):
|
| 32 |
+
sign, man, exp, bc = x
|
| 33 |
+
return sign, hex(man), exp, bc
|
| 34 |
+
else:
|
| 35 |
+
def to_pickable(x):
|
| 36 |
+
sign, man, exp, bc = x
|
| 37 |
+
return sign, hex(man)[2:], exp, bc
|
| 38 |
+
|
| 39 |
+
def from_pickable(x):
|
| 40 |
+
sign, man, exp, bc = x
|
| 41 |
+
return (sign, MPZ(man, 16), exp, bc)
|
| 42 |
+
|
| 43 |
+
class ComplexResult(ValueError):
|
| 44 |
+
pass
|
| 45 |
+
|
| 46 |
+
try:
|
| 47 |
+
intern
|
| 48 |
+
except NameError:
|
| 49 |
+
intern = lambda x: x
|
| 50 |
+
|
| 51 |
+
# All supported rounding modes
|
| 52 |
+
round_nearest = intern('n')
|
| 53 |
+
round_floor = intern('f')
|
| 54 |
+
round_ceiling = intern('c')
|
| 55 |
+
round_up = intern('u')
|
| 56 |
+
round_down = intern('d')
|
| 57 |
+
round_fast = round_down
|
| 58 |
+
|
| 59 |
+
def prec_to_dps(n):
|
| 60 |
+
"""Return number of accurate decimals that can be represented
|
| 61 |
+
with a precision of n bits."""
|
| 62 |
+
return max(1, int(round(int(n)/3.3219280948873626)-1))
|
| 63 |
+
|
| 64 |
+
def dps_to_prec(n):
|
| 65 |
+
"""Return the number of bits required to represent n decimals
|
| 66 |
+
accurately."""
|
| 67 |
+
return max(1, int(round((int(n)+1)*3.3219280948873626)))
|
| 68 |
+
|
| 69 |
+
def repr_dps(n):
|
| 70 |
+
"""Return the number of decimal digits required to represent
|
| 71 |
+
a number with n-bit precision so that it can be uniquely
|
| 72 |
+
reconstructed from the representation."""
|
| 73 |
+
dps = prec_to_dps(n)
|
| 74 |
+
if dps == 15:
|
| 75 |
+
return 17
|
| 76 |
+
return dps + 3
|
| 77 |
+
|
| 78 |
+
#----------------------------------------------------------------------------#
|
| 79 |
+
# Some commonly needed float values #
|
| 80 |
+
#----------------------------------------------------------------------------#
|
| 81 |
+
|
| 82 |
+
# Regular number format:
|
| 83 |
+
# (-1)**sign * mantissa * 2**exponent, plus bitcount of mantissa
|
| 84 |
+
fzero = (0, MPZ_ZERO, 0, 0)
|
| 85 |
+
fnzero = (1, MPZ_ZERO, 0, 0)
|
| 86 |
+
fone = (0, MPZ_ONE, 0, 1)
|
| 87 |
+
fnone = (1, MPZ_ONE, 0, 1)
|
| 88 |
+
ftwo = (0, MPZ_ONE, 1, 1)
|
| 89 |
+
ften = (0, MPZ_FIVE, 1, 3)
|
| 90 |
+
fhalf = (0, MPZ_ONE, -1, 1)
|
| 91 |
+
|
| 92 |
+
# Arbitrary encoding for special numbers: zero mantissa, nonzero exponent
|
| 93 |
+
fnan = (0, MPZ_ZERO, -123, -1)
|
| 94 |
+
finf = (0, MPZ_ZERO, -456, -2)
|
| 95 |
+
fninf = (1, MPZ_ZERO, -789, -3)
|
| 96 |
+
|
| 97 |
+
# Was 1e1000; this is broken in Python 2.4
|
| 98 |
+
math_float_inf = 1e300 * 1e300
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
#----------------------------------------------------------------------------#
|
| 102 |
+
# Rounding #
|
| 103 |
+
#----------------------------------------------------------------------------#
|
| 104 |
+
|
| 105 |
+
# This function can be used to round a mantissa generally. However,
|
| 106 |
+
# we will try to do most rounding inline for efficiency.
|
| 107 |
+
def round_int(x, n, rnd):
|
| 108 |
+
if rnd == round_nearest:
|
| 109 |
+
if x >= 0:
|
| 110 |
+
t = x >> (n-1)
|
| 111 |
+
if t & 1 and ((t & 2) or (x & h_mask[n<300][n])):
|
| 112 |
+
return (t>>1)+1
|
| 113 |
+
else:
|
| 114 |
+
return t>>1
|
| 115 |
+
else:
|
| 116 |
+
return -round_int(-x, n, rnd)
|
| 117 |
+
if rnd == round_floor:
|
| 118 |
+
return x >> n
|
| 119 |
+
if rnd == round_ceiling:
|
| 120 |
+
return -((-x) >> n)
|
| 121 |
+
if rnd == round_down:
|
| 122 |
+
if x >= 0:
|
| 123 |
+
return x >> n
|
| 124 |
+
return -((-x) >> n)
|
| 125 |
+
if rnd == round_up:
|
| 126 |
+
if x >= 0:
|
| 127 |
+
return -((-x) >> n)
|
| 128 |
+
return x >> n
|
| 129 |
+
|
| 130 |
+
# These masks are used to pick out segments of numbers to determine
# which direction to round when rounding to nearest.
class h_mask_big:
    # Computes the mask (2**(n-1) - 1) on demand, for arbitrarily large n.
    def __getitem__(self, n):
        return (MPZ_ONE<<(n-1))-1

# Precomputed masks for the common case n < 300.
h_mask_small = [0]+[((MPZ_ONE<<(_-1))-1) for _ in range(1, 300)]
# Indexed as h_mask[n<300][n]: small table when available, lazy otherwise.
h_mask = [h_mask_big(), h_mask_small]
|
| 138 |
+
|
| 139 |
+
# The >> operator rounds to floor. shifts_down[rnd][sign]
# tells whether this is the right direction to use, or if the
# number should be negated before shifting
shifts_down = {round_floor:(1,0), round_ceiling:(0,1),
    round_down:(1,1), round_up:(0,0)}
|
| 144 |
+
|
| 145 |
+
|
| 146 |
+
#----------------------------------------------------------------------------#
|
| 147 |
+
# Normalization of raw mpfs #
|
| 148 |
+
#----------------------------------------------------------------------------#
|
| 149 |
+
|
| 150 |
+
# This function is called almost every time an mpf is created.
|
| 151 |
+
# It has been optimized accordingly.
|
| 152 |
+
|
| 153 |
+
def _normalize(sign, man, exp, bc, prec, rnd):
    """
    Create a raw mpf tuple with value (-1)**sign * man * 2**exp and
    normalized mantissa. The mantissa is rounded in the specified
    direction if its size exceeds the precision. Trailing zero bits
    are also stripped from the mantissa to ensure that the
    representation is canonical.

    Conditions on the input:
    * The input must represent a regular (finite) number
    * The sign bit must be 0 or 1
    * The mantissa must be positive
    * The exponent must be an integer
    * The bitcount must be exact

    If these conditions are not met, use from_man_exp, mpf_pos, or any
    of the conversion functions to create normalized raw mpf tuples.
    """
    if not man:
        return fzero
    # Cut mantissa down to size if larger than target precision
    n = bc - prec
    if n > 0:
        if rnd == round_nearest:
            # Round-half-to-even on the discarded low n bits (see round_int).
            t = man >> (n-1)
            if t & 1 and ((t & 2) or (man & h_mask[n<300][n])):
                man = (t>>1)+1
            else:
                man = t>>1
        elif shifts_down[rnd][sign]:
            # Plain floor shift is the correct direction for this mode/sign.
            man >>= n
        else:
            # Otherwise negate, shift, negate back (ceiling-style shift).
            man = -((-man)>>n)
        exp += n
        bc = prec
    # Strip trailing bits
    if not man & 1:
        t = trailtable[int(man & 255)]
        if not t:
            # Low byte all zero: discard whole zero bytes first.
            while not man & 255:
                man >>= 8
                exp += 8
                bc -= 8
            t = trailtable[int(man & 255)]
        man >>= t
        exp += t
        bc -= t
    # Bit count can be wrong if the input mantissa was 1 less than
    # a power of 2 and got rounded up, thereby adding an extra bit.
    # With trailing bits removed, all powers of two have mantissa 1,
    # so this is easy to check for.
    if man == 1:
        bc = 1
    return sign, man, exp, bc
|
| 207 |
+
|
| 208 |
+
def _normalize1(sign, man, exp, bc, prec, rnd):
    """same as normalize, but with the added condition that
    man is odd or zero
    """
    if not man:
        return fzero
    if bc <= prec:
        # Already fits; man is odd (precondition), so no stripping needed.
        return sign, man, exp, bc
    n = bc - prec
    if rnd == round_nearest:
        # Round-half-to-even on the discarded low n bits.
        t = man >> (n-1)
        if t & 1 and ((t & 2) or (man & h_mask[n<300][n])):
            man = (t>>1)+1
        else:
            man = t>>1
    elif shifts_down[rnd][sign]:
        man >>= n
    else:
        man = -((-man)>>n)
    exp += n
    bc = prec
    # Strip trailing bits
    if not man & 1:
        t = trailtable[int(man & 255)]
        if not t:
            while not man & 255:
                man >>= 8
                exp += 8
                bc -= 8
            t = trailtable[int(man & 255)]
        man >>= t
        exp += t
        bc -= t
    # Bit count can be wrong if the input mantissa was 1 less than
    # a power of 2 and got rounded up, thereby adding an extra bit.
    # With trailing bits removed, all powers of two have mantissa 1,
    # so this is easy to check for.
    if man == 1:
        bc = 1
    return sign, man, exp, bc
|
| 248 |
+
|
| 249 |
+
# Types acceptable for exponents and bitcounts: on Python 2 `long` is a
# distinct type from `int`, so include it when it exists.
try:
    _exp_types = (int, long)
except NameError:
    _exp_types = (int,)
|
| 253 |
+
|
| 254 |
+
def strict_normalize(sign, man, exp, bc, prec, rnd):
    """Additional checks on the components of an mpf. Enable tests by setting
    the environment variable MPMATH_STRICT to Y."""
    # Validate component types and the exactness of the bitcount before
    # delegating to the fast normalization path.
    assert type(man) == MPZ_TYPE
    assert type(exp) in _exp_types
    assert type(bc) in _exp_types
    assert bc == bitcount(man)
    return _normalize(sign, man, exp, bc, prec, rnd)
|
| 262 |
+
|
| 263 |
+
def strict_normalize1(sign, man, exp, bc, prec, rnd):
    """Additional checks on the components of an mpf. Enable tests by setting
    the environment variable MPMATH_STRICT to Y."""
    # Same checks as strict_normalize, plus the _normalize1 precondition
    # that the mantissa is odd (or zero).
    assert type(man) == MPZ_TYPE
    assert type(exp) in _exp_types
    assert type(bc) in _exp_types
    assert bc == bitcount(man)
    assert (not man) or (man & 1)
    return _normalize1(sign, man, exp, bc, prec, rnd)
|
| 272 |
+
|
| 273 |
+
# Use optimized C implementations of normalization when the backend
# provides them.
if BACKEND == 'gmpy' and '_mpmath_normalize' in dir(gmpy):
    _normalize = gmpy._mpmath_normalize
    _normalize1 = gmpy._mpmath_normalize

if BACKEND == 'sage':
    _normalize = _normalize1 = sage_utils.normalize

# With MPMATH_STRICT enabled, route everything through the asserting
# wrappers; otherwise bind the fast paths directly.
if STRICT:
    normalize = strict_normalize
    normalize1 = strict_normalize1
else:
    normalize = _normalize
    normalize1 = _normalize1
|
| 286 |
+
|
| 287 |
+
#----------------------------------------------------------------------------#
|
| 288 |
+
# Conversion functions #
|
| 289 |
+
#----------------------------------------------------------------------------#
|
| 290 |
+
|
| 291 |
+
def from_man_exp(man, exp, prec=None, rnd=round_fast):
    """Create raw mpf from (man, exp) pair. The mantissa may be signed.
    If no precision is specified, the mantissa is stored exactly."""
    man = MPZ(man)
    sign = 0
    if man < 0:
        sign = 1
        man = -man
    # Small mantissas use the precomputed bitcount table.
    if man < 1024:
        bc = bctable[int(man)]
    else:
        bc = bitcount(man)
    if not prec:
        # Exact mode: no rounding, only canonicalize by stripping
        # trailing zero bits (same scheme as _normalize).
        if not man:
            return fzero
        if not man & 1:
            if man & 2:
                # Exactly one trailing zero bit: cheap fast path.
                return (sign, man >> 1, exp + 1, bc - 1)
            t = trailtable[int(man & 255)]
            if not t:
                while not man & 255:
                    man >>= 8
                    exp += 8
                    bc -= 8
                t = trailtable[int(man & 255)]
            man >>= t
            exp += t
            bc -= t
        return (sign, man, exp, bc)
    return normalize(sign, man, exp, bc, prec, rnd)
|
| 321 |
+
|
| 322 |
+
# Cache exact raw mpfs for small integers, which occur very frequently.
int_cache = dict((n, from_man_exp(n, 0)) for n in range(-10, 257))

# Prefer the backend's optimized C implementation when available.
if BACKEND == 'gmpy' and '_mpmath_create' in dir(gmpy):
    from_man_exp = gmpy._mpmath_create

if BACKEND == 'sage':
    from_man_exp = sage_utils.from_man_exp
|
| 329 |
+
|
| 330 |
+
def from_int(n, prec=0, rnd=round_fast):
    """Create a raw mpf from an integer. If no precision is specified,
    the mantissa is stored exactly."""
    # Exact conversions of small integers are served from the cache.
    if (not prec) and (n in int_cache):
        return int_cache[n]
    return from_man_exp(n, 0, prec, rnd)
|
| 337 |
+
|
| 338 |
+
def to_man_exp(s):
    """Return (man, exp) of a raw mpf. Raise an error if inf/nan."""
    sign, man, exp, bc = s
    # Specials are encoded with zero mantissa and nonzero exponent.
    if exp and not man:
        raise ValueError("mantissa and exponent are undefined for %s" % man)
    return man, exp
|
| 344 |
+
|
| 345 |
+
def to_int(s, rnd=None):
    """Convert a raw mpf to the nearest int. Rounding is done down by
    default (same as int(float) in Python), but can be changed. If the
    input is inf/nan, an exception is raised."""
    sign, man, exp, bc = s
    if exp and not man:
        raise ValueError("cannot convert inf or nan to int")
    if exp >= 0:
        # Exact integer: just shift up.
        shifted = man << exp
        return -shifted if sign else shifted
    # Make default rounding fast
    if not rnd:
        # Truncation toward zero: shift the magnitude, then apply sign.
        shifted = man >> (-exp)
        return -shifted if sign else shifted
    # Directed rounding: delegate to round_int on the signed mantissa.
    return round_int(-man if sign else man, -exp, rnd)
|
| 366 |
+
|
| 367 |
+
def mpf_round_int(s, rnd):
    # Round the raw mpf s to an integer-valued raw mpf in direction rnd.
    sign, man, exp, bc = s
    if (not man) and exp:
        # inf/nan round to themselves
        return s
    if exp >= 0:
        # Already an exact integer
        return s
    # mag is the position of the highest bit, i.e. floor(log2|s|)+1
    mag = exp+bc
    if mag < 1:
        # |s| < 1: result is one of -1, 0, 1 depending on mode and sign
        if rnd == round_ceiling:
            if sign: return fzero
            else: return fone
        elif rnd == round_floor:
            if sign: return fnone
            else: return fzero
        elif rnd == round_nearest:
            # |s| < 0.5, or exactly 0.5 (mantissa 1): ties to even -> 0
            if mag < 0 or man == MPZ_ONE: return fzero
            elif sign: return fnone
            else: return fone
        else:
            raise NotImplementedError
    # Rounding to mag bits keeps exactly the integer part
    return mpf_pos(s, min(bc, mag), rnd)
|
| 388 |
+
|
| 389 |
+
def mpf_floor(s, prec=0, rnd=round_fast):
    """Floor of the raw mpf s, optionally rounded to prec bits."""
    result = mpf_round_int(s, round_floor)
    if not prec:
        return result
    return mpf_pos(result, prec, rnd)
|
| 394 |
+
|
| 395 |
+
def mpf_ceil(s, prec=0, rnd=round_fast):
    """Ceiling of the raw mpf s, optionally rounded to prec bits."""
    result = mpf_round_int(s, round_ceiling)
    if not prec:
        return result
    return mpf_pos(result, prec, rnd)
|
| 400 |
+
|
| 401 |
+
def mpf_nint(s, prec=0, rnd=round_fast):
    """Nearest integer to the raw mpf s, optionally rounded to prec bits."""
    result = mpf_round_int(s, round_nearest)
    if not prec:
        return result
    return mpf_pos(result, prec, rnd)
|
| 406 |
+
|
| 407 |
+
def mpf_frac(s, prec=0, rnd=round_fast):
    """Fractional part of the raw mpf s, computed as s - floor(s)."""
    return mpf_sub(s, mpf_floor(s), prec, rnd)
|
| 409 |
+
|
| 410 |
+
def from_float(x, prec=53, rnd=round_fast):
    """Create a raw mpf from a Python float, rounding if necessary.
    If prec >= 53, the result is guaranteed to represent exactly the
    same number as the input. If prec is not specified, use prec=53."""
    # frexp only raises an exception for nan on some platforms
    if x != x:
        return fnan
    # in Python2.5 math.frexp gives an exception for float infinity
    # in Python2.6 it returns (float infinity, 0)
    # NOTE: the bare except is deliberate, to absorb whatever frexp
    # raises for inf on old platforms.
    try:
        m, e = math.frexp(x)
    except:
        if x == math_float_inf: return finf
        if x == -math_float_inf: return fninf
        return fnan
    if x == math_float_inf: return finf
    if x == -math_float_inf: return fninf
    # m is in [0.5, 1); scale it to a 53-bit integer mantissa.
    return from_man_exp(int(m*(1<<53)), e-53, prec, rnd)
|
| 428 |
+
|
| 429 |
+
def from_npfloat(x, prec=113, rnd=round_fast):
    """Create a raw mpf from a numpy float, rounding if necessary.
    If prec >= 113, the result is guaranteed to represent exactly the
    same number as the input. If prec is not specified, use prec=113."""
    # If the value survives a round-trip through Python float, use the
    # float path (this covers float16/32/64).
    y = float(x)
    if x == y: # ldexp overflows for float16
        return from_float(y, prec, rnd)
    # Extended precision (e.g. float128/longdouble): extract mantissa and
    # exponent with numpy's frexp at 113-bit scale.
    import numpy as np
    if np.isfinite(x):
        m, e = np.frexp(x)
        return from_man_exp(int(np.ldexp(m, 113)), int(e-113), prec, rnd)
    if np.isposinf(x): return finf
    if np.isneginf(x): return fninf
    return fnan
|
| 443 |
+
|
| 444 |
+
def from_Decimal(x, prec=None, rnd=round_fast):
    """Create a raw mpf from a Decimal, rounding if necessary.
    If prec is not specified, use the equivalent bit precision
    of the number of significant digits in x."""
    if x.is_nan():
        return fnan
    if x.is_infinite():
        if x.is_signed():
            return fninf
        return finf
    if prec is None:
        # Convert decimal digits to bits: log2(10) ~= 3.3219280948873626
        digits = len(x.as_tuple()[1])
        prec = int(digits * 3.3219280948873626)
    return from_str(str(x), prec, rnd)
|
| 453 |
+
|
| 454 |
+
def to_float(s, strict=False, rnd=round_fast):
    """
    Convert a raw mpf to a Python float. The result is exact if the
    bitcount of s is <= 53 and no underflow/overflow occurs.

    If the number is too large or too small to represent as a regular
    float, it will be converted to inf or 0.0. Setting strict=True
    forces an OverflowError to be raised instead.

    Warning: with a directed rounding mode, the correct nearest representable
    floating-point number in the specified direction might not be computed
    in case of overflow or (gradual) underflow.
    """
    sign, man, exp, bc = s
    if not man:
        # Zero or a special value
        if s == fzero: return 0.0
        if s == finf: return math_float_inf
        if s == fninf: return -math_float_inf
        return math_float_inf/math_float_inf  # nan
    if bc > 53:
        # Round the mantissa to at most 53 bits first
        sign, man, exp, bc = normalize1(sign, man, exp, bc, 53, rnd)
    if sign:
        man = -man
    try:
        return math.ldexp(man, exp)
    except OverflowError:
        if strict:
            raise
        # Overflow to infinity
        if exp + bc > 0:
            if sign:
                return -math_float_inf
            else:
                return math_float_inf
        # Underflow to zero
        return 0.0
|
| 490 |
+
|
| 491 |
+
def from_rational(p, q, prec, rnd=round_fast):
    """Create a raw mpf from a rational number p/q, round if
    necessary."""
    num = from_int(p)
    den = from_int(q)
    return mpf_div(num, den, prec, rnd)
|
| 495 |
+
|
| 496 |
+
def to_rational(s):
    """Convert a raw mpf to a rational number. Return integers (p, q)
    such that s = p/q exactly.

    Raises ValueError for inf, -inf and nan. (Previously only nan,
    whose bitcount is -1, was rejected; +/-inf silently fell through
    and produced a meaningless huge denominator.)
    """
    sign, man, exp, bc = s
    # All special values are encoded with zero mantissa, nonzero exponent.
    if (not man) and exp:
        raise ValueError("cannot convert %s to a rational number" % man)
    if sign:
        man = -man
    if exp >= 0:
        return man * (1<<exp), 1
    else:
        return man, 1<<(-exp)
|
| 508 |
+
|
| 509 |
+
def to_fixed(s, prec):
    """Convert a raw mpf to a fixed-point big integer"""
    sign, man, exp, bc = s
    offset = exp + prec
    # Apply the sign before shifting so that right shifts of negative
    # values floor (matching the unsigned-shift-then-negate original).
    value = -man if sign else man
    if offset >= 0:
        return value << offset
    return value >> (-offset)
|
| 519 |
+
|
| 520 |
+
|
| 521 |
+
##############################################################################
|
| 522 |
+
##############################################################################
|
| 523 |
+
|
| 524 |
+
#----------------------------------------------------------------------------#
|
| 525 |
+
# Arithmetic operations, etc. #
|
| 526 |
+
#----------------------------------------------------------------------------#
|
| 527 |
+
|
| 528 |
+
def mpf_rand(prec):
    """Return a raw mpf chosen randomly from [0, 1), with prec bits
    in the mantissa."""
    global getrandbits
    # Import random lazily, on first use, and cache the bound method in
    # the module-level name so later calls skip the import.
    if not getrandbits:
        import random
        getrandbits = random.getrandbits
    return from_man_exp(getrandbits(prec), -prec, prec, round_floor)
|
| 536 |
+
|
| 537 |
+
def mpf_eq(s, t):
    """Test equality of two raw mpfs. This is simply tuple comparison
    unless either number is nan, in which case the result is False."""
    # Fast path: both mantissas nonzero means neither can be special.
    if s[1] and t[1]:
        return s == t
    if fnan in (s, t):
        return False
    return s == t
|
| 544 |
+
|
| 545 |
+
def mpf_hash(s):
    # Duplicate the new hash algorithm introduced in Python 3.2:
    # the hash of x is x's value reduced modulo the Mersenne prime
    # HASH_MODULUS = 2**HASH_BITS - 1, so numerically equal int/float/mpf
    # values hash alike.
    if sys.version_info >= (3, 2):
        ssign, sman, sexp, sbc = s

        # Handle special numbers
        if not sman:
            if s == fnan: return sys.hash_info.nan
            if s == finf: return sys.hash_info.inf
            if s == fninf: return -sys.hash_info.inf
        h = sman % HASH_MODULUS
        # Reduce the exponent modulo HASH_BITS (2**HASH_BITS == 1 mod the
        # Mersenne modulus), mapping negative exponents into [0, HASH_BITS).
        if sexp >= 0:
            sexp = sexp % HASH_BITS
        else:
            sexp = HASH_BITS - 1 - ((-1 - sexp) % HASH_BITS)
        h = (h << sexp) % HASH_MODULUS
        if ssign: h = -h
        # CPython reserves -1 as an error code for hashes.
        if h == -1: h = -2
        return int(h)
    else:
        try:
            # Try to be compatible with hash values for floats and ints
            return hash(to_float(s, strict=1))
        except OverflowError:
            # We must unfortunately sacrifice compatibility with ints here.
            # We could do hash(man << exp) when the exponent is positive, but
            # this would cause unreasonable inefficiency for large numbers.
            return hash(s)
|
| 573 |
+
|
| 574 |
+
def mpf_cmp(s, t):
    """Compare the raw mpfs s and t. Return -1 if s < t, 0 if s == t,
    and 1 if s > t. (Same convention as Python's cmp() function.)"""

    # In principle, a comparison amounts to determining the sign of s-t.
    # A full subtraction is relatively slow, however, so we first try to
    # look at the components.
    ssign, sman, sexp, sbc = s
    tsign, tman, texp, tbc = t

    # Handle zeros and special numbers
    if not sman or not tman:
        if s == fzero: return -mpf_sign(t)
        if t == fzero: return mpf_sign(s)
        if s == t: return 0
        # Follow same convention as Python's cmp for float nan
        if t == fnan: return 1
        if s == finf: return 1
        if t == fninf: return 1
        return -1
    # Different sides of zero
    if ssign != tsign:
        if not ssign: return 1
        return -1
    # This reduces to direct integer comparison
    if sexp == texp:
        if sman == tman:
            return 0
        if sman > tman:
            if ssign: return -1
            else: return 1
        else:
            if ssign: return 1
            else: return -1
    # Check position of the highest set bit in each number. If
    # different, there is certainly an inequality.
    a = sbc + sexp
    b = tbc + texp
    if ssign:
        if a < b: return 1
        if a > b: return -1
    else:
        if a < b: return -1
        if a > b: return 1

    # Both numbers have the same highest bit. Subtract to find
    # how the lower bits compare. (5 bits of precision suffice: only
    # the sign of the difference is needed.)
    delta = mpf_sub(s, t, 5, round_floor)
    if delta[0]:
        return -1
    return 1
|
| 625 |
+
|
| 626 |
+
def mpf_lt(s, t):
    # Strict less-than; any comparison involving nan is False.
    if fnan in (s, t):
        return False
    return mpf_cmp(s, t) < 0
|
| 630 |
+
|
| 631 |
+
def mpf_le(s, t):
    # Less-than-or-equal; any comparison involving nan is False.
    if fnan in (s, t):
        return False
    return mpf_cmp(s, t) <= 0
|
| 635 |
+
|
| 636 |
+
def mpf_gt(s, t):
    # Strict greater-than; any comparison involving nan is False.
    if fnan in (s, t):
        return False
    return mpf_cmp(s, t) > 0
|
| 640 |
+
|
| 641 |
+
def mpf_ge(s, t):
    # Greater-than-or-equal; any comparison involving nan is False.
    if fnan in (s, t):
        return False
    return mpf_cmp(s, t) >= 0
|
| 645 |
+
|
| 646 |
+
def mpf_min_max(seq):
    # Return (minimum, maximum) of a nonempty sequence of raw mpfs.
    # Locals renamed to avoid shadowing the builtins min/max.
    lo = hi = seq[0]
    for x in seq[1:]:
        if mpf_lt(x, lo):
            lo = x
        if mpf_gt(x, hi):
            hi = x
    return lo, hi
|
| 652 |
+
|
| 653 |
+
def mpf_pos(s, prec=0, rnd=round_fast):
    """Calculate 0+s for a raw mpf (i.e., just round s to the specified
    precision)."""
    if not prec:
        return s
    sign, man, exp, bc = s
    # Specials (zero mantissa, nonzero exponent) are unaffected by rounding.
    if exp and not man:
        return s
    return normalize1(sign, man, exp, bc, prec, rnd)
|
| 662 |
+
|
| 663 |
+
def mpf_neg(s, prec=None, rnd=round_fast):
    """Negate a raw mpf (return -s), rounding the result to the
    specified precision. The prec argument can be omitted to do the
    operation exactly."""
    sign, man, exp, bc = s
    if man:
        flipped = 1 - sign
        if not prec:
            return (flipped, man, exp, bc)
        return normalize1(flipped, man, exp, bc, prec, rnd)
    # Special values: infinities flip sign; zero and nan are unchanged.
    if exp:
        if s == finf: return fninf
        if s == fninf: return finf
    return s
|
| 676 |
+
|
| 677 |
+
def mpf_abs(s, prec=None, rnd=round_fast):
    """Return abs(s) of the raw mpf s, rounded to the specified
    precision. The prec argument can be omitted to generate an
    exact result."""
    sign, man, exp, bc = s
    if (not man) and exp:
        # -inf becomes +inf; +inf and nan pass through unchanged.
        return finf if s == fninf else s
    if prec:
        return normalize1(0, man, exp, bc, prec, rnd)
    if sign:
        return (0, man, exp, bc)
    return s
|
| 691 |
+
|
| 692 |
+
def mpf_sign(s):
    """Return -1, 0, or 1 (as a Python int, not a raw mpf) depending on
    whether s is negative, zero, or positive. (Nan is taken to give 0.)"""
    sign, man, exp, bc = s
    if man:
        return -1 if sign else 1
    # Zero mantissa: zero, nan, or an infinity.
    if s == finf: return 1
    if s == fninf: return -1
    return 0
|
| 701 |
+
|
| 702 |
+
def mpf_add(s, t, prec=0, rnd=round_fast, _sub=0):
    """
    Add the two raw mpf values s and t.

    With prec=0, no rounding is performed. Note that this can
    produce a very large mantissa (potentially too large to fit
    in memory) if exponents are far apart.

    With _sub=1, computes s - t instead (used by mpf_sub).
    """
    ssign, sman, sexp, sbc = s
    tsign, tman, texp, tbc = t
    # Flipping t's sign bit turns addition into subtraction.
    tsign ^= _sub
    # Standard case: two nonzero, regular numbers
    if sman and tman:
        offset = sexp - texp
        if offset:
            if offset > 0:
                # Outside precision range; only need to perturb
                # (t only affects rounding; add/subtract 1 ulp of slack).
                if offset > 100 and prec:
                    delta = sbc + sexp - tbc - texp
                    if delta > prec + 4:
                        offset = prec + 4
                        sman <<= offset
                        if tsign == ssign: sman += 1
                        else: sman -= 1
                        return normalize1(ssign, sman, sexp-offset,
                            bitcount(sman), prec, rnd)
                # Add
                if ssign == tsign:
                    man = tman + (sman << offset)
                # Subtract
                else:
                    if ssign: man = tman - (sman << offset)
                    else: man = (sman << offset) - tman
                    if man >= 0:
                        ssign = 0
                    else:
                        man = -man
                        ssign = 1
                bc = bitcount(man)
                return normalize1(ssign, man, texp, bc, prec or bc, rnd)
            elif offset < 0:
                # Mirror image of the case above with s and t swapped.
                # Outside precision range; only need to perturb
                if offset < -100 and prec:
                    delta = tbc + texp - sbc - sexp
                    if delta > prec + 4:
                        offset = prec + 4
                        tman <<= offset
                        if ssign == tsign: tman += 1
                        else: tman -= 1
                        return normalize1(tsign, tman, texp-offset,
                            bitcount(tman), prec, rnd)
                # Add
                if ssign == tsign:
                    man = sman + (tman << -offset)
                # Subtract
                else:
                    if tsign: man = sman - (tman << -offset)
                    else: man = (tman << -offset) - sman
                    if man >= 0:
                        ssign = 0
                    else:
                        man = -man
                        ssign = 1
                bc = bitcount(man)
                return normalize1(ssign, man, sexp, bc, prec or bc, rnd)
        # Equal exponents; no shifting necessary
        if ssign == tsign:
            man = tman + sman
        else:
            if ssign: man = tman - sman
            else: man = sman - tman
            if man >= 0:
                ssign = 0
            else:
                man = -man
                ssign = 1
        bc = bitcount(man)
        return normalize(ssign, man, texp, bc, prec or bc, rnd)
    # Handle zeros and special numbers
    if _sub:
        t = mpf_neg(t)
    if not sman:
        if sexp:
            # s is inf or nan; inf + (-inf) gives nan
            if s == t or tman or not texp:
                return s
            return fnan
        # s is zero: result is t (rounded if regular)
        if tman:
            return normalize1(tsign, tman, texp, tbc, prec or tbc, rnd)
        return t
    if texp:
        # t is inf or nan and s is regular
        return t
    # t is zero: result is s, rounded
    if sman:
        return normalize1(ssign, sman, sexp, sbc, prec or sbc, rnd)
    return s
|
| 796 |
+
|
| 797 |
+
def mpf_sub(s, t, prec=0, rnd=round_fast):
    """Return the difference of two raw mpfs, s-t. This function is
    simply a wrapper of mpf_add that changes the sign of t."""
    # _sub=1 makes mpf_add negate t internally.
    return mpf_add(s, t, prec, rnd, 1)
|
| 801 |
+
|
| 802 |
+
def mpf_sum(xs, prec=0, rnd=round_fast, absolute=False):
    """
    Sum a list of mpf values efficiently and accurately
    (typically no temporary roundoff occurs). If prec=0,
    the final result will not be rounded either.

    There may be roundoff error or cancellation if extremely
    large exponent differences occur.

    With absolute=True, sums the absolute values.
    """
    # Running sum is kept exactly as the fixed-point value man * 2**exp.
    man = 0
    exp = 0
    max_extra_prec = prec*2 or 1000000 # XXX
    special = None
    for x in xs:
        xsign, xman, xexp, xbc = x
        if xman:
            if xsign and not absolute:
                xman = -xman
            delta = xexp - exp
            if xexp >= exp:
                # x much larger than existing sum?
                # first: quick test
                if (delta > max_extra_prec) and \
                    ((not man) or delta-bitcount(abs(man)) > max_extra_prec):
                    # The old sum cannot affect the rounded result; replace it.
                    man = xman
                    exp = xexp
                else:
                    man += (xman << delta)
            else:
                delta = -delta
                # x much smaller than existing sum?
                if delta-xbc > max_extra_prec:
                    # x is negligible (unless the sum is still zero).
                    if not man:
                        man, exp = xman, xexp
                else:
                    # Rescale the sum down to x's exponent and accumulate.
                    man = (man << delta) + xman
                    exp = xexp
        elif xexp:
            # Special value (inf/nan): accumulate separately via mpf_add.
            if absolute:
                x = mpf_abs(x)
            special = mpf_add(special or fzero, x, 1)
    # Will be inf or nan
    if special:
        return special
    return from_man_exp(man, exp, prec, rnd)
|
| 849 |
+
|
| 850 |
+
def gmpy_mpf_mul(s, t, prec=0, rnd=round_fast):
    """Multiply two raw mpfs"""
    ssign, sman, sexp, sbc = s
    tsign, tman, texp, tbc = t
    sign = ssign ^ tsign
    man = sman*tman
    if man:
        # gmpy's bitcount is fast, so compute the product's size exactly.
        bc = bitcount(man)
        if prec:
            return normalize1(sign, man, sexp+texp, bc, prec, rnd)
        else:
            return (sign, man, sexp+texp, bc)
    # At least one operand has zero mantissa: zero or a special value.
    s_special = (not sman) and sexp
    t_special = (not tman) and texp
    if not s_special and not t_special:
        return fzero
    if fnan in (s, t): return fnan
    # Make s the special (infinite) operand.
    if (not tman) and texp: s, t = t, s
    if t == fzero: return fnan  # inf * 0
    return {1:finf, -1:fninf}[mpf_sign(s) * mpf_sign(t)]
|
| 870 |
+
|
| 871 |
+
def gmpy_mpf_mul_int(s, n, prec, rnd=round_fast):
    """Multiply by a Python integer."""
    sign, man, exp, bc = s
    if not man:
        # Zero or special: let the general multiplication handle it.
        return mpf_mul(s, from_int(n), prec, rnd)
    if not n:
        return fzero
    if n < 0:
        sign ^= 1
        n = -n
    man *= n
    return normalize(sign, man, exp, bitcount(man), prec, rnd)
|
| 883 |
+
|
| 884 |
+
def python_mpf_mul(s, t, prec=0, rnd=round_fast):
    """Multiply two raw mpfs"""
    ssign, sman, sexp, sbc = s
    tsign, tman, texp, tbc = t
    sign = ssign ^ tsign
    man = sman*tman
    if man:
        # The product's bitcount is sbc+tbc or sbc+tbc-1; start from the
        # lower value and add 1 if the top bit at that position is set.
        bc = sbc + tbc - 1
        bc += int(man>>bc)
        if prec:
            return normalize1(sign, man, sexp+texp, bc, prec, rnd)
        else:
            return (sign, man, sexp+texp, bc)
    # At least one operand has zero mantissa: zero or a special value.
    s_special = (not sman) and sexp
    t_special = (not tman) and texp
    if not s_special and not t_special:
        return fzero
    if fnan in (s, t): return fnan
    # Make s the special (infinite) operand.
    if (not tman) and texp: s, t = t, s
    if t == fzero: return fnan  # inf * 0
    return {1:finf, -1:fninf}[mpf_sign(s) * mpf_sign(t)]
|
| 905 |
+
|
| 906 |
+
def python_mpf_mul_int(s, n, prec, rnd=round_fast):
    """Multiply by a Python integer."""
    sign, man, exp, bc = s
    if not man:
        # Zero or special: let the general multiplication handle it.
        return mpf_mul(s, from_int(n), prec, rnd)
    if not n:
        return fzero
    if n < 0:
        sign ^= 1
        n = -n
    man *= n
    # Generally n will be small
    if n < 1024:
        bc += bctable[int(n)] - 1
    else:
        bc += bitcount(n) - 1
    # bc may be off by one; correct by testing the top bit.
    bc += int(man>>bc)
    return normalize(sign, man, exp, bc, prec, rnd)
|
| 924 |
+
|
| 925 |
+
|
| 926 |
+
# Bind the multiplication implementations matching the active backend:
# the gmpy variants rely on gmpy's fast bitcount.
if BACKEND == 'gmpy':
    mpf_mul = gmpy_mpf_mul
    mpf_mul_int = gmpy_mpf_mul_int
else:
    mpf_mul = python_mpf_mul
    mpf_mul_int = python_mpf_mul_int
|
| 932 |
+
|
| 933 |
+
def mpf_shift(s, n):
    """Quickly multiply the raw mpf s by 2**n without rounding."""
    sign, man, exp, bc = s
    if man:
        return (sign, man, exp + n, bc)
    # Zero and special values are unchanged by scaling.
    return s
|
| 939 |
+
|
| 940 |
+
def mpf_frexp(x):
    """Convert x = y*2**n to (y, n) with abs(y) in [0.5, 1) if nonzero"""
    sign, man, exp, bc = x
    if man:
        # bc+exp is the position of the highest bit, so shifting it out
        # leaves a magnitude in [0.5, 1).
        n = bc + exp
        return mpf_shift(x, -n), n
    if x != fzero:
        # inf/nan have no frexp decomposition
        raise ValueError
    return (fzero, 0)
|
| 949 |
+
|
| 950 |
+
def mpf_div(s, t, prec, rnd=round_fast):
    """Floating-point division"""
    ssign, sman, sexp, sbc = s
    tsign, tman, texp, tbc = t
    # Handle zeros and special values first
    if not sman or not tman:
        if s == fzero:
            if t == fzero: raise ZeroDivisionError
            if t == fnan: return fnan
            return fzero
        if t == fzero:
            raise ZeroDivisionError
        s_special = (not sman) and sexp
        t_special = (not tman) and texp
        if s_special and t_special:
            return fnan  # inf/inf
        if s == fnan or t == fnan:
            return fnan
        if not t_special:
            if t == fzero:
                return fnan
            # inf divided by a regular number: signed infinity
            return {1:finf, -1:fninf}[mpf_sign(s) * mpf_sign(t)]
        # regular number divided by inf
        return fzero
    sign = ssign ^ tsign
    if tman == 1:
        # Division by a power of two is exact
        return normalize1(sign, sman, sexp-texp, sbc, prec, rnd)
    # Same strategy as for addition: if there is a remainder, perturb
    # the result a few bits outside the precision range before rounding
    extra = prec - sbc + tbc + 5
    if extra < 5:
        extra = 5
    quot, rem = divmod(sman<<extra, tman)
    if rem:
        # Set the sticky bit so rounding sees the inexactness
        quot = (quot<<1) + 1
        extra += 1
        return normalize1(sign, quot, sexp-texp-extra, bitcount(quot), prec, rnd)
    return normalize(sign, quot, sexp-texp-extra, bitcount(quot), prec, rnd)
|
| 986 |
+
|
| 987 |
+
def mpf_rdiv_int(n, t, prec, rnd=round_fast):
    """Floating-point division n/t with a Python integer as numerator"""
    sign, man, exp, bc = t
    if not n or not man:
        # Zero numerator or special denominator: defer to the general routine
        return mpf_div(from_int(n), t, prec, rnd)
    if n < 0:
        sign ^= 1
        n = -n
    extra = prec + bc + 5
    quot, rem = divmod(n<<extra, man)
    if rem:
        # Inexact: perturb the quotient one ulp outside the precision range
        quot = (quot<<1) + 1
        extra += 1
        return normalize1(sign, quot, -exp-extra, bitcount(quot), prec, rnd)
    return normalize(sign, quot, -exp-extra, bitcount(quot), prec, rnd)
|
| 1002 |
+
|
| 1003 |
+
def mpf_mod(s, t, prec, rnd=round_fast):
    # Floating-point modulo s % t, computed exactly on shifted integer
    # mantissas (Python's % semantics for the signs).
    ssign, sman, sexp, sbc = s
    tsign, tman, texp, tbc = t
    if ((not sman) and sexp) or ((not tman) and texp):
        # Either operand is inf/nan
        return fnan
    # Important special case: do nothing if t is larger
    if ssign == tsign and texp > sexp+sbc:
        return s
    # Another important special case: this allows us to do e.g. x % 1.0
    # to find the fractional part of x, and it will work when x is huge.
    if tman == 1 and sexp > texp+tbc:
        return fzero
    # Align both mantissas to a common exponent so % is exact
    base = min(sexp, texp)
    sman = (-1)**ssign * sman
    tman = (-1)**tsign * tman
    man = (sman << (sexp-base)) % (tman << (texp-base))
    if man >= 0:
        sign = 0
    else:
        man = -man
        sign = 1
    return normalize(sign, man, base, bitcount(man), prec, rnd)
|
| 1025 |
+
|
| 1026 |
+
# Rounding mode to use when computing an intermediate reciprocal so that
# the final directed bound is preserved: down/up and floor/ceiling swap,
# round-to-nearest is unchanged.
reciprocal_rnd = {
  round_down : round_up,
  round_up : round_down,
  round_floor : round_ceiling,
  round_ceiling : round_floor,
  round_nearest : round_nearest
}
|
| 1033 |
+
|
| 1034 |
+
# Rounding mode to use for a negated operand: negation mirrors the number
# line, so floor and ceiling swap while the sign-symmetric modes
# (down/up/nearest) are unchanged.
negative_rnd = {
  round_down : round_down,
  round_up : round_up,
  round_floor : round_ceiling,
  round_ceiling : round_floor,
  round_nearest : round_nearest
}
|
| 1041 |
+
|
| 1042 |
+
def mpf_pow_int(s, n, prec, rnd=round_fast):
    """Compute s**n, where s is a raw mpf and n is a Python integer."""
    sign, man, exp, bc = s

    # Special values: inf/-inf/nan
    if (not man) and exp:
        if s == finf:
            if n > 0: return s
            if n == 0: return fnan
            return fzero
        if s == fninf:
            # Sign of the result alternates with the parity of n
            if n > 0: return [finf, fninf][n & 1]
            if n == 0: return fnan
            return fzero
        return fnan

    n = int(n)
    if n == 0: return fone
    if n == 1: return mpf_pos(s, prec, rnd)
    if n == 2:
        # Squaring: done exactly, then rounded once
        _, man, exp, bc = s
        if not man:
            return fzero
        man = man*man
        if man == 1:
            return (0, MPZ_ONE, exp+exp, 1)
        # bitcount of a product is within 1 of the sum of the bitcounts;
        # fix up the estimate with a table lookup on the top bits
        bc = bc + bc - 2
        bc += bctable[int(man>>bc)]
        return normalize1(0, man, exp+exp, bc, prec, rnd)
    if n == -1: return mpf_div(fone, s, prec, rnd)
    if n < 0:
        # s**-n via (s**n)**-1, with flipped rounding for the reciprocal
        inverse = mpf_pow_int(s, -n, prec+5, reciprocal_rnd[rnd])
        return mpf_div(fone, inverse, prec, rnd)

    # Negative result iff the base is negative and n is odd
    result_sign = sign & n

    # Use exact integer power when the exact mantissa is small
    if man == 1:
        return (result_sign, MPZ_ONE, exp*n, 1)
    if bc*n < 1000:
        man **= n
        return normalize1(result_sign, man, exp*n, bitcount(man), prec, rnd)

    # Use directed rounding all the way through to maintain rigorous
    # bounds for interval arithmetic
    rounds_down = (rnd == round_nearest) or \
        shifts_down[rnd][result_sign]

    # Now we perform binary exponentiation. Need to estimate precision
    # to avoid rounding errors from temporary operations. Roughly log_2(n)
    # operations are performed.
    workprec = prec + 4*bitcount(n) + 4
    _, pm, pe, pbc = fone
    while 1:
        if n & 1:
            # Multiply the accumulator by the current power of s
            pm = pm*man
            pe = pe+exp
            pbc += bc - 2
            pbc = pbc + bctable[int(pm >> pbc)]
            if pbc > workprec:
                # Truncate toward the chosen direction to keep rigor
                if rounds_down:
                    pm = pm >> (pbc-workprec)
                else:
                    pm = -((-pm) >> (pbc-workprec))
                pe += pbc - workprec
                pbc = workprec
            n -= 1
            if not n:
                break
        # Square the running power of s
        man = man*man
        exp = exp+exp
        bc = bc + bc - 2
        bc = bc + bctable[int(man >> bc)]
        if bc > workprec:
            if rounds_down:
                man = man >> (bc-workprec)
            else:
                man = -((-man) >> (bc-workprec))
            exp += bc - workprec
            bc = workprec
        n = n // 2

    return normalize(result_sign, pm, pe, pbc, prec, rnd)
|
| 1124 |
+
|
| 1125 |
+
|
| 1126 |
+
def mpf_perturb(x, eps_sign, prec, rnd):
    """
    For nonzero x, calculate x + eps with directed rounding, where
    eps < prec relatively and eps has the given sign (0 for
    positive, 1 for negative).

    With rounding to nearest, this is taken to simply normalize
    x to the given precision.
    """
    if rnd == round_nearest:
        return mpf_pos(x, prec, rnd)
    sign, man, exp, bc = x
    # One unit just below the last retained bit of x
    eps = (eps_sign, MPZ_ONE, exp+bc-prec-1, 1)
    if sign:
        away = (rnd in (round_down, round_ceiling)) ^ eps_sign
    else:
        away = (rnd in (round_up, round_ceiling)) ^ eps_sign
    if away:
        # The perturbation pushes against the rounding direction, so it
        # must actually be added before rounding
        return mpf_add(x, eps, prec, rnd)
    else:
        # The perturbation is absorbed by rounding x alone
        return mpf_pos(x, prec, rnd)
|
| 1147 |
+
|
| 1148 |
+
|
| 1149 |
+
#----------------------------------------------------------------------------#
|
| 1150 |
+
# Radix conversion #
|
| 1151 |
+
#----------------------------------------------------------------------------#
|
| 1152 |
+
|
| 1153 |
+
def to_digits_exp(s, dps):
    """Helper function for representing the floating-point number s as
    a decimal with dps digits. Returns (sign, string, exponent) where
    sign is '' or '-', string is the digit string, and exponent is
    the decimal exponent as an int.

    If inexact, the decimal representation is rounded toward zero."""

    # Extract sign first so it doesn't mess up the string digit count
    if s[0]:
        sign = '-'
        s = mpf_neg(s)
    else:
        sign = ''
    _sign, man, exp, bc = s

    if not man:
        return '', '0', 0

    # Binary working precision corresponding to dps decimal digits
    bitprec = int(dps * math.log(10,2)) + 10

    # Cut down to size
    # TODO: account for precision when doing this
    exp_from_1 = exp + bc
    if abs(exp_from_1) > 3500:
        from .libelefun import mpf_ln2, mpf_ln10
        # Set b = int(exp * log(2)/log(10))
        # If exp is huge, we must use high-precision arithmetic to
        # find the nearest power of ten
        expprec = bitcount(abs(exp)) + 5
        tmp = from_int(exp)
        tmp = mpf_mul(tmp, mpf_ln2(expprec))
        tmp = mpf_div(tmp, mpf_ln10(expprec), expprec)
        b = to_int(tmp)
        s = mpf_div(s, mpf_pow_int(ften, b, bitprec), bitprec)
        _sign, man, exp, bc = s
        exponent = b
    else:
        exponent = 0

    # First, calculate mantissa digits by converting to a binary
    # fixed-point number and then converting that number to
    # a decimal fixed-point number.
    fixprec = max(bitprec - exp - bc, 0)
    fixdps = int(fixprec / math.log(10,2) + 0.5)
    sf = to_fixed(s, fixprec)
    sd = bin_to_radix(sf, fixprec, 10, fixdps)
    digits = numeral(sd, base=10, size=dps)

    # Adjust the decimal exponent for the position of the leading digit
    exponent += len(digits) - fixdps - 1
    return sign, digits, exponent
|
| 1204 |
+
|
| 1205 |
+
def to_str(s, dps, strip_zeros=True, min_fixed=None, max_fixed=None,
    show_zero_exponent=False):
    """
    Convert a raw mpf to a decimal floating-point literal with at
    most `dps` decimal digits in the mantissa (not counting extra zeros
    that may be inserted for visual purposes).

    The number will be printed in fixed-point format if the position
    of the leading digit is strictly between min_fixed
    (default = min(-dps/3,-5)) and max_fixed (default = dps).

    To force fixed-point format always, set min_fixed = -inf,
    max_fixed = +inf. To force floating-point format, set
    min_fixed >= max_fixed.

    The literal is formatted so that it can be parsed back to a number
    by to_str, float() or Decimal().
    """

    # Special numbers
    if not s[1]:
        if s == fzero:
            if dps: t = '0.0'
            else: t = '.0'
            if show_zero_exponent:
                t += 'e+0'
            return t
        if s == finf: return '+inf'
        if s == fninf: return '-inf'
        if s == fnan: return 'nan'
        raise ValueError

    if min_fixed is None: min_fixed = min(-(dps//3), -5)
    if max_fixed is None: max_fixed = dps

    # to_digits_exp rounds to floor.
    # This sometimes kills some instances of "...00001"
    sign, digits, exponent = to_digits_exp(s, dps+3)

    # No digits: show only .0; round exponent to nearest
    if not dps:
        if digits[0] in '56789':
            exponent += 1
        digits = ".0"

    else:
        # Rounding up kills some instances of "...99999"
        if len(digits) > dps and digits[dps] in '56789':
            digits = digits[:dps]
            # Propagate the carry through trailing 9s
            i = dps - 1
            while i >= 0 and digits[i] == '9':
                i -= 1
            if i >= 0:
                digits = digits[:i] + str(int(digits[i]) + 1) + '0' * (dps - i - 1)
            else:
                # All digits were 9: the carry bumps the exponent
                digits = '1' + '0' * (dps - 1)
                exponent += 1
        else:
            digits = digits[:dps]

        # Prettify numbers close to unit magnitude
        if min_fixed < exponent < max_fixed:
            if exponent < 0:
                digits = ("0"*int(-exponent)) + digits
                split = 1
            else:
                split = exponent + 1
                if split > dps:
                    digits += "0"*(split-dps)
            exponent = 0
        else:
            split = 1

        digits = (digits[:split] + "." + digits[split:])

        if strip_zeros:
            # Clean up trailing zeros
            digits = digits.rstrip('0')
            if digits[-1] == ".":
                digits += "0"

    if exponent == 0 and dps and not show_zero_exponent: return sign + digits
    if exponent >= 0: return sign + digits + "e+" + str(exponent)
    if exponent < 0: return sign + digits + "e" + str(exponent)
|
| 1289 |
+
|
| 1290 |
+
def str_to_man_exp(x, base=10):
    """Helper function for from_str."""
    x = x.lower().rstrip('l')
    # Verify that the input is a valid float literal
    float(x)
    # Peel off an optional decimal exponent part
    mantpart, esep, epart = x.partition('e')
    exp = int(epart) if esep else 0
    # Fold a fractional part into the mantissa, adjusting the exponent
    intpart, dsep, fracpart = mantpart.partition('.')
    if dsep:
        fracpart = fracpart.rstrip('0')
        exp -= len(fracpart)
        mantpart = intpart + fracpart
    return MPZ(int(mantpart, base)), exp
|
| 1310 |
+
|
| 1311 |
+
# Literal spellings of the special values accepted by from_str
special_str = {'inf':finf, '+inf':finf, '-inf':fninf, 'nan':fnan}
|
| 1312 |
+
|
| 1313 |
+
def from_str(x, prec, rnd=round_fast):
    """Create a raw mpf from a decimal literal, rounding in the
    specified direction if the input number cannot be represented
    exactly as a binary floating-point number with the given number of
    bits. The literal syntax accepted is the same as for Python
    floats.

    TODO: the rounding does not work properly for large exponents.
    """
    x = x.lower().strip()
    if x in special_str:
        return special_str[x]

    # Fraction literal "p/q"
    if '/' in x:
        p, q = x.split('/')
        p, q = p.rstrip('l'), q.rstrip('l')
        return from_rational(int(p), int(q), prec, rnd)

    man, exp = str_to_man_exp(x, base=10)

    # XXX: appropriate cutoffs & track direction
    # note no factors of 5
    if abs(exp) > 400:
        # Huge exponent: scale by 10**exp in floating point with guard bits
        s = from_int(man, prec+10)
        s = mpf_mul(s, mpf_pow_int(ften, exp, prec+10), prec, rnd)
    else:
        # Small exponent: exact integer or rational conversion
        if exp >= 0:
            s = from_int(man * 10**exp, prec, rnd)
        else:
            s = from_rational(man, 10**-exp, prec, rnd)
    return s
|
| 1344 |
+
|
| 1345 |
+
# Binary string conversion. These are currently mainly used for debugging
|
| 1346 |
+
# and could use some improvement in the future
|
| 1347 |
+
|
| 1348 |
+
def from_bstr(x):
    # Parse a binary-digit literal (optionally with a sign, radix point
    # and "e" exponent) into a raw mpf at its natural precision.
    man, exp = str_to_man_exp(x, base=2)
    man = MPZ(man)
    if man < 0:
        sign, man = 1, -man
    else:
        sign = 0
    bc = bitcount(man)
    return normalize(sign, man, exp, bc, bc, round_floor)
|
| 1357 |
+
|
| 1358 |
+
def to_bstr(x):
    # Render a raw mpf as "<sign><binary mantissa>e<exponent>"
    sign, man, exp, bc = x
    prefix = '-' if sign else ''
    return prefix + numeral(man, size=bitcount(man), base=2) + ("e%i" % exp)
|
| 1361 |
+
|
| 1362 |
+
|
| 1363 |
+
#----------------------------------------------------------------------------#
|
| 1364 |
+
# Square roots #
|
| 1365 |
+
#----------------------------------------------------------------------------#
|
| 1366 |
+
|
| 1367 |
+
|
| 1368 |
+
def mpf_sqrt(s, prec, rnd=round_fast):
    """
    Compute the square root of a nonnegative mpf value. The
    result is correctly rounded.
    """
    sign, man, exp, bc = s
    if sign:
        raise ComplexResult("square root of a negative number")
    if not man:
        # 0 and the nonnegative special values map to themselves
        return s
    if exp & 1:
        # Make the exponent even so it can be halved exactly
        exp -= 1
        man <<= 1
        bc += 1
    elif man == 1:
        # Exact even power of two: result is an exact power of two
        return normalize1(sign, man, exp//2, bc, prec, rnd)
    # Shift so the integer sqrt carries about 2*prec bits
    shift = max(4, 2*prec-bc+4)
    shift += shift & 1
    if rnd in 'fd':
        # Rounding down/floor: a truncated integer sqrt suffices
        man = isqrt(man<<shift)
    else:
        man, rem = sqrtrem(man<<shift)
        # Perturb up
        if rem:
            man = (man<<1)+1
            shift += 2
    return from_man_exp(man, (exp-shift)//2, prec, rnd)
|
| 1395 |
+
|
| 1396 |
+
def mpf_hypot(x, y, prec, rnd=round_fast):
    """Compute the Euclidean norm sqrt(x**2 + y**2) of two raw mpfs
    x and y."""
    # With a zero component the norm reduces to the other's absolute value
    if y == fzero:
        return mpf_abs(x, prec, rnd)
    if x == fzero:
        return mpf_abs(y, prec, rnd)
    # Exact squares, summed with a few guard bits, then rounded sqrt
    square_sum = mpf_add(mpf_mul(x, x), mpf_mul(y, y), prec+4)
    return mpf_sqrt(square_sum, prec, rnd)
|
| 1403 |
+
|
| 1404 |
+
|
| 1405 |
+
# When running under Sage, transparently replace the basic arithmetic
# functions with Sage's compiled implementations, if available.
if BACKEND == 'sage':
    try:
        import sage.libs.mpmath.ext_libmp as ext_lib
        mpf_add = ext_lib.mpf_add
        mpf_sub = ext_lib.mpf_sub
        mpf_mul = ext_lib.mpf_mul
        mpf_div = ext_lib.mpf_div
        mpf_sqrt = ext_lib.mpf_sqrt
    except ImportError:
        pass
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/libmp/libmpi.py
ADDED
|
@@ -0,0 +1,935 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
Computational functions for interval arithmetic.
|
| 3 |
+
|
| 4 |
+
"""
|
| 5 |
+
|
| 6 |
+
from .backend import xrange
|
| 7 |
+
|
| 8 |
+
from .libmpf import (
|
| 9 |
+
ComplexResult,
|
| 10 |
+
round_down, round_up, round_floor, round_ceiling, round_nearest,
|
| 11 |
+
prec_to_dps, repr_dps, dps_to_prec,
|
| 12 |
+
bitcount,
|
| 13 |
+
from_float,
|
| 14 |
+
fnan, finf, fninf, fzero, fhalf, fone, fnone,
|
| 15 |
+
mpf_sign, mpf_lt, mpf_le, mpf_gt, mpf_ge, mpf_eq, mpf_cmp,
|
| 16 |
+
mpf_min_max,
|
| 17 |
+
mpf_floor, from_int, to_int, to_str, from_str,
|
| 18 |
+
mpf_abs, mpf_neg, mpf_pos, mpf_add, mpf_sub, mpf_mul, mpf_mul_int,
|
| 19 |
+
mpf_div, mpf_shift, mpf_pow_int,
|
| 20 |
+
from_man_exp, MPZ_ONE)
|
| 21 |
+
|
| 22 |
+
from .libelefun import (
|
| 23 |
+
mpf_log, mpf_exp, mpf_sqrt, mpf_atan, mpf_atan2,
|
| 24 |
+
mpf_pi, mod_pi2, mpf_cos_sin
|
| 25 |
+
)
|
| 26 |
+
|
| 27 |
+
from .gammazeta import mpf_gamma, mpf_rgamma, mpf_loggamma, mpc_loggamma
|
| 28 |
+
|
| 29 |
+
def mpi_str(s, prec):
    # Format the interval s = (a, b) as "[a, b]", using a few digits
    # beyond the nominal precision
    sa, sb = s
    dps = prec_to_dps(prec) + 5
    return "[%s, %s]" % (to_str(sa, dps), to_str(sb, dps))
    # Alternative midpoint +/- radius formatting, kept for reference:
    #dps = prec_to_dps(prec)
    #m = mpi_mid(s, prec)
    #d = mpf_shift(mpi_delta(s, 20), -1)
    #return "%s +/- %s" % (to_str(m, dps), to_str(d, 3))
|
| 37 |
+
|
| 38 |
+
# Point intervals for the constants 0 and 1
mpi_zero = (fzero, fzero)
mpi_one = (fone, fone)
|
| 40 |
+
|
| 41 |
+
def mpi_eq(s, t):
    # Intervals are plain endpoint tuples; equality is component-wise
    return s == t
|
| 43 |
+
|
| 44 |
+
def mpi_ne(s, t):
    # Negation of mpi_eq
    return s != t
|
| 46 |
+
|
| 47 |
+
def mpi_lt(s, t):
    # Tri-state interval comparison: True when all of s lies below all
    # of t, False when none of it does, None when the answer depends on
    # the actual points (overlapping intervals).
    s_lo, s_hi = s
    t_lo, t_hi = t
    if mpf_ge(s_lo, t_hi):
        return False
    if mpf_lt(s_hi, t_lo):
        return True
    return None
|
| 53 |
+
|
| 54 |
+
def mpi_le(s, t):
    # Tri-state "<=" comparison; None means undecidable (overlap)
    s_lo, s_hi = s
    t_lo, t_hi = t
    if mpf_gt(s_lo, t_hi):
        return False
    if mpf_le(s_hi, t_lo):
        return True
    return None
|
| 60 |
+
|
| 61 |
+
# Reversed-argument forms of the tri-state comparisons
def mpi_gt(s, t): return mpi_lt(t, s)
def mpi_ge(s, t): return mpi_le(t, s)
|
| 63 |
+
|
| 64 |
+
def mpi_add(s, t, prec=0):
    # Endpoint-wise sum rounded outward; a nan endpoint (inf + -inf)
    # is widened to the corresponding infinity.
    lo = mpf_add(s[0], t[0], prec, round_floor)
    hi = mpf_add(s[1], t[1], prec, round_ceiling)
    if lo == fnan:
        lo = fninf
    if hi == fnan:
        hi = finf
    return lo, hi
|
| 72 |
+
|
| 73 |
+
def mpi_sub(s, t, prec=0):
    # s - t: the lower bound pairs s's low with t's high endpoint and
    # vice versa, rounded outward; nan endpoints widen to infinities.
    lo = mpf_sub(s[0], t[1], prec, round_floor)
    hi = mpf_sub(s[1], t[0], prec, round_ceiling)
    if lo == fnan:
        lo = fninf
    if hi == fnan:
        hi = finf
    return lo, hi
|
| 81 |
+
|
| 82 |
+
def mpi_delta(s, prec):
    # Width of the interval, rounded up so it is never underestimated
    lo, hi = s
    return mpf_sub(hi, lo, prec, round_up)
|
| 85 |
+
|
| 86 |
+
def mpi_mid(s, prec):
    # Midpoint (a+b)/2; the halving is an exact exponent shift
    lo, hi = s
    return mpf_shift(mpf_add(lo, hi, prec, round_nearest), -1)
|
| 89 |
+
|
| 90 |
+
def mpi_pos(s, prec):
    # Re-round both endpoints outward to the requested precision
    lo, hi = s
    return (mpf_pos(lo, prec, round_floor),
            mpf_pos(hi, prec, round_ceiling))
|
| 95 |
+
|
| 96 |
+
def mpi_neg(s, prec=0):
    # Negation swaps the endpoints and negates them, rounding outward
    lo, hi = s
    return (mpf_neg(hi, prec, round_floor),
            mpf_neg(lo, prec, round_ceiling))
|
| 101 |
+
|
| 102 |
+
def mpi_abs(s, prec=0):
    # Absolute value of an interval, rounded outward
    sa, sb = s
    sas = mpf_sign(sa)
    sbs = mpf_sign(sb)
    # Both points nonnegative?
    if sas >= 0:
        a = mpf_pos(sa, prec, round_floor)
        b = mpf_pos(sb, prec, round_ceiling)
    # Upper point nonnegative?
    elif sbs >= 0:
        # Interval straddles zero: lower bound is exactly 0 and the
        # upper bound is the endpoint of larger magnitude
        a = fzero
        negsa = mpf_neg(sa)
        if mpf_lt(negsa, sb):
            b = mpf_pos(sb, prec, round_ceiling)
        else:
            b = mpf_pos(negsa, prec, round_ceiling)
    # Both negative?
    else:
        a = mpf_neg(sb, prec, round_floor)
        b = mpf_neg(sa, prec, round_ceiling)
    return a, b
|
| 123 |
+
|
| 124 |
+
# TODO: optimize
def mpi_mul_mpf(s, t, prec):
    # Multiply interval s by the point value t (as a degenerate interval)
    return mpi_mul(s, (t, t), prec)
|
| 127 |
+
|
| 128 |
+
def mpi_div_mpf(s, t, prec):
    # Divide interval s by the point value t (as a degenerate interval)
    return mpi_div(s, (t, t), prec)
|
| 130 |
+
|
| 131 |
+
def mpi_mul(s, t, prec=0):
    # Interval product, rounded outward. The endpoint sign combinations
    # determine which endpoint products bound the result; nan endpoints
    # (from 0*inf) are widened conservatively.
    sa, sb = s
    ta, tb = t
    sas = mpf_sign(sa)
    sbs = mpf_sign(sb)
    tas = mpf_sign(ta)
    tbs = mpf_sign(tb)
    if sas == sbs == 0:
        # Should maybe be undefined
        if ta == fninf or tb == finf:
            return fninf, finf
        return fzero, fzero
    if tas == tbs == 0:
        # Should maybe be undefined
        if sa == fninf or sb == finf:
            return fninf, finf
        return fzero, fzero
    if sas >= 0:
        # positive * positive
        if tas >= 0:
            a = mpf_mul(sa, ta, prec, round_floor)
            b = mpf_mul(sb, tb, prec, round_ceiling)
            if a == fnan: a = fzero
            if b == fnan: b = finf
        # positive * negative
        elif tbs <= 0:
            a = mpf_mul(sb, ta, prec, round_floor)
            b = mpf_mul(sa, tb, prec, round_ceiling)
            if a == fnan: a = fninf
            if b == fnan: b = fzero
        # positive * both signs
        else:
            a = mpf_mul(sb, ta, prec, round_floor)
            b = mpf_mul(sb, tb, prec, round_ceiling)
            if a == fnan: a = fninf
            if b == fnan: b = finf
    elif sbs <= 0:
        # negative * positive
        if tas >= 0:
            a = mpf_mul(sa, tb, prec, round_floor)
            b = mpf_mul(sb, ta, prec, round_ceiling)
            if a == fnan: a = fninf
            if b == fnan: b = fzero
        # negative * negative
        elif tbs <= 0:
            a = mpf_mul(sb, tb, prec, round_floor)
            b = mpf_mul(sa, ta, prec, round_ceiling)
            if a == fnan: a = fzero
            if b == fnan: b = finf
        # negative * both signs
        else:
            a = mpf_mul(sa, tb, prec, round_floor)
            b = mpf_mul(sa, ta, prec, round_ceiling)
            if a == fnan: a = fninf
            if b == fnan: b = finf
    else:
        # General case: perform all cross-multiplications and compare
        # Since the multiplications can be done exactly, we need only
        # do 4 (instead of 8: two for each rounding mode)
        cases = [mpf_mul(sa, ta), mpf_mul(sa, tb), mpf_mul(sb, ta), mpf_mul(sb, tb)]
        if fnan in cases:
            a, b = (fninf, finf)
        else:
            a, b = mpf_min_max(cases)
            a = mpf_pos(a, prec, round_floor)
            b = mpf_pos(b, prec, round_ceiling)
    return a, b
|
| 198 |
+
|
| 199 |
+
def mpi_square(s, prec=0):
    # Interval square; tighter than mpi_mul(s, s) because both factors
    # are the same variable, so the result is always >= 0
    sa, sb = s
    if mpf_ge(sa, fzero):
        a = mpf_mul(sa, sa, prec, round_floor)
        b = mpf_mul(sb, sb, prec, round_ceiling)
    elif mpf_le(sb, fzero):
        a = mpf_mul(sb, sb, prec, round_floor)
        b = mpf_mul(sa, sa, prec, round_ceiling)
    else:
        # Straddles zero: minimum is 0, maximum is the square of the
        # endpoint of larger magnitude
        sa = mpf_neg(sa)
        sa, sb = mpf_min_max([sa, sb])
        a = fzero
        b = mpf_mul(sb, sb, prec, round_ceiling)
    return a, b
|
| 213 |
+
|
| 214 |
+
def mpi_div(s, t, prec):
    # Interval quotient, rounded outward; a denominator containing zero
    # yields the whole extended real line
    sa, sb = s
    ta, tb = t
    sas = mpf_sign(sa)
    sbs = mpf_sign(sb)
    tas = mpf_sign(ta)
    tbs = mpf_sign(tb)
    # 0 / X
    if sas == sbs == 0:
        # 0 / <interval containing 0>
        if (tas < 0 and tbs > 0) or (tas == 0 or tbs == 0):
            return fninf, finf
        return fzero, fzero
    # Denominator contains both negative and positive numbers;
    # this should properly be a multi-interval, but the closest
    # match is the entire (extended) real line
    if tas < 0 and tbs > 0:
        return fninf, finf
    # Assume denominator to be nonnegative
    if tas < 0:
        return mpi_div(mpi_neg(s), mpi_neg(t), prec)
    # Division by zero
    # XXX: make sure all results make sense
    if tas == 0:
        # Numerator contains both signs?
        if sas < 0 and sbs > 0:
            return fninf, finf
        if tas == tbs:
            return fninf, finf
        # Numerator positive?
        if sas >= 0:
            a = mpf_div(sa, tb, prec, round_floor)
            b = finf
        if sbs <= 0:
            a = fninf
            b = mpf_div(sb, tb, prec, round_ceiling)
    # Division with positive denominator
    # We still have to handle nans resulting from inf/0 or inf/inf
    else:
        # Nonnegative numerator
        if sas >= 0:
            a = mpf_div(sa, tb, prec, round_floor)
            b = mpf_div(sb, ta, prec, round_ceiling)
            if a == fnan: a = fzero
            if b == fnan: b = finf
        # Nonpositive numerator
        elif sbs <= 0:
            a = mpf_div(sa, ta, prec, round_floor)
            b = mpf_div(sb, tb, prec, round_ceiling)
            if a == fnan: a = fninf
            if b == fnan: b = fzero
        # Numerator contains both signs?
        else:
            a = mpf_div(sa, ta, prec, round_floor)
            b = mpf_div(sb, ta, prec, round_ceiling)
            if a == fnan: a = fninf
            if b == fnan: b = finf
    return a, b
|
| 272 |
+
|
| 273 |
+
def mpi_pi(prec):
    # Enclosure of pi obtained by rounding the same approximation both ways
    return (mpf_pi(prec, round_floor), mpf_pi(prec, round_ceiling))
|
| 277 |
+
|
| 278 |
+
def mpi_exp(s, prec):
    """Interval exponential.

    exp is monotone increasing, so the endpoints map directly,
    with outward rounding to keep the enclosure valid.
    """
    lo, hi = s
    return (mpf_exp(lo, prec, round_floor),
            mpf_exp(hi, prec, round_ceiling))
|
| 284 |
+
|
| 285 |
+
def mpi_log(s, prec):
    """Interval natural logarithm.

    log is monotone increasing on its domain, so the endpoints map
    directly, with outward rounding.
    """
    lo, hi = s
    return (mpf_log(lo, prec, round_floor),
            mpf_log(hi, prec, round_ceiling))
|
| 291 |
+
|
| 292 |
+
def mpi_sqrt(s, prec):
    """Interval square root.

    sqrt is monotone increasing, so the endpoints map directly,
    with outward rounding.
    """
    lo, hi = s
    return (mpf_sqrt(lo, prec, round_floor),
            mpf_sqrt(hi, prec, round_ceiling))
|
| 298 |
+
|
| 299 |
+
def mpi_atan(s, prec):
    """Interval arctangent (monotone increasing; endpoints map directly
    with outward rounding)."""
    lo, hi = s
    return (mpf_atan(lo, prec, round_floor),
            mpf_atan(hi, prec, round_ceiling))
|
| 304 |
+
|
| 305 |
+
def mpi_pow_int(s, n, prec):
    """Raise the interval s to the integer power n.

    Negative n is handled as 1 / s**(-n) with extra working precision;
    n in {0, 1, 2} takes fast paths.  For general n, the sign analysis
    below ensures the result encloses the true range (in particular that
    even powers of sign-crossing intervals are nonnegative).
    """
    sa, sb = s
    if n < 0:
        # s**n = (1/s)**(-n); compute the positive power at higher precision
        return mpi_div((fone, fone), mpi_pow_int(s, -n, prec+20), prec)
    if n == 0:
        return (fone, fone)
    if n == 1:
        return s
    if n == 2:
        return mpi_square(s, prec)
    # Odd -- signs are preserved
    if n & 1:
        a = mpf_pow_int(sa, n, prec, round_floor)
        b = mpf_pow_int(sb, n, prec, round_ceiling)
    # Even -- important to ensure positivity
    else:
        sas = mpf_sign(sa)
        sbs = mpf_sign(sb)
        # Nonnegative?
        if sas >= 0:
            a = mpf_pow_int(sa, n, prec, round_floor)
            b = mpf_pow_int(sb, n, prec, round_ceiling)
        # Nonpositive?  Even power flips the ordering of the endpoints.
        elif sbs <= 0:
            a = mpf_pow_int(sb, n, prec, round_floor)
            b = mpf_pow_int(sa, n, prec, round_ceiling)
        # Mixed signs?  Range is [0, max(-sa, sb)**n].
        else:
            a = fzero
            # max(-a,b)**n
            sa = mpf_neg(sa)
            if mpf_ge(sa, sb):
                b = mpf_pow_int(sa, n, prec, round_ceiling)
            else:
                b = mpf_pow_int(sb, n, prec, round_ceiling)
    return a, b
|
| 341 |
+
|
| 342 |
+
def mpi_pow(s, t, prec):
    """Raise interval s to interval power t.

    If t is a point interval equal to an exact (finite) integer, or to
    1/2, delegate to the specialized integer power / sqrt routines.
    Otherwise compute exp(t*log(s)) with extra working precision.
    """
    ta, tb = t
    if ta == tb and ta not in (finf, fninf):
        # Exact integer exponent?
        if ta == from_int(to_int(ta)):
            return mpi_pow_int(s, to_int(ta), prec)
        # Exponent exactly 1/2 -> square root
        if ta == fhalf:
            return mpi_sqrt(s, prec)
    u = mpi_log(s, prec + 20)
    v = mpi_mul(u, t, prec + 20)
    return mpi_exp(v, prec)
|
| 352 |
+
|
| 353 |
+
def MIN(x, y):
    """Return the smaller of two raw mpf values (x on ties)."""
    return x if mpf_le(x, y) else y
|
| 357 |
+
|
| 358 |
+
def MAX(x, y):
    """Return the larger of two raw mpf values (x on ties)."""
    return x if mpf_ge(x, y) else y
|
| 362 |
+
|
| 363 |
+
def cos_sin_quadrant(x, wp):
    """Return (cos(x), sin(x), n) where n indexes which quadrant
    (multiple of pi/2) x falls in, as computed by mod_pi2.

    The quadrant index lets mpi_cos_sin decide whether an extremum of
    cos or sin lies between two endpoints.
    """
    sign, man, exp, bc = x
    if x == fzero:
        return fone, fzero, 0
    # TODO: combine evaluation code to avoid duplicate modulo
    c, s = mpf_cos_sin(x, wp)
    t, n, wp_ = mod_pi2(man, exp, exp+bc, 15)
    if sign:
        # Mirror the quadrant index for negative x
        n = -1-n
    return c, s, n
|
| 373 |
+
|
| 374 |
+
def mpi_cos_sin(x, prec):
    """Return enclosing intervals (cos(x), sin(x)) for an interval x.

    Strategy: evaluate both functions at the endpoints, then use the
    quadrant indices from cos_sin_quadrant to decide whether a maximum
    (+1) or minimum (-1) of cos or sin lies strictly inside x; if the
    interval spans four or more quadrants, both functions attain -1 and
    1.  Finally, the endpoint values are perturbed outward slightly so
    the returned interval is a guaranteed enclosure despite rounding.
    """
    a, b = x
    if a == b == fzero:
        return (fone, fone), (fzero, fzero)
    # Guaranteed to contain both -1 and 1
    if (finf in x) or (fninf in x):
        return (fnone, fone), (fnone, fone)
    wp = prec + 20
    ca, sa, na = cos_sin_quadrant(a, wp)
    cb, sb, nb = cos_sin_quadrant(b, wp)
    # Order the endpoint values (the functions may be decreasing)
    ca, cb = mpf_min_max([ca, cb])
    sa, sb = mpf_min_max([sa, sb])
    # Both functions are monotonic within one quadrant
    if na == nb:
        pass
    # Guaranteed to contain both -1 and 1
    elif nb - na >= 4:
        return (fnone, fone), (fnone, fone)
    else:
        # cos has maximum between a and b
        if na//4 != nb//4:
            cb = fone
        # cos has minimum
        if (na-2)//4 != (nb-2)//4:
            ca = fnone
        # sin has maximum
        if (na-1)//4 != (nb-1)//4:
            sb = fone
        # sin has minimum
        if (na-3)//4 != (nb-3)//4:
            sa = fnone
    # Perturb to force interval rounding
    more = from_man_exp((MPZ_ONE<<wp) + (MPZ_ONE<<10), -wp)
    less = from_man_exp((MPZ_ONE<<wp) - (MPZ_ONE<<10), -wp)
    def finalize(v, rounding):
        # Scale v slightly away from zero (floor) or toward zero
        # (ceiling), depending on its sign, then clamp to [-1, 1].
        if bool(v[0]) == (rounding == round_floor):
            p = more
        else:
            p = less
        v = mpf_mul(v, p, prec, rounding)
        sign, man, exp, bc = v
        if exp+bc >= 1:
            # Magnitude reached 1 after perturbation: clamp
            if sign:
                return fnone
            return fone
        return v
    ca = finalize(ca, round_floor)
    cb = finalize(cb, round_ceiling)
    sa = finalize(sa, round_floor)
    sb = finalize(sb, round_ceiling)
    return (ca,cb), (sa,sb)
|
| 425 |
+
|
| 426 |
+
def mpi_cos(x, prec):
    """Interval cosine (the cos component of mpi_cos_sin)."""
    c, _ = mpi_cos_sin(x, prec)
    return c
|
| 428 |
+
|
| 429 |
+
def mpi_sin(x, prec):
    """Interval sine (the sin component of mpi_cos_sin)."""
    _, s = mpi_cos_sin(x, prec)
    return s
|
| 431 |
+
|
| 432 |
+
def mpi_tan(x, prec):
    """Interval tangent, computed as sin/cos at extra working precision."""
    c, s = mpi_cos_sin(x, prec + 20)
    return mpi_div(s, c, prec)
|
| 435 |
+
|
| 436 |
+
def mpi_cot(x, prec):
    """Interval cotangent, computed as cos/sin at extra working precision."""
    c, s = mpi_cos_sin(x, prec + 20)
    return mpi_div(c, s, prec)
|
| 439 |
+
|
| 440 |
+
def mpi_from_str_a_b(x, y, percent, prec):
    """Build the interval [x-y, x+y] from decimal strings x (midpoint)
    and y (half-width).  If percent is true, y is interpreted as a
    percentage of the midpoint's magnitude instead of an absolute width.
    """
    wp = prec + 20
    xa = from_str(x, wp, round_floor)
    xb = from_str(x, wp, round_ceiling)
    #ya = from_str(y, wp, round_floor)
    y = from_str(y, wp, round_ceiling)
    # Half-width must be nonnegative.  NOTE(review): assert is stripped
    # under -O; malformed input would then pass silently.
    assert mpf_ge(y, fzero)
    if percent:
        # y := max(|xa|, |xb|) * y / 100, rounded up to stay an enclosure
        y = mpf_mul(MAX(mpf_abs(xa), mpf_abs(xb)), y, wp, round_ceiling)
        y = mpf_div(y, from_int(100), wp, round_ceiling)
    a = mpf_sub(xa, y, prec, round_floor)
    b = mpf_add(xb, y, prec, round_ceiling)
    return a, b
|
| 453 |
+
|
| 454 |
+
def mpi_from_str(s, prec):
    """
    Parse an interval number given as a string.

    Allowed forms are

    "-1.23e-27"
        Any single decimal floating-point literal.
    "a +- b" or "a (b)"
        a is the midpoint of the interval and b is the half-width
    "a +- b%" or "a (b%)"
        a is the midpoint of the interval and the half-width
        is b percent of a (`a \times b / 100`).
    "[a, b]"
        The interval indicated directly.
    "x[y,z]e"
        x are shared digits, y and z are unequal digits, e is the exponent.

    """
    e = ValueError("Improperly formed interval number '%s'" % s)
    s = s.replace(" ", "")
    # case 1: "a +- b"
    if "+-" in s:
        x, y = s.split("+-")
        return mpi_from_str_a_b(x, y, False, prec)
    # case 2: "a (b)" or "a (b%)"
    elif "(" in s:
        # Don't confuse with a complex number (x,y)
        if s[0] == "(" or ")" not in s:
            raise e
        s = s.replace(")", "")
        percent = False
        if "%" in s:
            if s[-1] != "%":
                raise e
            percent = True
            s = s.replace("%", "")
        x, y = s.split("(")
        return mpi_from_str_a_b(x, y, percent, prec)
    elif "," in s:
        if ('[' not in s) or (']' not in s):
            raise e
        if s[0] == '[':
            # case 3: "[a, b]" -- endpoints given directly
            s = s.replace("[", "")
            s = s.replace("]", "")
            a, b = s.split(",")
            a = from_str(a, prec, round_floor)
            b = from_str(b, prec, round_ceiling)
            return a, b
        else:
            # case 4: "x[y,z]e" -- shared digits x, differing digits y/z.
            # NOTE: e (the ValueError above) is deliberately reused here
            # to hold the exponent suffix.
            x, y = s.split('[')
            y, z = y.split(',')
            if 'e' in s:
                z, e = z.split(']')
            else:
                z, e = z.rstrip(']'), ''
            a = from_str(x+y+e, prec, round_floor)
            b = from_str(x+z+e, prec, round_ceiling)
            return a, b
    else:
        # Single literal: round it outward in both directions
        a = from_str(s, prec, round_floor)
        b = from_str(s, prec, round_ceiling)
        return a, b
|
| 519 |
+
|
| 520 |
+
def mpi_to_str(x, dps, use_spaces=True, brackets='[]', mode='brackets', error_dps=4, **kwargs):
    """
    Convert a mpi interval to a string.

    **Arguments**

    *dps*
        decimal places to use for printing
    *use_spaces*
        use spaces for more readable output, defaults to true
    *brackets*
        pair of strings (or two-character string) giving left and right brackets
    *mode*
        mode of display: 'plusminus', 'percent', 'brackets' (default) or 'diff'
    *error_dps*
        limit the error to *error_dps* digits (mode 'plusminus and 'percent')

    Additional keyword arguments are forwarded to the mpf-to-string conversion
    for the components of the output.

    **Examples**

    >>> from mpmath import mpi, mp
    >>> mp.dps = 30
    >>> x = mpi(1, 2)._mpi_
    >>> mpi_to_str(x, 2, mode='plusminus')
    '1.5 +- 0.5'
    >>> mpi_to_str(x, 2, mode='percent')
    '1.5 (33.33%)'
    >>> mpi_to_str(x, 2, mode='brackets')
    '[1.0, 2.0]'
    >>> mpi_to_str(x, 2, mode='brackets' , brackets=('<', '>'))
    '<1.0, 2.0>'
    >>> x = mpi('5.2582327113062393041', '5.2582327113062749951')._mpi_
    >>> mpi_to_str(x, 15, mode='diff')
    '5.2582327113062[4, 7]'
    >>> mpi_to_str(mpi(0)._mpi_, 2, mode='percent')
    '0.0 (0.0%)'

    """
    prec = dps_to_prec(dps)
    wp = prec + 20
    a, b = x
    mid = mpi_mid(x, prec)
    delta = mpi_delta(x, prec)
    a_str = to_str(a, dps, **kwargs)
    b_str = to_str(b, dps, **kwargs)
    mid_str = to_str(mid, dps, **kwargs)
    sp = ""
    if use_spaces:
        sp = " "
    br1, br2 = brackets
    if mode == 'plusminus':
        # Half-width = delta/2 (mpf_shift by -1 halves exactly)
        delta_str = to_str(mpf_shift(delta,-1), dps, **kwargs)
        s = mid_str + sp + "+-" + sp + delta_str
    elif mode == 'percent':
        if mid == fzero:
            p = fzero
        else:
            # p = 100 * delta(x) / (2*mid(x))
            p = mpf_mul(delta, from_int(100))
            p = mpf_div(p, mpf_mul(mid, from_int(2)), wp)
        s = mid_str + sp + "(" + to_str(p, error_dps) + "%)"
    elif mode == 'brackets':
        s = br1 + a_str + "," + sp + b_str + br2
    elif mode == 'diff':
        # use more digits if str(x.a) and str(x.b) are equal
        if a_str == b_str:
            a_str = to_str(a, dps+3, **kwargs)
            b_str = to_str(b, dps+3, **kwargs)
        # separate mantissa and exponent
        a = a_str.split('e')
        if len(a) == 1:
            a.append('')
        b = b_str.split('e')
        if len(b) == 1:
            b.append('')
        if a[1] == b[1]:
            if a[0] != b[0]:
                # Find the first position where the mantissas differ
                # (xrange presumably comes from a py2/py3 compat import
                # in this module -- TODO confirm)
                for i in xrange(len(a[0]) + 1):
                    if a[0][i] != b[0][i]:
                        break
                s = (a[0][:i] + br1 + a[0][i:] + ',' + sp + b[0][i:] + br2
                     + 'e'*min(len(a[1]), 1) + a[1])
            else: # no difference
                s = a[0] + br1 + br2 + 'e'*min(len(a[1]), 1) + a[1]
        else:
            # Exponents differ: fall back to plain bracket form
            s = br1 + 'e'.join(a) + ',' + sp + 'e'.join(b) + br2
    else:
        raise ValueError("'%s' is unknown mode for printing mpi" % mode)
    return s
|
| 611 |
+
|
| 612 |
+
def mpci_add(x, y, prec):
    """Componentwise complex-interval addition."""
    xre, xim = x
    yre, yim = y
    return (mpi_add(xre, yre, prec), mpi_add(xim, yim, prec))
|
| 616 |
+
|
| 617 |
+
def mpci_sub(x, y, prec):
    """Componentwise complex-interval subtraction."""
    xre, xim = x
    yre, yim = y
    return (mpi_sub(xre, yre, prec), mpi_sub(xim, yim, prec))
|
| 621 |
+
|
| 622 |
+
def mpci_neg(x, prec=0):
    """Componentwise complex-interval negation (prec=0 means exact)."""
    re, im = x
    return (mpi_neg(re, prec), mpi_neg(im, prec))
|
| 625 |
+
|
| 626 |
+
def mpci_pos(x, prec):
    """Round a complex interval to the given precision (identity value)."""
    re, im = x
    return (mpi_pos(re, prec), mpi_pos(im, prec))
|
| 629 |
+
|
| 630 |
+
def mpci_mul(x, y, prec):
    """Complex interval product: (a+bi)(c+di) = (ac-bd) + (ad+bc)i.

    The partial products are formed exactly (no prec argument); only the
    final sum/difference is rounded.
    """
    # TODO: optimize for real/imag cases
    a, b = x
    c, d = y
    re = mpi_sub(mpi_mul(a, c), mpi_mul(b, d), prec)
    im = mpi_add(mpi_mul(a, d), mpi_mul(b, c), prec)
    return re, im
|
| 641 |
+
|
| 642 |
+
def mpci_div(x, y, prec):
    """Complex interval division via the conjugate:
    (a+bi)/(c+di) = ((ac+bd) + (bc-ad)i) / (c^2 + d^2).
    """
    # TODO: optimize for real/imag cases
    a, b = x
    c, d = y
    wp = prec+20
    denom = mpi_add(mpi_square(c), mpi_square(d), wp)
    re_num = mpi_add(mpi_mul(a, c), mpi_mul(b, d), wp)
    im_num = mpi_sub(mpi_mul(b, c), mpi_mul(a, d), wp)
    return (mpi_div(re_num, denom, prec), mpi_div(im_num, denom, prec))
|
| 655 |
+
|
| 656 |
+
def mpci_exp(x, prec):
    """Complex interval exponential: exp(a+bi) = e^a (cos b + i sin b)."""
    re, im = x
    wp = prec+20
    mag = mpi_exp(re, wp)
    c, s = mpi_cos_sin(im, wp)
    return (mpi_mul(mag, c, prec), mpi_mul(mag, s, prec))
|
| 664 |
+
|
| 665 |
+
def mpi_shift(x, n):
    """Multiply an interval by 2**n exactly (shift both endpoints)."""
    lo, hi = x
    return (mpf_shift(lo, n), mpf_shift(hi, n))
|
| 668 |
+
|
| 669 |
+
def mpi_cosh_sinh(x, prec):
    """Return (cosh(x), sinh(x)) for an interval x, via the exponential:
    cosh = (e^x + e^-x)/2, sinh = (e^x - e^-x)/2.
    """
    # TODO: accuracy for small x
    wp = prec+20
    ex = mpi_exp(x, wp)
    emx = mpi_div(mpi_one, ex, wp)
    ch = mpi_shift(mpi_add(ex, emx, prec), -1)
    sh = mpi_shift(mpi_sub(ex, emx, prec), -1)
    return ch, sh
|
| 679 |
+
|
| 680 |
+
def mpci_cos(x, prec):
    """Complex interval cosine:
    cos(a+bi) = cos(a)cosh(b) - i sin(a)sinh(b).
    """
    re_part, im_part = x
    wp = prec+10
    cos_a, sin_a = mpi_cos_sin(re_part, wp)
    cosh_b, sinh_b = mpi_cosh_sinh(im_part, wp)
    re = mpi_mul(cos_a, cosh_b, prec)
    im = mpi_mul(sin_a, sinh_b, prec)
    return re, mpi_neg(im)
|
| 688 |
+
|
| 689 |
+
def mpci_sin(x, prec):
    """Complex interval sine:
    sin(a+bi) = sin(a)cosh(b) + i cos(a)sinh(b).
    """
    re_part, im_part = x
    wp = prec+10
    cos_a, sin_a = mpi_cos_sin(re_part, wp)
    cosh_b, sinh_b = mpi_cosh_sinh(im_part, wp)
    re = mpi_mul(sin_a, cosh_b, prec)
    im = mpi_mul(cos_a, sinh_b, prec)
    return re, im
|
| 697 |
+
|
| 698 |
+
def mpci_abs(x, prec):
    """Modulus of a complex interval: |a+bi| = sqrt(a^2 + b^2).

    Purely real or purely imaginary inputs short-circuit to an exact
    interval absolute value.
    """
    re, im = x
    if re == mpi_zero:
        return mpi_abs(im)
    if im == mpi_zero:
        return mpi_abs(re)
    # Important: mpi_square yields nonnegative intervals, so the sqrt
    # argument is valid
    mag2 = mpi_add(mpi_square(re), mpi_square(im), prec+20)
    return mpi_sqrt(mag2, prec)
|
| 709 |
+
|
| 710 |
+
def mpi_atan2(y, x, prec):
    """Interval atan2: enclose atan2(y, x) for all points of the box
    (x, y) by evaluating at the extremal corners of each quadrant case.

    If the box covers the origin the result is the full range [-pi, pi].
    """
    ya, yb = y
    xa, xb = x
    # Constrained to the real line
    if ya == yb == fzero:
        if mpf_ge(xa, fzero):
            return mpi_zero
        return mpi_pi(prec)
    # Right half-plane: angle increases with y and decreases with x
    if mpf_ge(xa, fzero):
        if mpf_ge(ya, fzero):
            a = mpf_atan2(ya, xb, prec, round_floor)
        else:
            a = mpf_atan2(ya, xa, prec, round_floor)
        if mpf_ge(yb, fzero):
            b = mpf_atan2(yb, xa, prec, round_ceiling)
        else:
            b = mpf_atan2(yb, xb, prec, round_ceiling)
    # Upper half-plane
    elif mpf_ge(ya, fzero):
        b = mpf_atan2(ya, xa, prec, round_ceiling)
        if mpf_le(xb, fzero):
            a = mpf_atan2(yb, xb, prec, round_floor)
        else:
            a = mpf_atan2(ya, xb, prec, round_floor)
    # Lower half-plane
    elif mpf_le(yb, fzero):
        a = mpf_atan2(yb, xa, prec, round_floor)
        if mpf_le(xb, fzero):
            b = mpf_atan2(ya, xb, prec, round_ceiling)
        else:
            b = mpf_atan2(yb, xb, prec, round_ceiling)
    # Covering the origin: angle can be anything in [-pi, pi]
    else:
        b = mpf_pi(prec, round_ceiling)
        a = mpf_neg(b)
    return a, b
|
| 747 |
+
|
| 748 |
+
def mpci_arg(z, prec):
    """Argument (phase angle) of a complex interval, via interval atan2."""
    re, im = z
    return mpi_atan2(im, re, prec)
|
| 751 |
+
|
| 752 |
+
def mpci_log(z, prec):
    """Complex interval logarithm: log(z) = log|z| + i*arg(z)."""
    # (The former `x, y = z` unpack was unused and has been removed.)
    re = mpi_log(mpci_abs(z, prec+20), prec)
    im = mpci_arg(z, prec)
    return re, im
|
| 757 |
+
|
| 758 |
+
def mpci_pow(x, y, prec):
    """Raise complex interval x to complex interval power y.

    If y is a real point interval equal to an exact nonnegative-exponent
    integer (or zero), delegate to mpci_pow_int; otherwise compute
    exp(y*log(x)) at extra working precision.
    """
    # TODO: recognize/speed up real cases, integer y
    yre, yim = y
    if yim == mpi_zero:
        ya, yb = yre
        if ya == yb:
            sign, man, exp, bc = yb
            # Integer with exp >= 0 means man<<exp is an exact integer
            if man and exp >= 0:
                return mpci_pow_int(x, (-1)**sign * int(man<<exp), prec)
            # x^0
            if yb == fzero:
                return mpci_pow_int(x, 0, prec)
    wp = prec+20
    return mpci_exp(mpci_mul(y, mpci_log(x, wp), wp), prec)
|
| 772 |
+
|
| 773 |
+
def mpci_square(x, prec):
    """Square a complex interval: (a+bi)^2 = (a^2 - b^2) + 2abi."""
    a, b = x
    re = mpi_sub(mpi_square(a), mpi_square(b), prec)
    # Doubling via an exact shift by one bit
    im = mpi_shift(mpi_mul(a, b, prec), 1)
    return re, im
|
| 780 |
+
|
| 781 |
+
def mpci_pow_int(x, n, prec):
    """Raise a complex interval to the integer power n by binary
    (square-and-multiply) powering at extra working precision.

    Negative n is handled as 1/x**(-n); n in {0, 1, 2} take fast paths.
    """
    if n < 0:
        return mpci_div((mpi_one,mpi_zero), mpci_pow_int(x, -n, prec+20), prec)
    if n == 0:
        return mpi_one, mpi_zero
    if n == 1:
        return mpci_pos(x, prec)
    if n == 2:
        return mpci_square(x, prec)
    wp = prec + 20
    result = (mpi_one, mpi_zero)
    while n:
        if n & 1:
            result = mpci_mul(result, x, wp)
            n -= 1
        # NOTE: when n has just dropped to 0 this final squaring is
        # computed but unused (harmless, slightly wasteful)
        x = mpci_square(x, wp)
        n >>= 1
    return mpci_pos(result, prec)
|
| 799 |
+
|
| 800 |
+
# Bracketing interval for the location of the minimum of gamma on the
# positive real axis; gamma is treated as decreasing to the left of this
# point and increasing to the right (used by mpi_gamma).
gamma_min_a = from_float(1.46163214496)
gamma_min_b = from_float(1.46163214497)
gamma_min = (gamma_min_a, gamma_min_b)
# Imaginary band around the real axis inside which mpci_gamma does not
# rely on monotonicity and applies the recurrence instead.
gamma_mono_imag_a = from_float(-1.1)
gamma_mono_imag_b = from_float(1.1)
|
| 805 |
+
|
| 806 |
+
def mpi_overlap(x, y):
    """Return True if the intervals x and y share at least one point."""
    a, b = x
    c, d = y
    # Disjoint exactly when one interval lies strictly beyond the other
    return not (mpf_lt(d, a) or mpf_gt(c, b))
|
| 812 |
+
|
| 813 |
+
# type = 0 -- gamma
|
| 814 |
+
# type = 1 -- factorial
|
| 815 |
+
# type = 2 -- 1/gamma
|
| 816 |
+
# type = 3 -- log-gamma
|
| 817 |
+
|
| 818 |
+
def mpi_gamma(z, prec, type=0):
    """Gamma-family function of a real interval.

    type selects the function: 0 -> gamma, 1 -> factorial (gamma(z+1)),
    2 -> 1/gamma, 3 -> log-gamma.

    Uses monotonicity on either side of gamma's positive minimum
    (bracketed by gamma_min_a/gamma_min_b); otherwise applies the
    recurrence gamma(z) = gamma(z+1)/z to move the argument right.
    """
    a, b = z
    wp = prec+20

    if type == 1:
        # factorial(z) = gamma(z+1)
        return mpi_gamma(mpi_add(z, mpi_one, wp), prec, 0)

    # increasing
    if mpf_gt(a, gamma_min_b):
        if type == 0:
            c = mpf_gamma(a, prec, round_floor)
            d = mpf_gamma(b, prec, round_ceiling)
        elif type == 2:
            # 1/gamma is decreasing where gamma is increasing
            c = mpf_rgamma(b, prec, round_floor)
            d = mpf_rgamma(a, prec, round_ceiling)
        elif type == 3:
            c = mpf_loggamma(a, prec, round_floor)
            d = mpf_loggamma(b, prec, round_ceiling)
    # decreasing
    elif mpf_gt(a, fzero) and mpf_lt(b, gamma_min_a):
        if type == 0:
            c = mpf_gamma(b, prec, round_floor)
            d = mpf_gamma(a, prec, round_ceiling)
        elif type == 2:
            c = mpf_rgamma(a, prec, round_floor)
            d = mpf_rgamma(b, prec, round_ceiling)
        elif type == 3:
            c = mpf_loggamma(b, prec, round_floor)
            d = mpf_loggamma(a, prec, round_ceiling)
    else:
        # TODO: reflection formula
        # Shift right via the recurrence gamma(z) = gamma(z+1)/z
        znew = mpi_add(z, mpi_one, wp)
        if type == 0: return mpi_div(mpi_gamma(znew, prec+2, 0), z, prec)
        if type == 2: return mpi_mul(mpi_gamma(znew, prec+2, 2), z, prec)
        if type == 3: return mpi_sub(mpi_gamma(znew, prec+2, 3), mpi_log(z, prec+2), prec)
    return c, d
|
| 854 |
+
|
| 855 |
+
def mpci_gamma(z, prec, type=0):
|
| 856 |
+
(a1,a2), (b1,b2) = z
|
| 857 |
+
|
| 858 |
+
# Real case
|
| 859 |
+
if b1 == b2 == fzero and (type != 3 or mpf_gt(a1,fzero)):
|
| 860 |
+
return mpi_gamma(z, prec, type), mpi_zero
|
| 861 |
+
|
| 862 |
+
# Estimate precision
|
| 863 |
+
wp = prec+20
|
| 864 |
+
if type != 3:
|
| 865 |
+
amag = a2[2]+a2[3]
|
| 866 |
+
bmag = b2[2]+b2[3]
|
| 867 |
+
if a2 != fzero:
|
| 868 |
+
mag = max(amag, bmag)
|
| 869 |
+
else:
|
| 870 |
+
mag = bmag
|
| 871 |
+
an = abs(to_int(a2))
|
| 872 |
+
bn = abs(to_int(b2))
|
| 873 |
+
absn = max(an, bn)
|
| 874 |
+
gamma_size = max(0,absn*mag)
|
| 875 |
+
wp += bitcount(gamma_size)
|
| 876 |
+
|
| 877 |
+
# Assume type != 1
|
| 878 |
+
if type == 1:
|
| 879 |
+
(a1,a2) = mpi_add((a1,a2), mpi_one, wp); z = (a1,a2), (b1,b2)
|
| 880 |
+
type = 0
|
| 881 |
+
|
| 882 |
+
# Avoid non-monotonic region near the negative real axis
|
| 883 |
+
if mpf_lt(a1, gamma_min_b):
|
| 884 |
+
if mpi_overlap((b1,b2), (gamma_mono_imag_a, gamma_mono_imag_b)):
|
| 885 |
+
# TODO: reflection formula
|
| 886 |
+
#if mpf_lt(a2, mpf_shift(fone,-1)):
|
| 887 |
+
# znew = mpci_sub((mpi_one,mpi_zero),z,wp)
|
| 888 |
+
# ...
|
| 889 |
+
# Recurrence:
|
| 890 |
+
# gamma(z) = gamma(z+1)/z
|
| 891 |
+
znew = mpi_add((a1,a2), mpi_one, wp), (b1,b2)
|
| 892 |
+
if type == 0: return mpci_div(mpci_gamma(znew, prec+2, 0), z, prec)
|
| 893 |
+
if type == 2: return mpci_mul(mpci_gamma(znew, prec+2, 2), z, prec)
|
| 894 |
+
if type == 3: return mpci_sub(mpci_gamma(znew, prec+2, 3), mpci_log(z,prec+2), prec)
|
| 895 |
+
|
| 896 |
+
# Use monotonicity (except for a small region close to the
|
| 897 |
+
# origin and near poles)
|
| 898 |
+
# upper half-plane
|
| 899 |
+
if mpf_ge(b1, fzero):
|
| 900 |
+
minre = mpc_loggamma((a1,b2), wp, round_floor)
|
| 901 |
+
maxre = mpc_loggamma((a2,b1), wp, round_ceiling)
|
| 902 |
+
minim = mpc_loggamma((a1,b1), wp, round_floor)
|
| 903 |
+
maxim = mpc_loggamma((a2,b2), wp, round_ceiling)
|
| 904 |
+
# lower half-plane
|
| 905 |
+
elif mpf_le(b2, fzero):
|
| 906 |
+
minre = mpc_loggamma((a1,b1), wp, round_floor)
|
| 907 |
+
maxre = mpc_loggamma((a2,b2), wp, round_ceiling)
|
| 908 |
+
minim = mpc_loggamma((a2,b1), wp, round_floor)
|
| 909 |
+
maxim = mpc_loggamma((a1,b2), wp, round_ceiling)
|
| 910 |
+
# crosses real axis
|
| 911 |
+
else:
|
| 912 |
+
maxre = mpc_loggamma((a2,fzero), wp, round_ceiling)
|
| 913 |
+
# stretches more into the lower half-plane
|
| 914 |
+
if mpf_gt(mpf_neg(b1), b2):
|
| 915 |
+
minre = mpc_loggamma((a1,b1), wp, round_ceiling)
|
| 916 |
+
else:
|
| 917 |
+
minre = mpc_loggamma((a1,b2), wp, round_ceiling)
|
| 918 |
+
minim = mpc_loggamma((a2,b1), wp, round_floor)
|
| 919 |
+
maxim = mpc_loggamma((a2,b2), wp, round_floor)
|
| 920 |
+
|
| 921 |
+
w = (minre[0], maxre[0]), (minim[1], maxim[1])
|
| 922 |
+
if type == 3:
|
| 923 |
+
return mpi_pos(w[0], prec), mpi_pos(w[1], prec)
|
| 924 |
+
if type == 2:
|
| 925 |
+
w = mpci_neg(w)
|
| 926 |
+
return mpci_exp(w, prec)
|
| 927 |
+
|
| 928 |
+
# Thin wrappers selecting the mpi_gamma/mpci_gamma variant by `type`:
# type=1 -> factorial, type=2 -> 1/gamma, type=3 -> log-gamma.
def mpi_loggamma(z, prec): return mpi_gamma(z, prec, type=3)
def mpci_loggamma(z, prec): return mpci_gamma(z, prec, type=3)

def mpi_rgamma(z, prec): return mpi_gamma(z, prec, type=2)
def mpci_rgamma(z, prec): return mpci_gamma(z, prec, type=2)

def mpi_factorial(z, prec): return mpi_gamma(z, prec, type=1)
def mpci_factorial(z, prec): return mpci_gamma(z, prec, type=1)
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/math2.py
ADDED
|
@@ -0,0 +1,672 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""
|
| 2 |
+
This module complements the math and cmath builtin modules by providing
|
| 3 |
+
fast machine precision versions of some additional functions (gamma, ...)
|
| 4 |
+
and wrapping math/cmath functions so that they can be called with either
|
| 5 |
+
real or complex arguments.
|
| 6 |
+
"""
|
| 7 |
+
|
| 8 |
+
import operator
|
| 9 |
+
import math
|
| 10 |
+
import cmath
|
| 11 |
+
|
| 12 |
+
# Irrational (?) constants (double-precision literals)
pi = 3.1415926535897932385
e = 2.7182818284590452354
sqrt2 = 1.4142135623730950488
sqrt5 = 2.2360679774997896964
phi = 1.6180339887498948482      # golden ratio, (1+sqrt(5))/2
ln2 = 0.69314718055994530942
ln10 = 2.302585092994045684
euler = 0.57721566490153286061   # Euler-Mascheroni constant
catalan = 0.91596559417721901505
khinchin = 2.6854520010653064453
apery = 1.2020569031595942854    # zeta(3)

logpi = 1.1447298858494001741    # log(pi)
|
| 26 |
+
|
| 27 |
+
def _mathfun_real(f_real, f_complex):
|
| 28 |
+
def f(x, **kwargs):
|
| 29 |
+
if type(x) is float:
|
| 30 |
+
return f_real(x)
|
| 31 |
+
if type(x) is complex:
|
| 32 |
+
return f_complex(x)
|
| 33 |
+
try:
|
| 34 |
+
x = float(x)
|
| 35 |
+
return f_real(x)
|
| 36 |
+
except (TypeError, ValueError):
|
| 37 |
+
x = complex(x)
|
| 38 |
+
return f_complex(x)
|
| 39 |
+
f.__name__ = f_real.__name__
|
| 40 |
+
return f
|
| 41 |
+
|
| 42 |
+
def _mathfun(f_real, f_complex):
|
| 43 |
+
def f(x, **kwargs):
|
| 44 |
+
if type(x) is complex:
|
| 45 |
+
return f_complex(x)
|
| 46 |
+
try:
|
| 47 |
+
return f_real(float(x))
|
| 48 |
+
except (TypeError, ValueError):
|
| 49 |
+
return f_complex(complex(x))
|
| 50 |
+
f.__name__ = f_real.__name__
|
| 51 |
+
return f
|
| 52 |
+
|
| 53 |
+
def _mathfun_n(f_real, f_complex):
|
| 54 |
+
def f(*args, **kwargs):
|
| 55 |
+
try:
|
| 56 |
+
return f_real(*(float(x) for x in args))
|
| 57 |
+
except (TypeError, ValueError):
|
| 58 |
+
return f_complex(*(complex(x) for x in args))
|
| 59 |
+
f.__name__ = f_real.__name__
|
| 60 |
+
return f
|
| 61 |
+
|
| 62 |
+
# Workaround for non-raising log and sqrt in Python 2.5 and 2.4
# on Unix system
try:
    # Probe the platform: if math.log silently accepts a negative
    # argument, define wrappers that raise ValueError on domain errors.
    math.log(-2.0)
    def math_log(x):
        if x <= 0.0:
            raise ValueError("math domain error")
        return math.log(x)
    def math_sqrt(x):
        if x < 0.0:
            raise ValueError("math domain error")
        return math.sqrt(x)
except (ValueError, TypeError):
    # math.log already raised for the invalid probe above, so the
    # library functions behave correctly and can be used directly.
    math_log = math.log
    math_sqrt = math.sqrt
|
| 77 |
+
|
| 78 |
+
# Generic elementary functions accepting real or complex arguments.
# NOTE: `pow` deliberately shadows the builtin within this module.
pow = _mathfun_n(operator.pow, lambda x, y: complex(x)**y)
log = _mathfun_n(math_log, cmath.log)
sqrt = _mathfun(math_sqrt, cmath.sqrt)
exp = _mathfun_real(math.exp, cmath.exp)

cos = _mathfun_real(math.cos, cmath.cos)
sin = _mathfun_real(math.sin, cmath.sin)
tan = _mathfun_real(math.tan, cmath.tan)

# acos/asin use _mathfun so out-of-domain real input falls back to cmath
acos = _mathfun(math.acos, cmath.acos)
asin = _mathfun(math.asin, cmath.asin)
atan = _mathfun_real(math.atan, cmath.atan)

cosh = _mathfun_real(math.cosh, cmath.cosh)
sinh = _mathfun_real(math.sinh, cmath.sinh)
tanh = _mathfun_real(math.tanh, cmath.tanh)

# floor/ceil of a complex number are applied componentwise
floor = _mathfun_real(math.floor,
    lambda z: complex(math.floor(z.real), math.floor(z.imag)))
ceil = _mathfun_real(math.ceil,
    lambda z: complex(math.ceil(z.real), math.ceil(z.imag)))


# Simultaneous cosine and sine of the same argument
cos_sin = _mathfun_real(lambda x: (math.cos(x), math.sin(x)),
                        lambda z: (cmath.cos(z), cmath.sin(z)))

# Cube root (principal branch for complex input)
cbrt = _mathfun(lambda x: x**(1./3), lambda z: z**(1./3))
|
| 105 |
+
|
| 106 |
+
def nthroot(x, n):
    """Principal n-th root of x, computed as x**(1/n); falls back to
    complex arithmetic if the float power fails."""
    exponent = 1./n
    try:
        return float(x) ** exponent
    except (ValueError, TypeError):
        return complex(x) ** exponent
|
| 112 |
+
|
| 113 |
+
def _sinpi_real(x):
    """sin(pi*x) for real x, via reduction to a quadrant.

    x is split into n half-units of 0.5 plus a remainder r in [0, 0.5),
    so only sin/cos of arguments in [0, pi/2) are ever evaluated.
    """
    if x < 0:
        # Odd symmetry
        return -_sinpi_real(-x)
    n, r = divmod(x, 0.5)
    r *= pi
    n %= 4
    if n == 0: return math.sin(r)
    if n == 1: return math.cos(r)
    if n == 2: return -math.sin(r)
    if n == 3: return -math.cos(r)
|
| 123 |
+
|
| 124 |
+
def _cospi_real(x):
    """cos(pi*x) for real x, computed accurately by quadrant reduction."""
    if x < 0:
        # cos is even
        x = -x
    quadrant, frac = divmod(x, 0.5)
    angle = frac * pi
    quadrant %= 4
    if quadrant == 0:
        return math.cos(angle)
    elif quadrant == 1:
        return -math.sin(angle)
    elif quadrant == 2:
        return -math.cos(angle)
    else:
        return math.sin(angle)
def _sinpi_complex(z):
    """sin(pi*z) for complex z, reducing the real part by quadrants."""
    if z.real < 0:
        # sin is odd
        return -_sinpi_complex(-z)
    quadrant, frac = divmod(z.real, 0.5)
    w = pi * complex(frac, z.imag)
    quadrant %= 4
    if quadrant == 0:
        return cmath.sin(w)
    elif quadrant == 1:
        return cmath.cos(w)
    elif quadrant == 2:
        return -cmath.sin(w)
    else:
        return -cmath.cos(w)
def _cospi_complex(z):
    """cos(pi*z) for complex z, reducing the real part by quadrants."""
    if z.real < 0:
        # cos is even
        z = -z
    quadrant, frac = divmod(z.real, 0.5)
    w = pi * complex(frac, z.imag)
    quadrant %= 4
    if quadrant == 0:
        return cmath.cos(w)
    elif quadrant == 1:
        return -cmath.sin(w)
    elif quadrant == 2:
        return -cmath.cos(w)
    else:
        return cmath.sin(w)
# Public cos(pi*x)/sin(pi*x): dispatch to the real or complex version
cospi = _mathfun_real(_cospi_real, _cospi_complex)
sinpi = _mathfun_real(_sinpi_real, _sinpi_complex)
def tanpi(x):
    """tan(pi*x) = sin(pi*x)/cos(pi*x).

    On OverflowError the limiting value +/-1j is returned for large
    imaginary part.  NOTE(review): the second test reads ``imag < 10``
    (not ``< -10``), so any overflowing input with imag <= 10 yields
    -1j; preserved as-is.
    """
    try:
        return sinpi(x) / cospi(x)
    except OverflowError:
        im = complex(x).imag
        if im > 10:
            return 1j
        if im < 10:
            return -1j
        raise
def cotpi(x):
    """cot(pi*x) = cos(pi*x)/sin(pi*x).

    On OverflowError the limiting value -/+1j is returned for large
    imaginary part (mirror of tanpi's handling; the ``imag < 10``
    condition is preserved as in the original).
    """
    try:
        return cospi(x) / sinpi(x)
    except OverflowError:
        im = complex(x).imag
        if im > 10:
            return -1j
        if im < 10:
            return 1j
        raise
# IEEE-754 special values produced by arithmetic (portable across old
# Python versions that lack float('inf')/float('nan'))
INF = 1e300*1e300
NINF = -INF
NAN = INF-INF
# Machine epsilon for double precision (2**-52)
EPS = 2.2204460492503131e-16
# Exact values of gamma(n) = (n-1)! for small integers; index 0 holds
# INF as a placeholder for the pole at gamma(0).
_exact_gamma = (INF, 1.0, 1.0, 2.0, 6.0, 24.0, 120.0, 720.0, 5040.0, 40320.0,
    362880.0, 3628800.0, 39916800.0, 479001600.0, 6227020800.0, 87178291200.0,
    1307674368000.0, 20922789888000.0, 355687428096000.0, 6402373705728000.0,
    121645100408832000.0, 2432902008176640000.0)

# Largest integer argument served by the exact table above
_max_exact_gamma = len(_exact_gamma)-1

# Lanczos coefficients used by the GNU Scientific Library
_lanczos_g = 7
_lanczos_p = (0.99999999999980993, 676.5203681218851, -1259.1392167224028,
    771.32342877765313, -176.61502916214059, 12.507343278686905,
    -0.13857109526572012, 9.9843695780195716e-6, 1.5056327351493116e-7)
def _gamma_real(x):
    """Gamma function for a real float argument.

    Uses the exact factorial table for small positive integers, the
    reflection formula for x < 0.5, and the Lanczos approximation
    (g = 7) otherwise.

    Raises ZeroDivisionError at the poles (nonpositive integers).
    """
    _intx = int(x)
    if _intx == x:
        if _intx <= 0:
            #return (-1)**_intx * INF
            raise ZeroDivisionError("gamma function pole")
        if _intx <= _max_exact_gamma:
            return _exact_gamma[_intx]
    if x < 0.5:
        # Reflection: gamma(x) * gamma(1-x) = pi / sin(pi*x)
        # TODO: sinpi
        return pi / (_sinpi_real(x)*_gamma_real(1-x))
    else:
        x -= 1.0
        r = _lanczos_p[0]
        for i in range(1, _lanczos_g+2):
            r += _lanczos_p[i]/(x+i)
        t = x + _lanczos_g + 0.5
        # 2.5066... = sqrt(2*pi)
        return 2.506628274631000502417 * t**(x+0.5) * math.exp(-t) * r
def _gamma_complex(x):
    """Gamma function for a complex argument (Lanczos / reflection)."""
    if not x.imag:
        # Purely real input: defer to the real implementation
        return complex(_gamma_real(x.real))
    if x.real < 0.5:
        # Reflection: gamma(x) * gamma(1-x) = pi / sin(pi*x)
        # TODO: sinpi
        return pi / (_sinpi_complex(x)*_gamma_complex(1-x))
    else:
        x -= 1.0
        r = _lanczos_p[0]
        for i in range(1, _lanczos_g+2):
            r += _lanczos_p[i]/(x+i)
        t = x + _lanczos_g + 0.5
        # 2.5066... = sqrt(2*pi)
        return 2.506628274631000502417 * t**(x+0.5) * cmath.exp(-t) * r
# Public gamma function: dispatches to the real or complex version
gamma = _mathfun_real(_gamma_real, _gamma_complex)
def rgamma(x):
    """Reciprocal gamma function, 1/gamma(x).

    At the poles of gamma (where either gamma raises or the division
    by an underflowed zero fails) a zero of matching type is returned.
    """
    try:
        return 1.0 / gamma(x)
    except ZeroDivisionError:
        # x*0.0 gives 0.0 for floats and 0j for complex input
        return x * 0.0
def factorial(x):
    """Factorial x!, computed as gamma(x + 1)."""
    return gamma(x + 1.0)
def arg(x):
    """Argument (phase angle) of a float or complex number, in [-pi, pi]."""
    if type(x) is float:
        im, re = 0.0, x
    else:
        im, re = x.imag, x.real
    return math.atan2(im, re)
# XXX: broken for negatives
def loggamma(x):
    """Logarithm of the gamma function, log(gamma(x)).

    Accepts anything coercible to float or complex.  Uses the
    reflection formula when the real part is negative, and otherwise a
    Stirling-type asymptotic series after shifting the argument up via
    the recurrence loggamma(x) = loggamma(x+1) - log(x).
    """
    if type(x) not in (float, complex):
        try:
            x = float(x)
        except (ValueError, TypeError):
            x = complex(x)
    try:
        xreal = x.real
        ximag = x.imag
    except AttributeError: # py2.5
        xreal = x
        ximag = 0.0
    # Reflection formula
    # http://functions.wolfram.com/GammaBetaErf/LogGamma/16/01/01/0003/
    if xreal < 0.0:
        if abs(x) < 0.5:
            v = log(gamma(x))
            if ximag == 0:
                # keep the conventional branch for real input
                v = v.conjugate()
            return v
        z = 1-x
        try:
            re = z.real
            im = z.imag
        except AttributeError: # py2.5
            re = z
            im = 0.0
        refloor = floor(re)
        # imsign tracks which side of the branch cut we are on
        if im == 0.0:
            imsign = 0
        elif im < 0.0:
            imsign = -1
        else:
            imsign = 1
        return (-pi*1j)*abs(refloor)*(1-abs(imsign)) + logpi - \
            log(sinpi(z-refloor)) - loggamma(z) + 1j*pi*refloor*imsign
    if x == 1.0 or x == 2.0:
        # loggamma(1) = loggamma(2) = 0, of the input's type
        return x*0
    p = 0.
    # Shift x upward until the asymptotic series below converges well;
    # p accumulates the -log(x) recurrence corrections
    while abs(x) < 11:
        p -= log(x)
        x += 1.0
    # Stirling series; 0.9189... = log(sqrt(2*pi))
    s = 0.918938533204672742 + (x-0.5)*log(x) - x
    r = 1./x
    r2 = r*r
    s += 0.083333333333333333333*r; r *= r2
    s += -0.0027777777777777777778*r; r *= r2
    s += 0.00079365079365079365079*r; r *= r2
    s += -0.0005952380952380952381*r; r *= r2
    s += 0.00084175084175084175084*r; r *= r2
    s += -0.0019175269175269175269*r; r *= r2
    s += 0.0064102564102564102564*r; r *= r2
    s += -0.02955065359477124183*r
    return s + p
# Coefficients of the asymptotic series for digamma; the leading terms
# match B_{2n}/(2n) (1/12, -1/120, ...) — Bernoulli-number terms of the
# standard psi expansion
_psi_coeff = [
    0.083333333333333333333,
    -0.0083333333333333333333,
    0.003968253968253968254,
    -0.0041666666666666666667,
    0.0075757575757575757576,
    -0.021092796092796092796,
    0.083333333333333333333,
    -0.44325980392156862745,
    3.0539543302701197438,
    -26.456212121212121212]
def _digamma_real(x):
    """Digamma (psi) function for a real float argument.

    Raises ZeroDivisionError at the poles (nonpositive integers).
    """
    _intx = int(x)
    if _intx == x and _intx <= 0:
        raise ZeroDivisionError("polygamma pole")
    # Reflection: psi(x) = psi(1-x) - pi*cot(pi*x)
    if x < 0.5:
        x = 1.0 - x
        total = pi * cotpi(x)
    else:
        total = 0.0
    # Recurrence psi(x+1) = psi(x) + 1/x: shift into the asymptotic regime
    while x < 10.0:
        total -= 1.0 / x
        x += 1.0
    # Asymptotic series in inverse even powers of x
    inv2 = x**-2
    term = inv2
    for coeff in _psi_coeff:
        total -= coeff * term
        if term < 1e-20:
            break
        term *= inv2
    return total + math_log(x) - 0.5 / x
def _digamma_complex(x):
    """Digamma (psi) function for a complex argument."""
    if not x.imag:
        # Purely real input: defer to the real implementation
        return complex(_digamma_real(x.real))
    # Reflection: psi(x) = psi(1-x) - pi*cot(pi*x)
    if x.real < 0.5:
        x = 1.0 - x
        total = pi * cotpi(x)
    else:
        total = 0.0
    # Recurrence psi(x+1) = psi(x) + 1/x: shift into the asymptotic regime
    while abs(x) < 10.0:
        total -= 1.0 / x
        x += 1.0
    # Asymptotic series in inverse even powers of x
    inv2 = x**-2
    term = inv2
    for coeff in _psi_coeff:
        total -= coeff * term
        if abs(term) < 1e-20:
            break
        term *= inv2
    return total + cmath.log(x) - 0.5 / x
# Public digamma (psi) function: dispatches to real or complex version
digamma = _mathfun_real(_digamma_real, _digamma_complex)

# TODO: could implement complex erf and erfc here. Need
# to find an accurate method (avoiding cancellation)
# for approx. 1 < abs(x) < 9.
# Numerator/denominator coefficients of the rational approximation used
# by _erfc_mid for 0 <= x <= 9; reversed so that _polyval's Horner loop
# sees the highest-degree coefficient first.
_erfc_coeff_P = [
    1.0000000161203922312,
    2.1275306946297962644,
    2.2280433377390253297,
    1.4695509105618423961,
    0.66275911699770787537,
    0.20924776504163751585,
    0.045459713768411264339,
    0.0063065951710717791934,
    0.00044560259661560421715][::-1]

_erfc_coeff_Q = [
    1.0000000000000000000,
    3.2559100272784894318,
    4.9019435608903239131,
    4.4971472894498014205,
    2.7845640601891186528,
    1.2146026030046904138,
    0.37647108453729465912,
    0.080970149639040548613,
    0.011178148899483545902,
    0.00078981003831980423513][::-1]
def _polyval(coeffs, x):
|
| 387 |
+
p = coeffs[0]
|
| 388 |
+
for c in coeffs[1:]:
|
| 389 |
+
p = c + x*p
|
| 390 |
+
return p
|
| 391 |
+
|
| 392 |
+
def _erf_taylor(x):
|
| 393 |
+
# Taylor series assuming 0 <= x <= 1
|
| 394 |
+
x2 = x*x
|
| 395 |
+
s = t = x
|
| 396 |
+
n = 1
|
| 397 |
+
while abs(t) > 1e-17:
|
| 398 |
+
t *= x2/n
|
| 399 |
+
s -= t/(n+n+1)
|
| 400 |
+
n += 1
|
| 401 |
+
t *= x2/n
|
| 402 |
+
s += t/(n+n+1)
|
| 403 |
+
n += 1
|
| 404 |
+
return 1.1283791670955125739*s
|
| 405 |
+
|
| 406 |
+
def _erfc_mid(x):
    """erfc(x) via a rational approximation, valid for 0 <= x <= 9."""
    numerator = _polyval(_erfc_coeff_P, x)
    denominator = _polyval(_erfc_coeff_Q, x)
    return exp(-x*x) * numerator / denominator
def _erfc_asymp(x):
    """erfc(x) via the asymptotic expansion; used for x >= 9.

    0.5641... = 1/sqrt(pi).
    """
    xsq = x * x
    prefactor = exp(-xsq) / x * 0.56418958354775628695
    ratio = term = 0.5 / xsq
    series = 1.0
    # Terms alternate in sign; stop once they are negligible
    for k in range(1, 22, 4):
        series -= term
        term *= ratio * (k + 2)
        series += term
        term *= ratio * (k + 4)
        if abs(term) < 1e-17:
            break
    return series * prefactor
def erf(x):
    """
    erf of a real number.
    """
    x = float(x)
    if x != x:
        # NaN propagates unchanged
        return x
    if x < 0.0:
        # erf is odd
        return -erf(-x)
    if x < 1.0:
        return _erf_taylor(x)
    if x >= 6.0:
        # erf(x) rounds to 1 in double precision for x >= 6
        return 1.0
    return 1.0 - _erfc_mid(x)
def erfc(x):
    """
    erfc of a real number.
    """
    x = float(x)
    if x != x:
        # NaN propagates unchanged
        return x
    if x < 0.0:
        # erfc(-x) = 2 - erfc(x); saturates at 2 below -6
        return 2.0 if x < -6.0 else 2.0 - erfc(-x)
    if x > 9.0:
        return _erfc_asymp(x)
    if x >= 1.0:
        return _erfc_mid(x)
    return 1.0 - _erf_taylor(x)
# 42-point Gauss-Legendre quadrature rule on [-1, 1], as (node, weight)
# pairs; nodes come in +/- pairs sharing a weight.  Used by ei() below
# to integrate exp(t)/t between a reference point and the argument.
gauss42 = [
(0.99839961899006235, 0.0041059986046490839),
(-0.99839961899006235, 0.0041059986046490839),
(0.9915772883408609, 0.009536220301748501),
(-0.9915772883408609,0.009536220301748501),
(0.97934250806374812, 0.014922443697357493),
(-0.97934250806374812, 0.014922443697357493),
(0.96175936533820439,0.020227869569052644),
(-0.96175936533820439, 0.020227869569052644),
(0.93892355735498811, 0.025422959526113047),
(-0.93892355735498811,0.025422959526113047),
(0.91095972490412735, 0.030479240699603467),
(-0.91095972490412735, 0.030479240699603467),
(0.87802056981217269,0.03536907109759211),
(-0.87802056981217269, 0.03536907109759211),
(0.8402859832618168, 0.040065735180692258),
(-0.8402859832618168,0.040065735180692258),
(0.7979620532554873, 0.044543577771965874),
(-0.7979620532554873, 0.044543577771965874),
(0.75127993568948048,0.048778140792803244),
(-0.75127993568948048, 0.048778140792803244),
(0.70049459055617114, 0.052746295699174064),
(-0.70049459055617114,0.052746295699174064),
(0.64588338886924779, 0.056426369358018376),
(-0.64588338886924779, 0.056426369358018376),
(0.58774459748510932, 0.059798262227586649),
(-0.58774459748510932, 0.059798262227586649),
(0.5263957499311922, 0.062843558045002565),
(-0.5263957499311922, 0.062843558045002565),
(0.46217191207042191, 0.065545624364908975),
(-0.46217191207042191, 0.065545624364908975),
(0.39542385204297503, 0.067889703376521934),
(-0.39542385204297503, 0.067889703376521934),
(0.32651612446541151, 0.069862992492594159),
(-0.32651612446541151, 0.069862992492594159),
(0.25582507934287907, 0.071454714265170971),
(-0.25582507934287907, 0.071454714265170971),
(0.18373680656485453, 0.072656175243804091),
(-0.18373680656485453, 0.072656175243804091),
(0.11064502720851986, 0.073460813453467527),
(-0.11064502720851986, 0.073460813453467527),
(0.036948943165351772, 0.073864234232172879),
(-0.036948943165351772, 0.073864234232172879)]
|
| 501 |
+
EI_ASYMP_CONVERGENCE_RADIUS = 40.0
|
| 502 |
+
|
| 503 |
+
def ei_asymp(z, _e1=False):
    """Exponential integral Ei(z) by its asymptotic series.

    Intended for large |z| (see EI_ASYMP_CONVERGENCE_RADIUS).  The
    imaginary part is adjusted by +/- pi*1j depending on which side of
    the branch cut z lies on; with _e1=True the adjustment matches the
    branch convention used by e1().
    """
    r = 1./z
    s = t = 1.0
    k = 1
    # Sum the (divergent) series sum k! / z^k until terms are tiny
    while 1:
        t *= k*r
        s += t
        if abs(t) < 1e-16:
            break
        k += 1
    v = s*exp(z)/z
    if _e1:
        if type(z) is complex:
            zreal = z.real
            zimag = z.imag
        else:
            zreal = z
            zimag = 0.0
        if zimag == 0.0 and zreal > 0.0:
            v += pi*1j
    else:
        if type(z) is complex:
            if z.imag > 0:
                v += pi*1j
            if z.imag < 0:
                v -= pi*1j
    return v
def ei_taylor(z, _e1=False):
    """Exponential integral Ei(z) by its Taylor series (small |z|).

    Ei(z) = euler + log(z) + sum_{k>=1} z^k / (k * k!); with _e1=True
    the logarithm is taken as log(-z) to match e1()'s branch.
    """
    total = power = z
    k = 2
    while True:
        power = power * z / k
        term = power / k
        if abs(term) < 1e-17:
            break
        total += term
        k += 1
    total += euler
    if _e1:
        total += log(-z)
    elif type(z) is float or z.imag == 0.0:
        total += math_log(abs(z))
    else:
        total += cmath.log(z)
    return total
def ei(z, _e1=False):
    """Exponential integral Ei(z) for anything coercible to float/complex.

    Strategy: asymptotic series for |z| > EI_ASYMP_CONVERGENCE_RADIUS;
    Taylor series for |z| <= 2 or positive real z; otherwise numerical
    integration of exp(t)/t (42-point Gauss-Legendre) from a reference
    point where one of the series applies.  _e1=True applies the
    branch adjustments needed by e1().
    """
    typez = type(z)
    if typez not in (float, complex):
        try:
            z = float(z)
            typez = float
        except (TypeError, ValueError):
            z = complex(z)
            typez = complex
    if not z:
        # Ei(0) = -infinity
        return -INF
    absz = abs(z)
    if absz > EI_ASYMP_CONVERGENCE_RADIUS:
        return ei_asymp(z, _e1)
    elif absz <= 2.0 or (typez is float and z > 0.0):
        return ei_taylor(z, _e1)
    # Integrate, starting from whichever is smaller of a Taylor
    # series value or an asymptotic series value
    if typez is complex and z.real > 0.0:
        # reference on the unit circle (Taylor regime)
        zref = z / absz
        ref = ei_taylor(zref, _e1)
    else:
        # reference on the asymptotic-convergence circle
        zref = EI_ASYMP_CONVERGENCE_RADIUS * z / absz
        ref = ei_asymp(zref, _e1)
    # Map [-1, 1] onto the segment from zref to z: t = C*x + D
    C = (zref-z)*0.5
    D = (zref+z)*0.5
    s = 0.0
    if type(z) is complex:
        _exp = cmath.exp
    else:
        _exp = math.exp
    for x,w in gauss42:
        t = C*x+D
        s += w*_exp(t)/t
    # Subtract the integral of exp(t)/t over the segment
    ref -= C*s
    return ref
def e1(z):
    """Exponential integral E1(z), computed as -Ei(-z) on the E1 branch."""
    # Hack: normalize a signed-zero imaginary part to +0.0 so the sign
    # of the branch adjustment in ei() is consistent.
    kind = type(z)
    if kind not in (float, complex):
        try:
            z = float(z)
            kind = float
        except (TypeError, ValueError):
            z = complex(z)
            kind = complex
    if kind is complex and not z.imag:
        z = complex(z.real, 0.0)
    # end hack
    return -ei(-z, _e1=True)
# zeta(n) for integer n = 0, 1, 2, ... (index = n).  Index 1 is an
# unused placeholder: zeta(1) is a pole and is rejected by zeta()
# before this table is consulted.
_zeta_int = [
-0.5,
0.0,
1.6449340668482264365,1.2020569031595942854,1.0823232337111381915,
1.0369277551433699263,1.0173430619844491397,1.0083492773819228268,
1.0040773561979443394,1.0020083928260822144,1.0009945751278180853,
1.0004941886041194646,1.0002460865533080483,1.0001227133475784891,
1.0000612481350587048,1.0000305882363070205,1.0000152822594086519,
1.0000076371976378998,1.0000038172932649998,1.0000019082127165539,
1.0000009539620338728,1.0000004769329867878,1.0000002384505027277,
1.0000001192199259653,1.0000000596081890513,1.0000000298035035147,
1.0000000149015548284]
# Coefficient tables for zeta() below.  _zeta_P/_zeta_Q form a rational
# correction used for 2 < s < 27 (reversed for _polyval's Horner loop);
# _zeta_1 and _zeta_0 are polynomial fits of (s-1)*zeta(s) used for
# 1 < s <= 2 and s <= 1 respectively.
_zeta_P = [-3.50000000087575873, -0.701274355654678147,
    -0.0672313458590012612, -0.00398731457954257841,
    -0.000160948723019303141, -4.67633010038383371e-6,
    -1.02078104417700585e-7, -1.68030037095896287e-9,
    -1.85231868742346722e-11][::-1]

_zeta_Q = [1.00000000000000000, -0.936552848762465319,
    -0.0588835413263763741, -0.00441498861482948666,
    -0.000143416758067432622, -5.10691659585090782e-6,
    -9.58813053268913799e-8, -1.72963791443181972e-9,
    -1.83527919681474132e-11][::-1]

_zeta_1 = [3.03768838606128127e-10, -1.21924525236601262e-8,
    2.01201845887608893e-7, -1.53917240683468381e-6,
    -5.09890411005967954e-7, 0.000122464707271619326,
    -0.000905721539353130232, -0.00239315326074843037,
    0.084239750013159168, 0.418938517907442414, 0.500000001921884009]

_zeta_0 = [-3.46092485016748794e-10, -6.42610089468292485e-9,
    1.76409071536679773e-7, -1.47141263991560698e-6, -6.38880222546167613e-7,
    0.000122641099800668209, -0.000905894913516772796, -0.00239303348507992713,
    0.0842396947501199816, 0.418938533204660256, 0.500000000000000052]
def zeta(s):
    """
    Riemann zeta function for a real argument.

    Raises ValueError at the pole s = 1, and NotImplementedError for
    input that cannot be coerced to float (complex arguments with a
    nonzero imaginary part are not supported).
    """
    if not isinstance(s, (float, int)):
        try:
            s = float(s)
        except (ValueError, TypeError):
            try:
                s = complex(s)
                if not s.imag:
                    return complex(zeta(s.real))
            except (ValueError, TypeError):
                pass
            raise NotImplementedError
    if s == 1:
        raise ValueError("zeta(1) pole")
    if s >= 27:
        # Truncated Dirichlet series; later terms are below double eps
        return 1.0 + 2.0**(-s) + 3.0**(-s)
    n = int(s)
    if n == s:
        if n >= 0:
            # Exact table lookup for nonnegative integers
            return _zeta_int[n]
        if not (n % 2):
            # Trivial zeros at negative even integers
            return 0.0
    if s <= 0.0:
        # Functional equation, reflecting to the argument 1-s
        return 2.**s*pi**(s-1)*_sinpi_real(0.5*s)*_gamma_real(1-s)*zeta(1-s)
    if s <= 2.0:
        # Near the pole: evaluate the smooth function (s-1)*zeta(s)
        # and divide the pole back out
        if s <= 1.0:
            return _polyval(_zeta_0,s)/(s-1)
        return _polyval(_zeta_1,s)/(s-1)
    # 2 < s < 27: Dirichlet series with a rational correction term
    z = _polyval(_zeta_P,s) / _polyval(_zeta_Q,s)
    return 1.0 + 2.0**(-s) + 3.0**(-s) + 4.0**(-s)*z
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/__init__.cpython-311.pyc
ADDED
|
Binary file (213 Bytes). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/extratest_zeta.cpython-311.pyc
ADDED
|
Binary file (1.78 kB). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/runtests.cpython-311.pyc
ADDED
|
Binary file (7.53 kB). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_basic_ops.cpython-311.pyc
ADDED
|
Binary file (39.2 kB). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_bitwise.cpython-311.pyc
ADDED
|
Binary file (16.1 kB). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_compatibility.cpython-311.pyc
ADDED
|
Binary file (4.12 kB). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_convert.cpython-311.pyc
ADDED
|
Binary file (21.5 kB). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_diff.cpython-311.pyc
ADDED
|
Binary file (8.58 kB). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_division.cpython-311.pyc
ADDED
|
Binary file (11.2 kB). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_eigen_symmetric.cpython-311.pyc
ADDED
|
Binary file (21.1 kB). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_functions.cpython-311.pyc
ADDED
|
Binary file (87.5 kB). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_gammazeta.cpython-311.pyc
ADDED
|
Binary file (67.8 kB). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_hp.cpython-311.pyc
ADDED
|
Binary file (13.8 kB). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_identify.cpython-311.pyc
ADDED
|
Binary file (2.05 kB). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_interval.cpython-311.pyc
ADDED
|
Binary file (53.7 kB). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_levin.cpython-311.pyc
ADDED
|
Binary file (13.4 kB). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_linalg.cpython-311.pyc
ADDED
|
Binary file (22.9 kB). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_matrices.cpython-311.pyc
ADDED
|
Binary file (19 kB). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_mpmath.cpython-311.pyc
ADDED
|
Binary file (706 Bytes). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_ode.cpython-311.pyc
ADDED
|
Binary file (2.72 kB). View file
|
|
|
tuning-competition-baseline/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_pickle.cpython-311.pyc
ADDED
|
Binary file (1.3 kB). View file
|
|
|