diff --git a/.venv/lib/python3.11/site-packages/mpmath/__init__.py b/.venv/lib/python3.11/site-packages/mpmath/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..46a7c6f7c0875548f264612b604a9e1574b00a84 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/__init__.py @@ -0,0 +1,468 @@ +__version__ = '1.3.0' + +from .usertools import monitor, timing + +from .ctx_fp import FPContext +from .ctx_mp import MPContext +from .ctx_iv import MPIntervalContext + +fp = FPContext() +mp = MPContext() +iv = MPIntervalContext() + +fp._mp = mp +mp._mp = mp +iv._mp = mp +mp._fp = fp +fp._fp = fp +mp._iv = iv +fp._iv = iv +iv._iv = iv + +# XXX: extremely bad pickle hack +from . import ctx_mp as _ctx_mp +_ctx_mp._mpf_module.mpf = mp.mpf +_ctx_mp._mpf_module.mpc = mp.mpc + +make_mpf = mp.make_mpf +make_mpc = mp.make_mpc + +extraprec = mp.extraprec +extradps = mp.extradps +workprec = mp.workprec +workdps = mp.workdps +autoprec = mp.autoprec +maxcalls = mp.maxcalls +memoize = mp.memoize + +mag = mp.mag + +bernfrac = mp.bernfrac + +qfrom = mp.qfrom +mfrom = mp.mfrom +kfrom = mp.kfrom +taufrom = mp.taufrom +qbarfrom = mp.qbarfrom +ellipfun = mp.ellipfun +jtheta = mp.jtheta +kleinj = mp.kleinj +eta = mp.eta + +qp = mp.qp +qhyper = mp.qhyper +qgamma = mp.qgamma +qfac = mp.qfac + +nint_distance = mp.nint_distance + +plot = mp.plot +cplot = mp.cplot +splot = mp.splot + +odefun = mp.odefun + +jacobian = mp.jacobian +findroot = mp.findroot +multiplicity = mp.multiplicity + +isinf = mp.isinf +isnan = mp.isnan +isnormal = mp.isnormal +isint = mp.isint +isfinite = mp.isfinite +almosteq = mp.almosteq +nan = mp.nan +rand = mp.rand + +absmin = mp.absmin +absmax = mp.absmax + +fraction = mp.fraction + +linspace = mp.linspace +arange = mp.arange + +mpmathify = convert = mp.convert +mpc = mp.mpc + +mpi = iv._mpi + +nstr = mp.nstr +nprint = mp.nprint +chop = mp.chop + +fneg = mp.fneg +fadd = mp.fadd +fsub = mp.fsub +fmul = mp.fmul +fdiv = mp.fdiv +fprod = mp.fprod + 
+quad = mp.quad +quadgl = mp.quadgl +quadts = mp.quadts +quadosc = mp.quadosc +quadsubdiv = mp.quadsubdiv + +invertlaplace = mp.invertlaplace +invlaptalbot = mp.invlaptalbot +invlapstehfest = mp.invlapstehfest +invlapdehoog = mp.invlapdehoog + +pslq = mp.pslq +identify = mp.identify +findpoly = mp.findpoly + +richardson = mp.richardson +shanks = mp.shanks +levin = mp.levin +cohen_alt = mp.cohen_alt +nsum = mp.nsum +nprod = mp.nprod +difference = mp.difference +diff = mp.diff +diffs = mp.diffs +diffs_prod = mp.diffs_prod +diffs_exp = mp.diffs_exp +diffun = mp.diffun +differint = mp.differint +taylor = mp.taylor +pade = mp.pade +polyval = mp.polyval +polyroots = mp.polyroots +fourier = mp.fourier +fourierval = mp.fourierval +sumem = mp.sumem +sumap = mp.sumap +chebyfit = mp.chebyfit +limit = mp.limit + +matrix = mp.matrix +eye = mp.eye +diag = mp.diag +zeros = mp.zeros +ones = mp.ones +hilbert = mp.hilbert +randmatrix = mp.randmatrix +swap_row = mp.swap_row +extend = mp.extend +norm = mp.norm +mnorm = mp.mnorm + +lu_solve = mp.lu_solve +lu = mp.lu +qr = mp.qr +unitvector = mp.unitvector +inverse = mp.inverse +residual = mp.residual +qr_solve = mp.qr_solve +cholesky = mp.cholesky +cholesky_solve = mp.cholesky_solve +det = mp.det +cond = mp.cond +hessenberg = mp.hessenberg +schur = mp.schur +eig = mp.eig +eig_sort = mp.eig_sort +eigsy = mp.eigsy +eighe = mp.eighe +eigh = mp.eigh +svd_r = mp.svd_r +svd_c = mp.svd_c +svd = mp.svd +gauss_quadrature = mp.gauss_quadrature + +expm = mp.expm +sqrtm = mp.sqrtm +powm = mp.powm +logm = mp.logm +sinm = mp.sinm +cosm = mp.cosm + +mpf = mp.mpf +j = mp.j +exp = mp.exp +expj = mp.expj +expjpi = mp.expjpi +ln = mp.ln +im = mp.im +re = mp.re +inf = mp.inf +ninf = mp.ninf +sign = mp.sign + +eps = mp.eps +pi = mp.pi +ln2 = mp.ln2 +ln10 = mp.ln10 +phi = mp.phi +e = mp.e +euler = mp.euler +catalan = mp.catalan +khinchin = mp.khinchin +glaisher = mp.glaisher +apery = mp.apery +degree = mp.degree +twinprime = mp.twinprime +mertens = 
mp.mertens + +ldexp = mp.ldexp +frexp = mp.frexp + +fsum = mp.fsum +fdot = mp.fdot + +sqrt = mp.sqrt +cbrt = mp.cbrt +exp = mp.exp +ln = mp.ln +log = mp.log +log10 = mp.log10 +power = mp.power +cos = mp.cos +sin = mp.sin +tan = mp.tan +cosh = mp.cosh +sinh = mp.sinh +tanh = mp.tanh +acos = mp.acos +asin = mp.asin +atan = mp.atan +asinh = mp.asinh +acosh = mp.acosh +atanh = mp.atanh +sec = mp.sec +csc = mp.csc +cot = mp.cot +sech = mp.sech +csch = mp.csch +coth = mp.coth +asec = mp.asec +acsc = mp.acsc +acot = mp.acot +asech = mp.asech +acsch = mp.acsch +acoth = mp.acoth +cospi = mp.cospi +sinpi = mp.sinpi +sinc = mp.sinc +sincpi = mp.sincpi +cos_sin = mp.cos_sin +cospi_sinpi = mp.cospi_sinpi +fabs = mp.fabs +re = mp.re +im = mp.im +conj = mp.conj +floor = mp.floor +ceil = mp.ceil +nint = mp.nint +frac = mp.frac +root = mp.root +nthroot = mp.nthroot +hypot = mp.hypot +fmod = mp.fmod +ldexp = mp.ldexp +frexp = mp.frexp +sign = mp.sign +arg = mp.arg +phase = mp.phase +polar = mp.polar +rect = mp.rect +degrees = mp.degrees +radians = mp.radians +atan2 = mp.atan2 +fib = mp.fib +fibonacci = mp.fibonacci +lambertw = mp.lambertw +zeta = mp.zeta +altzeta = mp.altzeta +gamma = mp.gamma +rgamma = mp.rgamma +factorial = mp.factorial +fac = mp.fac +fac2 = mp.fac2 +beta = mp.beta +betainc = mp.betainc +psi = mp.psi +#psi0 = mp.psi0 +#psi1 = mp.psi1 +#psi2 = mp.psi2 +#psi3 = mp.psi3 +polygamma = mp.polygamma +digamma = mp.digamma +#trigamma = mp.trigamma +#tetragamma = mp.tetragamma +#pentagamma = mp.pentagamma +harmonic = mp.harmonic +bernoulli = mp.bernoulli +bernfrac = mp.bernfrac +stieltjes = mp.stieltjes +hurwitz = mp.hurwitz +dirichlet = mp.dirichlet +bernpoly = mp.bernpoly +eulerpoly = mp.eulerpoly +eulernum = mp.eulernum +polylog = mp.polylog +clsin = mp.clsin +clcos = mp.clcos +gammainc = mp.gammainc +gammaprod = mp.gammaprod +binomial = mp.binomial +rf = mp.rf +ff = mp.ff +hyper = mp.hyper +hyp0f1 = mp.hyp0f1 +hyp1f1 = mp.hyp1f1 +hyp1f2 = mp.hyp1f2 +hyp2f1 = mp.hyp2f1 
+hyp2f2 = mp.hyp2f2 +hyp2f0 = mp.hyp2f0 +hyp2f3 = mp.hyp2f3 +hyp3f2 = mp.hyp3f2 +hyperu = mp.hyperu +hypercomb = mp.hypercomb +meijerg = mp.meijerg +appellf1 = mp.appellf1 +appellf2 = mp.appellf2 +appellf3 = mp.appellf3 +appellf4 = mp.appellf4 +hyper2d = mp.hyper2d +bihyper = mp.bihyper +erf = mp.erf +erfc = mp.erfc +erfi = mp.erfi +erfinv = mp.erfinv +npdf = mp.npdf +ncdf = mp.ncdf +expint = mp.expint +e1 = mp.e1 +ei = mp.ei +li = mp.li +ci = mp.ci +si = mp.si +chi = mp.chi +shi = mp.shi +fresnels = mp.fresnels +fresnelc = mp.fresnelc +airyai = mp.airyai +airybi = mp.airybi +airyaizero = mp.airyaizero +airybizero = mp.airybizero +scorergi = mp.scorergi +scorerhi = mp.scorerhi +ellipk = mp.ellipk +ellipe = mp.ellipe +ellipf = mp.ellipf +ellippi = mp.ellippi +elliprc = mp.elliprc +elliprj = mp.elliprj +elliprf = mp.elliprf +elliprd = mp.elliprd +elliprg = mp.elliprg +agm = mp.agm +jacobi = mp.jacobi +chebyt = mp.chebyt +chebyu = mp.chebyu +legendre = mp.legendre +legenp = mp.legenp +legenq = mp.legenq +hermite = mp.hermite +pcfd = mp.pcfd +pcfu = mp.pcfu +pcfv = mp.pcfv +pcfw = mp.pcfw +gegenbauer = mp.gegenbauer +laguerre = mp.laguerre +spherharm = mp.spherharm +besselj = mp.besselj +j0 = mp.j0 +j1 = mp.j1 +besseli = mp.besseli +bessely = mp.bessely +besselk = mp.besselk +besseljzero = mp.besseljzero +besselyzero = mp.besselyzero +hankel1 = mp.hankel1 +hankel2 = mp.hankel2 +struveh = mp.struveh +struvel = mp.struvel +angerj = mp.angerj +webere = mp.webere +lommels1 = mp.lommels1 +lommels2 = mp.lommels2 +whitm = mp.whitm +whitw = mp.whitw +ber = mp.ber +bei = mp.bei +ker = mp.ker +kei = mp.kei +coulombc = mp.coulombc +coulombf = mp.coulombf +coulombg = mp.coulombg +barnesg = mp.barnesg +superfac = mp.superfac +hyperfac = mp.hyperfac +loggamma = mp.loggamma +siegeltheta = mp.siegeltheta +siegelz = mp.siegelz +grampoint = mp.grampoint +zetazero = mp.zetazero +riemannr = mp.riemannr +primepi = mp.primepi +primepi2 = mp.primepi2 +primezeta = mp.primezeta +bell = mp.bell 
+polyexp = mp.polyexp +expm1 = mp.expm1 +log1p = mp.log1p +powm1 = mp.powm1 +unitroots = mp.unitroots +cyclotomic = mp.cyclotomic +mangoldt = mp.mangoldt +secondzeta = mp.secondzeta +nzeros = mp.nzeros +backlunds = mp.backlunds +lerchphi = mp.lerchphi +stirling1 = mp.stirling1 +stirling2 = mp.stirling2 +squarew = mp.squarew +trianglew = mp.trianglew +sawtoothw = mp.sawtoothw +unit_triangle = mp.unit_triangle +sigmoid = mp.sigmoid + +# be careful when changing this name, don't use test*! +def runtests(): + """ + Run all mpmath tests and print output. + """ + import os.path + from inspect import getsourcefile + from .tests import runtests as tests + testdir = os.path.dirname(os.path.abspath(getsourcefile(tests))) + importdir = os.path.abspath(testdir + '/../..') + tests.testit(importdir, testdir) + +def doctests(filter=[]): + import sys + from timeit import default_timer as clock + for i, arg in enumerate(sys.argv): + if '__init__.py' in arg: + filter = [sn for sn in sys.argv[i+1:] if not sn.startswith("-")] + break + import doctest + globs = globals().copy() + for obj in globs: #sorted(globs.keys()): + if filter: + if not sum([pat in obj for pat in filter]): + continue + sys.stdout.write(str(obj) + " ") + sys.stdout.flush() + t1 = clock() + doctest.run_docstring_examples(globs[obj], {}, verbose=("-v" in sys.argv)) + t2 = clock() + print(round(t2-t1, 3)) + +if __name__ == '__main__': + doctests() diff --git a/.venv/lib/python3.11/site-packages/mpmath/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57c25c4174fef6987f11ba0bcb3c02886dba4dcf Binary files /dev/null and b/.venv/lib/python3.11/site-packages/mpmath/__pycache__/__init__.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/__pycache__/ctx_base.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/__pycache__/ctx_base.cpython-311.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..3c4b0962921f3c433ccc7ef12fc4d69180aa72ab Binary files /dev/null and b/.venv/lib/python3.11/site-packages/mpmath/__pycache__/ctx_base.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/__pycache__/ctx_mp.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/__pycache__/ctx_mp.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..953826969cec237fd72853402332303ba3de953f Binary files /dev/null and b/.venv/lib/python3.11/site-packages/mpmath/__pycache__/ctx_mp.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/ctx_base.py b/.venv/lib/python3.11/site-packages/mpmath/ctx_base.py new file mode 100644 index 0000000000000000000000000000000000000000..1946f8daf4dbe165b3943be09af361812828aab1 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/ctx_base.py @@ -0,0 +1,494 @@ +from operator import gt, lt + +from .libmp.backend import xrange + +from .functions.functions import SpecialFunctions +from .functions.rszeta import RSCache +from .calculus.quadrature import QuadratureMethods +from .calculus.inverselaplace import LaplaceTransformInversionMethods +from .calculus.calculus import CalculusMethods +from .calculus.optimization import OptimizationMethods +from .calculus.odes import ODEMethods +from .matrices.matrices import MatrixMethods +from .matrices.calculus import MatrixCalculusMethods +from .matrices.linalg import LinearAlgebraMethods +from .matrices.eigen import Eigen +from .identification import IdentificationMethods +from .visualization import VisualizationMethods + +from . 
import libmp + +class Context(object): + pass + +class StandardBaseContext(Context, + SpecialFunctions, + RSCache, + QuadratureMethods, + LaplaceTransformInversionMethods, + CalculusMethods, + MatrixMethods, + MatrixCalculusMethods, + LinearAlgebraMethods, + Eigen, + IdentificationMethods, + OptimizationMethods, + ODEMethods, + VisualizationMethods): + + NoConvergence = libmp.NoConvergence + ComplexResult = libmp.ComplexResult + + def __init__(ctx): + ctx._aliases = {} + # Call those that need preinitialization (e.g. for wrappers) + SpecialFunctions.__init__(ctx) + RSCache.__init__(ctx) + QuadratureMethods.__init__(ctx) + LaplaceTransformInversionMethods.__init__(ctx) + CalculusMethods.__init__(ctx) + MatrixMethods.__init__(ctx) + + def _init_aliases(ctx): + for alias, value in ctx._aliases.items(): + try: + setattr(ctx, alias, getattr(ctx, value)) + except AttributeError: + pass + + _fixed_precision = False + + # XXX + verbose = False + + def warn(ctx, msg): + print("Warning:", msg) + + def bad_domain(ctx, msg): + raise ValueError(msg) + + def _re(ctx, x): + if hasattr(x, "real"): + return x.real + return x + + def _im(ctx, x): + if hasattr(x, "imag"): + return x.imag + return ctx.zero + + def _as_points(ctx, x): + return x + + def fneg(ctx, x, **kwargs): + return -ctx.convert(x) + + def fadd(ctx, x, y, **kwargs): + return ctx.convert(x)+ctx.convert(y) + + def fsub(ctx, x, y, **kwargs): + return ctx.convert(x)-ctx.convert(y) + + def fmul(ctx, x, y, **kwargs): + return ctx.convert(x)*ctx.convert(y) + + def fdiv(ctx, x, y, **kwargs): + return ctx.convert(x)/ctx.convert(y) + + def fsum(ctx, args, absolute=False, squared=False): + if absolute: + if squared: + return sum((abs(x)**2 for x in args), ctx.zero) + return sum((abs(x) for x in args), ctx.zero) + if squared: + return sum((x**2 for x in args), ctx.zero) + return sum(args, ctx.zero) + + def fdot(ctx, xs, ys=None, conjugate=False): + if ys is not None: + xs = zip(xs, ys) + if conjugate: + cf = ctx.conj + return 
sum((x*cf(y) for (x,y) in xs), ctx.zero) + else: + return sum((x*y for (x,y) in xs), ctx.zero) + + def fprod(ctx, args): + prod = ctx.one + for arg in args: + prod *= arg + return prod + + def nprint(ctx, x, n=6, **kwargs): + """ + Equivalent to ``print(nstr(x, n))``. + """ + print(ctx.nstr(x, n, **kwargs)) + + def chop(ctx, x, tol=None): + """ + Chops off small real or imaginary parts, or converts + numbers close to zero to exact zeros. The input can be a + single number or an iterable:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> chop(5+1e-10j, tol=1e-9) + mpf('5.0') + >>> nprint(chop([1.0, 1e-20, 3+1e-18j, -4, 2])) + [1.0, 0.0, 3.0, -4.0, 2.0] + + The tolerance defaults to ``100*eps``. + """ + if tol is None: + tol = 100*ctx.eps + try: + x = ctx.convert(x) + absx = abs(x) + if abs(x) < tol: + return ctx.zero + if ctx._is_complex_type(x): + #part_tol = min(tol, absx*tol) + part_tol = max(tol, absx*tol) + if abs(x.imag) < part_tol: + return x.real + if abs(x.real) < part_tol: + return ctx.mpc(0, x.imag) + except TypeError: + if isinstance(x, ctx.matrix): + return x.apply(lambda a: ctx.chop(a, tol)) + if hasattr(x, "__iter__"): + return [ctx.chop(a, tol) for a in x] + return x + + def almosteq(ctx, s, t, rel_eps=None, abs_eps=None): + r""" + Determine whether the difference between `s` and `t` is smaller + than a given epsilon, either relatively or absolutely. + + Both a maximum relative difference and a maximum difference + ('epsilons') may be specified. The absolute difference is + defined as `|s-t|` and the relative difference is defined + as `|s-t|/\max(|s|, |t|)`. + + If only one epsilon is given, both are set to the same value. + If none is given, both epsilons are set to `2^{-p+m}` where + `p` is the current working precision and `m` is a small + integer. The default setting typically allows :func:`~mpmath.almosteq` + to be used to check for mathematical equality + in the presence of small rounding errors. 
+ + **Examples** + + >>> from mpmath import * + >>> mp.dps = 15 + >>> almosteq(3.141592653589793, 3.141592653589790) + True + >>> almosteq(3.141592653589793, 3.141592653589700) + False + >>> almosteq(3.141592653589793, 3.141592653589700, 1e-10) + True + >>> almosteq(1e-20, 2e-20) + True + >>> almosteq(1e-20, 2e-20, rel_eps=0, abs_eps=0) + False + + """ + t = ctx.convert(t) + if abs_eps is None and rel_eps is None: + rel_eps = abs_eps = ctx.ldexp(1, -ctx.prec+4) + if abs_eps is None: + abs_eps = rel_eps + elif rel_eps is None: + rel_eps = abs_eps + diff = abs(s-t) + if diff <= abs_eps: + return True + abss = abs(s) + abst = abs(t) + if abss < abst: + err = diff/abst + else: + err = diff/abss + return err <= rel_eps + + def arange(ctx, *args): + r""" + This is a generalized version of Python's :func:`~mpmath.range` function + that accepts fractional endpoints and step sizes and + returns a list of ``mpf`` instances. Like :func:`~mpmath.range`, + :func:`~mpmath.arange` can be called with 1, 2 or 3 arguments: + + ``arange(b)`` + `[0, 1, 2, \ldots, x]` + ``arange(a, b)`` + `[a, a+1, a+2, \ldots, x]` + ``arange(a, b, h)`` + `[a, a+h, a+h, \ldots, x]` + + where `b-1 \le x < b` (in the third case, `b-h \le x < b`). + + Like Python's :func:`~mpmath.range`, the endpoint is not included. To + produce ranges where the endpoint is included, :func:`~mpmath.linspace` + is more convenient. 
+ + **Examples** + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> arange(4) + [mpf('0.0'), mpf('1.0'), mpf('2.0'), mpf('3.0')] + >>> arange(1, 2, 0.25) + [mpf('1.0'), mpf('1.25'), mpf('1.5'), mpf('1.75')] + >>> arange(1, -1, -0.75) + [mpf('1.0'), mpf('0.25'), mpf('-0.5')] + + """ + if not len(args) <= 3: + raise TypeError('arange expected at most 3 arguments, got %i' + % len(args)) + if not len(args) >= 1: + raise TypeError('arange expected at least 1 argument, got %i' + % len(args)) + # set default + a = 0 + dt = 1 + # interpret arguments + if len(args) == 1: + b = args[0] + elif len(args) >= 2: + a = args[0] + b = args[1] + if len(args) == 3: + dt = args[2] + a, b, dt = ctx.mpf(a), ctx.mpf(b), ctx.mpf(dt) + assert a + dt != a, 'dt is too small and would cause an infinite loop' + # adapt code for sign of dt + if a > b: + if dt > 0: + return [] + op = gt + else: + if dt < 0: + return [] + op = lt + # create list + result = [] + i = 0 + t = a + while 1: + t = a + dt*i + i += 1 + if op(t, b): + result.append(t) + else: + break + return result + + def linspace(ctx, *args, **kwargs): + """ + ``linspace(a, b, n)`` returns a list of `n` evenly spaced + samples from `a` to `b`. The syntax ``linspace(mpi(a,b), n)`` + is also valid. 
+ + This function is often more convenient than :func:`~mpmath.arange` + for partitioning an interval into subintervals, since + the endpoint is included:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> linspace(1, 4, 4) + [mpf('1.0'), mpf('2.0'), mpf('3.0'), mpf('4.0')] + + You may also provide the keyword argument ``endpoint=False``:: + + >>> linspace(1, 4, 4, endpoint=False) + [mpf('1.0'), mpf('1.75'), mpf('2.5'), mpf('3.25')] + + """ + if len(args) == 3: + a = ctx.mpf(args[0]) + b = ctx.mpf(args[1]) + n = int(args[2]) + elif len(args) == 2: + assert hasattr(args[0], '_mpi_') + a = args[0].a + b = args[0].b + n = int(args[1]) + else: + raise TypeError('linspace expected 2 or 3 arguments, got %i' \ + % len(args)) + if n < 1: + raise ValueError('n must be greater than 0') + if not 'endpoint' in kwargs or kwargs['endpoint']: + if n == 1: + return [ctx.mpf(a)] + step = (b - a) / ctx.mpf(n - 1) + y = [i*step + a for i in xrange(n)] + y[-1] = b + else: + step = (b - a) / ctx.mpf(n) + y = [i*step + a for i in xrange(n)] + return y + + def cos_sin(ctx, z, **kwargs): + return ctx.cos(z, **kwargs), ctx.sin(z, **kwargs) + + def cospi_sinpi(ctx, z, **kwargs): + return ctx.cospi(z, **kwargs), ctx.sinpi(z, **kwargs) + + def _default_hyper_maxprec(ctx, p): + return int(1000 * p**0.25 + 4*p) + + _gcd = staticmethod(libmp.gcd) + list_primes = staticmethod(libmp.list_primes) + isprime = staticmethod(libmp.isprime) + bernfrac = staticmethod(libmp.bernfrac) + moebius = staticmethod(libmp.moebius) + _ifac = staticmethod(libmp.ifac) + _eulernum = staticmethod(libmp.eulernum) + _stirling1 = staticmethod(libmp.stirling1) + _stirling2 = staticmethod(libmp.stirling2) + + def sum_accurately(ctx, terms, check_step=1): + prec = ctx.prec + try: + extraprec = 10 + while 1: + ctx.prec = prec + extraprec + 5 + max_mag = ctx.ninf + s = ctx.zero + k = 0 + for term in terms(): + s += term + if (not k % check_step) and term: + term_mag = ctx.mag(term) + max_mag = 
max(max_mag, term_mag) + sum_mag = ctx.mag(s) + if sum_mag - term_mag > ctx.prec: + break + k += 1 + cancellation = max_mag - sum_mag + if cancellation != cancellation: + break + if cancellation < extraprec or ctx._fixed_precision: + break + extraprec += min(ctx.prec, cancellation) + return s + finally: + ctx.prec = prec + + def mul_accurately(ctx, factors, check_step=1): + prec = ctx.prec + try: + extraprec = 10 + while 1: + ctx.prec = prec + extraprec + 5 + max_mag = ctx.ninf + one = ctx.one + s = one + k = 0 + for factor in factors(): + s *= factor + term = factor - one + if (not k % check_step): + term_mag = ctx.mag(term) + max_mag = max(max_mag, term_mag) + sum_mag = ctx.mag(s-one) + #if sum_mag - term_mag > ctx.prec: + # break + if -term_mag > ctx.prec: + break + k += 1 + cancellation = max_mag - sum_mag + if cancellation != cancellation: + break + if cancellation < extraprec or ctx._fixed_precision: + break + extraprec += min(ctx.prec, cancellation) + return s + finally: + ctx.prec = prec + + def power(ctx, x, y): + r"""Converts `x` and `y` to mpmath numbers and evaluates + `x^y = \exp(y \log(x))`:: + + >>> from mpmath import * + >>> mp.dps = 30; mp.pretty = True + >>> power(2, 0.5) + 1.41421356237309504880168872421 + + This shows the leading few digits of a large Mersenne prime + (performing the exact calculation ``2**43112609-1`` and + displaying the result in Python would be very slow):: + + >>> power(2, 43112609)-1 + 3.16470269330255923143453723949e+12978188 + """ + return ctx.convert(x) ** ctx.convert(y) + + def _zeta_int(ctx, n): + return ctx.zeta(n) + + def maxcalls(ctx, f, N): + """ + Return a wrapped copy of *f* that raises ``NoConvergence`` when *f* + has been called more than *N* times:: + + >>> from mpmath import * + >>> mp.dps = 15 + >>> f = maxcalls(sin, 10) + >>> print(sum(f(n) for n in range(10))) + 1.95520948210738 + >>> f(10) # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... 
+ NoConvergence: maxcalls: function evaluated 10 times + + """ + counter = [0] + def f_maxcalls_wrapped(*args, **kwargs): + counter[0] += 1 + if counter[0] > N: + raise ctx.NoConvergence("maxcalls: function evaluated %i times" % N) + return f(*args, **kwargs) + return f_maxcalls_wrapped + + def memoize(ctx, f): + """ + Return a wrapped copy of *f* that caches computed values, i.e. + a memoized copy of *f*. Values are only reused if the cached precision + is equal to or higher than the working precision:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> f = memoize(maxcalls(sin, 1)) + >>> f(2) + 0.909297426825682 + >>> f(2) + 0.909297426825682 + >>> mp.dps = 25 + >>> f(2) # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + NoConvergence: maxcalls: function evaluated 1 times + + """ + f_cache = {} + def f_cached(*args, **kwargs): + if kwargs: + key = args, tuple(kwargs.items()) + else: + key = args + prec = ctx.prec + if key in f_cache: + cprec, cvalue = f_cache[key] + if cprec >= prec: + return +cvalue + value = f(*args, **kwargs) + f_cache[key] = (prec, value) + return value + f_cached.__name__ = f.__name__ + f_cached.__doc__ = f.__doc__ + return f_cached diff --git a/.venv/lib/python3.11/site-packages/mpmath/ctx_fp.py b/.venv/lib/python3.11/site-packages/mpmath/ctx_fp.py new file mode 100644 index 0000000000000000000000000000000000000000..aa72ea5b03fde4da66b0d8fbf8ffa4012e3f6178 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/ctx_fp.py @@ -0,0 +1,253 @@ +from .ctx_base import StandardBaseContext + +import math +import cmath +from . import math2 + +from . import function_docs + +from .libmp import mpf_bernoulli, to_float, int_types +from . import libmp + +class FPContext(StandardBaseContext): + """ + Context for fast low-precision arithmetic (53-bit precision, giving at most + about 15-digit accuracy), using Python's builtin float and complex. 
+ """ + + def __init__(ctx): + StandardBaseContext.__init__(ctx) + + # Override SpecialFunctions implementation + ctx.loggamma = math2.loggamma + ctx._bernoulli_cache = {} + ctx.pretty = False + + ctx._init_aliases() + + _mpq = lambda cls, x: float(x[0])/x[1] + + NoConvergence = libmp.NoConvergence + + def _get_prec(ctx): return 53 + def _set_prec(ctx, p): return + def _get_dps(ctx): return 15 + def _set_dps(ctx, p): return + + _fixed_precision = True + + prec = property(_get_prec, _set_prec) + dps = property(_get_dps, _set_dps) + + zero = 0.0 + one = 1.0 + eps = math2.EPS + inf = math2.INF + ninf = math2.NINF + nan = math2.NAN + j = 1j + + # Called by SpecialFunctions.__init__() + @classmethod + def _wrap_specfun(cls, name, f, wrap): + if wrap: + def f_wrapped(ctx, *args, **kwargs): + convert = ctx.convert + args = [convert(a) for a in args] + return f(ctx, *args, **kwargs) + else: + f_wrapped = f + f_wrapped.__doc__ = function_docs.__dict__.get(name, f.__doc__) + setattr(cls, name, f_wrapped) + + def bernoulli(ctx, n): + cache = ctx._bernoulli_cache + if n in cache: + return cache[n] + cache[n] = to_float(mpf_bernoulli(n, 53, 'n'), strict=True) + return cache[n] + + pi = math2.pi + e = math2.e + euler = math2.euler + sqrt2 = 1.4142135623730950488 + sqrt5 = 2.2360679774997896964 + phi = 1.6180339887498948482 + ln2 = 0.69314718055994530942 + ln10 = 2.302585092994045684 + euler = 0.57721566490153286061 + catalan = 0.91596559417721901505 + khinchin = 2.6854520010653064453 + apery = 1.2020569031595942854 + glaisher = 1.2824271291006226369 + + absmin = absmax = abs + + def is_special(ctx, x): + return x - x != 0.0 + + def isnan(ctx, x): + return x != x + + def isinf(ctx, x): + return abs(x) == math2.INF + + def isnormal(ctx, x): + if x: + return x - x == 0.0 + return False + + def isnpint(ctx, x): + if type(x) is complex: + if x.imag: + return False + x = x.real + return x <= 0.0 and round(x) == x + + mpf = float + mpc = complex + + def convert(ctx, x): + try: + return 
float(x) + except: + return complex(x) + + power = staticmethod(math2.pow) + sqrt = staticmethod(math2.sqrt) + exp = staticmethod(math2.exp) + ln = log = staticmethod(math2.log) + cos = staticmethod(math2.cos) + sin = staticmethod(math2.sin) + tan = staticmethod(math2.tan) + cos_sin = staticmethod(math2.cos_sin) + acos = staticmethod(math2.acos) + asin = staticmethod(math2.asin) + atan = staticmethod(math2.atan) + cosh = staticmethod(math2.cosh) + sinh = staticmethod(math2.sinh) + tanh = staticmethod(math2.tanh) + gamma = staticmethod(math2.gamma) + rgamma = staticmethod(math2.rgamma) + fac = factorial = staticmethod(math2.factorial) + floor = staticmethod(math2.floor) + ceil = staticmethod(math2.ceil) + cospi = staticmethod(math2.cospi) + sinpi = staticmethod(math2.sinpi) + cbrt = staticmethod(math2.cbrt) + _nthroot = staticmethod(math2.nthroot) + _ei = staticmethod(math2.ei) + _e1 = staticmethod(math2.e1) + _zeta = _zeta_int = staticmethod(math2.zeta) + + # XXX: math2 + def arg(ctx, z): + z = complex(z) + return math.atan2(z.imag, z.real) + + def expj(ctx, x): + return ctx.exp(ctx.j*x) + + def expjpi(ctx, x): + return ctx.exp(ctx.j*ctx.pi*x) + + ldexp = math.ldexp + frexp = math.frexp + + def mag(ctx, z): + if z: + return ctx.frexp(abs(z))[1] + return ctx.ninf + + def isint(ctx, z): + if hasattr(z, "imag"): # float/int don't have .real/.imag in py2.5 + if z.imag: + return False + z = z.real + try: + return z == int(z) + except: + return False + + def nint_distance(ctx, z): + if hasattr(z, "imag"): # float/int don't have .real/.imag in py2.5 + n = round(z.real) + else: + n = round(z) + if n == z: + return n, ctx.ninf + return n, ctx.mag(abs(z-n)) + + def _convert_param(ctx, z): + if type(z) is tuple: + p, q = z + return ctx.mpf(p) / q, 'R' + if hasattr(z, "imag"): # float/int don't have .real/.imag in py2.5 + intz = int(z.real) + else: + intz = int(z) + if z == intz: + return intz, 'Z' + return z, 'R' + + def _is_real_type(ctx, z): + return isinstance(z, float) or 
isinstance(z, int_types) + + def _is_complex_type(ctx, z): + return isinstance(z, complex) + + def hypsum(ctx, p, q, types, coeffs, z, maxterms=6000, **kwargs): + coeffs = list(coeffs) + num = range(p) + den = range(p,p+q) + tol = ctx.eps + s = t = 1.0 + k = 0 + while 1: + for i in num: t *= (coeffs[i]+k) + for i in den: t /= (coeffs[i]+k) + k += 1; t /= k; t *= z; s += t + if abs(t) < tol: + return s + if k > maxterms: + raise ctx.NoConvergence + + def atan2(ctx, x, y): + return math.atan2(x, y) + + def psi(ctx, m, z): + m = int(m) + if m == 0: + return ctx.digamma(z) + return (-1)**(m+1) * ctx.fac(m) * ctx.zeta(m+1, z) + + digamma = staticmethod(math2.digamma) + + def harmonic(ctx, x): + x = ctx.convert(x) + if x == 0 or x == 1: + return x + return ctx.digamma(x+1) + ctx.euler + + nstr = str + + def to_fixed(ctx, x, prec): + return int(math.ldexp(x, prec)) + + def rand(ctx): + import random + return random.random() + + _erf = staticmethod(math2.erf) + _erfc = staticmethod(math2.erfc) + + def sum_accurately(ctx, terms, check_step=1): + s = ctx.zero + k = 0 + for term in terms(): + s += term + if (not k % check_step) and term: + if abs(term) <= 1e-18*abs(s): + break + k += 1 + return s diff --git a/.venv/lib/python3.11/site-packages/mpmath/ctx_iv.py b/.venv/lib/python3.11/site-packages/mpmath/ctx_iv.py new file mode 100644 index 0000000000000000000000000000000000000000..c038e00a5677e318d222b63c22d225e3045e1c2b --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/ctx_iv.py @@ -0,0 +1,551 @@ +import operator + +from . 
import libmp + +from .libmp.backend import basestring + +from .libmp import ( + int_types, MPZ_ONE, + prec_to_dps, dps_to_prec, repr_dps, + round_floor, round_ceiling, + fzero, finf, fninf, fnan, + mpf_le, mpf_neg, + from_int, from_float, from_str, from_rational, + mpi_mid, mpi_delta, mpi_str, + mpi_abs, mpi_pos, mpi_neg, mpi_add, mpi_sub, + mpi_mul, mpi_div, mpi_pow_int, mpi_pow, + mpi_from_str, + mpci_pos, mpci_neg, mpci_add, mpci_sub, mpci_mul, mpci_div, mpci_pow, + mpci_abs, mpci_pow, mpci_exp, mpci_log, + ComplexResult, + mpf_hash, mpc_hash) +from .matrices.matrices import _matrix + +mpi_zero = (fzero, fzero) + +from .ctx_base import StandardBaseContext + +new = object.__new__ + +def convert_mpf_(x, prec, rounding): + if hasattr(x, "_mpf_"): return x._mpf_ + if isinstance(x, int_types): return from_int(x, prec, rounding) + if isinstance(x, float): return from_float(x, prec, rounding) + if isinstance(x, basestring): return from_str(x, prec, rounding) + raise NotImplementedError + + +class ivmpf(object): + """ + Interval arithmetic class. Precision is controlled by iv.prec. 
+ """ + + def __new__(cls, x=0): + return cls.ctx.convert(x) + + def cast(self, cls, f_convert): + a, b = self._mpi_ + if a == b: + return cls(f_convert(a)) + raise ValueError + + def __int__(self): + return self.cast(int, libmp.to_int) + + def __float__(self): + return self.cast(float, libmp.to_float) + + def __complex__(self): + return self.cast(complex, libmp.to_float) + + def __hash__(self): + a, b = self._mpi_ + if a == b: + return mpf_hash(a) + else: + return hash(self._mpi_) + + @property + def real(self): return self + + @property + def imag(self): return self.ctx.zero + + def conjugate(self): return self + + @property + def a(self): + a, b = self._mpi_ + return self.ctx.make_mpf((a, a)) + + @property + def b(self): + a, b = self._mpi_ + return self.ctx.make_mpf((b, b)) + + @property + def mid(self): + ctx = self.ctx + v = mpi_mid(self._mpi_, ctx.prec) + return ctx.make_mpf((v, v)) + + @property + def delta(self): + ctx = self.ctx + v = mpi_delta(self._mpi_, ctx.prec) + return ctx.make_mpf((v,v)) + + @property + def _mpci_(self): + return self._mpi_, mpi_zero + + def _compare(*args): + raise TypeError("no ordering relation is defined for intervals") + + __gt__ = _compare + __le__ = _compare + __gt__ = _compare + __ge__ = _compare + + def __contains__(self, t): + t = self.ctx.mpf(t) + return (self.a <= t.a) and (t.b <= self.b) + + def __str__(self): + return mpi_str(self._mpi_, self.ctx.prec) + + def __repr__(self): + if self.ctx.pretty: + return str(self) + a, b = self._mpi_ + n = repr_dps(self.ctx.prec) + a = libmp.to_str(a, n) + b = libmp.to_str(b, n) + return "mpi(%r, %r)" % (a, b) + + def _compare(s, t, cmpfun): + if not hasattr(t, "_mpi_"): + try: + t = s.ctx.convert(t) + except: + return NotImplemented + return cmpfun(s._mpi_, t._mpi_) + + def __eq__(s, t): return s._compare(t, libmp.mpi_eq) + def __ne__(s, t): return s._compare(t, libmp.mpi_ne) + def __lt__(s, t): return s._compare(t, libmp.mpi_lt) + def __le__(s, t): return s._compare(t, 
libmp.mpi_le) + def __gt__(s, t): return s._compare(t, libmp.mpi_gt) + def __ge__(s, t): return s._compare(t, libmp.mpi_ge) + + def __abs__(self): + return self.ctx.make_mpf(mpi_abs(self._mpi_, self.ctx.prec)) + def __pos__(self): + return self.ctx.make_mpf(mpi_pos(self._mpi_, self.ctx.prec)) + def __neg__(self): + return self.ctx.make_mpf(mpi_neg(self._mpi_, self.ctx.prec)) + + def ae(s, t, rel_eps=None, abs_eps=None): + return s.ctx.almosteq(s, t, rel_eps, abs_eps) + +class ivmpc(object): + + def __new__(cls, re=0, im=0): + re = cls.ctx.convert(re) + im = cls.ctx.convert(im) + y = new(cls) + y._mpci_ = re._mpi_, im._mpi_ + return y + + def __hash__(self): + (a, b), (c,d) = self._mpci_ + if a == b and c == d: + return mpc_hash((a, c)) + else: + return hash(self._mpci_) + + def __repr__(s): + if s.ctx.pretty: + return str(s) + return "iv.mpc(%s, %s)" % (repr(s.real), repr(s.imag)) + + def __str__(s): + return "(%s + %s*j)" % (str(s.real), str(s.imag)) + + @property + def a(self): + (a, b), (c,d) = self._mpci_ + return self.ctx.make_mpf((a, a)) + + @property + def b(self): + (a, b), (c,d) = self._mpci_ + return self.ctx.make_mpf((b, b)) + + @property + def c(self): + (a, b), (c,d) = self._mpci_ + return self.ctx.make_mpf((c, c)) + + @property + def d(self): + (a, b), (c,d) = self._mpci_ + return self.ctx.make_mpf((d, d)) + + @property + def real(s): + return s.ctx.make_mpf(s._mpci_[0]) + + @property + def imag(s): + return s.ctx.make_mpf(s._mpci_[1]) + + def conjugate(s): + a, b = s._mpci_ + return s.ctx.make_mpc((a, mpf_neg(b))) + + def overlap(s, t): + t = s.ctx.convert(t) + real_overlap = (s.a <= t.a <= s.b) or (s.a <= t.b <= s.b) or (t.a <= s.a <= t.b) or (t.a <= s.b <= t.b) + imag_overlap = (s.c <= t.c <= s.d) or (s.c <= t.d <= s.d) or (t.c <= s.c <= t.d) or (t.c <= s.d <= t.d) + return real_overlap and imag_overlap + + def __contains__(s, t): + t = s.ctx.convert(t) + return t.real in s.real and t.imag in s.imag + + def _compare(s, t, ne=False): + if not 
isinstance(t, s.ctx._types): + try: + t = s.ctx.convert(t) + except: + return NotImplemented + if hasattr(t, '_mpi_'): + tval = t._mpi_, mpi_zero + elif hasattr(t, '_mpci_'): + tval = t._mpci_ + if ne: + return s._mpci_ != tval + return s._mpci_ == tval + + def __eq__(s, t): return s._compare(t) + def __ne__(s, t): return s._compare(t, True) + + def __lt__(s, t): raise TypeError("complex intervals cannot be ordered") + __le__ = __gt__ = __ge__ = __lt__ + + def __neg__(s): return s.ctx.make_mpc(mpci_neg(s._mpci_, s.ctx.prec)) + def __pos__(s): return s.ctx.make_mpc(mpci_pos(s._mpci_, s.ctx.prec)) + def __abs__(s): return s.ctx.make_mpf(mpci_abs(s._mpci_, s.ctx.prec)) + + def ae(s, t, rel_eps=None, abs_eps=None): + return s.ctx.almosteq(s, t, rel_eps, abs_eps) + +def _binary_op(f_real, f_complex): + def g_complex(ctx, sval, tval): + return ctx.make_mpc(f_complex(sval, tval, ctx.prec)) + def g_real(ctx, sval, tval): + try: + return ctx.make_mpf(f_real(sval, tval, ctx.prec)) + except ComplexResult: + sval = (sval, mpi_zero) + tval = (tval, mpi_zero) + return g_complex(ctx, sval, tval) + def lop_real(s, t): + if isinstance(t, _matrix): return NotImplemented + ctx = s.ctx + if not isinstance(t, ctx._types): t = ctx.convert(t) + if hasattr(t, "_mpi_"): return g_real(ctx, s._mpi_, t._mpi_) + if hasattr(t, "_mpci_"): return g_complex(ctx, (s._mpi_, mpi_zero), t._mpci_) + return NotImplemented + def rop_real(s, t): + ctx = s.ctx + if not isinstance(t, ctx._types): t = ctx.convert(t) + if hasattr(t, "_mpi_"): return g_real(ctx, t._mpi_, s._mpi_) + if hasattr(t, "_mpci_"): return g_complex(ctx, t._mpci_, (s._mpi_, mpi_zero)) + return NotImplemented + def lop_complex(s, t): + if isinstance(t, _matrix): return NotImplemented + ctx = s.ctx + if not isinstance(t, s.ctx._types): + try: + t = s.ctx.convert(t) + except (ValueError, TypeError): + return NotImplemented + return g_complex(ctx, s._mpci_, t._mpci_) + def rop_complex(s, t): + ctx = s.ctx + if not isinstance(t, 
s.ctx._types): + t = s.ctx.convert(t) + return g_complex(ctx, t._mpci_, s._mpci_) + return lop_real, rop_real, lop_complex, rop_complex + +ivmpf.__add__, ivmpf.__radd__, ivmpc.__add__, ivmpc.__radd__ = _binary_op(mpi_add, mpci_add) +ivmpf.__sub__, ivmpf.__rsub__, ivmpc.__sub__, ivmpc.__rsub__ = _binary_op(mpi_sub, mpci_sub) +ivmpf.__mul__, ivmpf.__rmul__, ivmpc.__mul__, ivmpc.__rmul__ = _binary_op(mpi_mul, mpci_mul) +ivmpf.__div__, ivmpf.__rdiv__, ivmpc.__div__, ivmpc.__rdiv__ = _binary_op(mpi_div, mpci_div) +ivmpf.__pow__, ivmpf.__rpow__, ivmpc.__pow__, ivmpc.__rpow__ = _binary_op(mpi_pow, mpci_pow) + +ivmpf.__truediv__ = ivmpf.__div__; ivmpf.__rtruediv__ = ivmpf.__rdiv__ +ivmpc.__truediv__ = ivmpc.__div__; ivmpc.__rtruediv__ = ivmpc.__rdiv__ + +class ivmpf_constant(ivmpf): + def __new__(cls, f): + self = new(cls) + self._f = f + return self + def _get_mpi_(self): + prec = self.ctx._prec[0] + a = self._f(prec, round_floor) + b = self._f(prec, round_ceiling) + return a, b + _mpi_ = property(_get_mpi_) + +class MPIntervalContext(StandardBaseContext): + + def __init__(ctx): + ctx.mpf = type('ivmpf', (ivmpf,), {}) + ctx.mpc = type('ivmpc', (ivmpc,), {}) + ctx._types = (ctx.mpf, ctx.mpc) + ctx._constant = type('ivmpf_constant', (ivmpf_constant,), {}) + ctx._prec = [53] + ctx._set_prec(53) + ctx._constant._ctxdata = ctx.mpf._ctxdata = ctx.mpc._ctxdata = [ctx.mpf, new, ctx._prec] + ctx._constant.ctx = ctx.mpf.ctx = ctx.mpc.ctx = ctx + ctx.pretty = False + StandardBaseContext.__init__(ctx) + ctx._init_builtins() + + def _mpi(ctx, a, b=None): + if b is None: + return ctx.mpf(a) + return ctx.mpf((a,b)) + + def _init_builtins(ctx): + ctx.one = ctx.mpf(1) + ctx.zero = ctx.mpf(0) + ctx.inf = ctx.mpf('inf') + ctx.ninf = -ctx.inf + ctx.nan = ctx.mpf('nan') + ctx.j = ctx.mpc(0,1) + ctx.exp = ctx._wrap_mpi_function(libmp.mpi_exp, libmp.mpci_exp) + ctx.sqrt = ctx._wrap_mpi_function(libmp.mpi_sqrt) + ctx.ln = ctx._wrap_mpi_function(libmp.mpi_log, libmp.mpci_log) + ctx.cos = 
ctx._wrap_mpi_function(libmp.mpi_cos, libmp.mpci_cos) + ctx.sin = ctx._wrap_mpi_function(libmp.mpi_sin, libmp.mpci_sin) + ctx.tan = ctx._wrap_mpi_function(libmp.mpi_tan) + ctx.gamma = ctx._wrap_mpi_function(libmp.mpi_gamma, libmp.mpci_gamma) + ctx.loggamma = ctx._wrap_mpi_function(libmp.mpi_loggamma, libmp.mpci_loggamma) + ctx.rgamma = ctx._wrap_mpi_function(libmp.mpi_rgamma, libmp.mpci_rgamma) + ctx.factorial = ctx._wrap_mpi_function(libmp.mpi_factorial, libmp.mpci_factorial) + ctx.fac = ctx.factorial + + ctx.eps = ctx._constant(lambda prec, rnd: (0, MPZ_ONE, 1-prec, 1)) + ctx.pi = ctx._constant(libmp.mpf_pi) + ctx.e = ctx._constant(libmp.mpf_e) + ctx.ln2 = ctx._constant(libmp.mpf_ln2) + ctx.ln10 = ctx._constant(libmp.mpf_ln10) + ctx.phi = ctx._constant(libmp.mpf_phi) + ctx.euler = ctx._constant(libmp.mpf_euler) + ctx.catalan = ctx._constant(libmp.mpf_catalan) + ctx.glaisher = ctx._constant(libmp.mpf_glaisher) + ctx.khinchin = ctx._constant(libmp.mpf_khinchin) + ctx.twinprime = ctx._constant(libmp.mpf_twinprime) + + def _wrap_mpi_function(ctx, f_real, f_complex=None): + def g(x, **kwargs): + if kwargs: + prec = kwargs.get('prec', ctx._prec[0]) + else: + prec = ctx._prec[0] + x = ctx.convert(x) + if hasattr(x, "_mpi_"): + return ctx.make_mpf(f_real(x._mpi_, prec)) + if hasattr(x, "_mpci_"): + return ctx.make_mpc(f_complex(x._mpci_, prec)) + raise ValueError + return g + + @classmethod + def _wrap_specfun(cls, name, f, wrap): + if wrap: + def f_wrapped(ctx, *args, **kwargs): + convert = ctx.convert + args = [convert(a) for a in args] + prec = ctx.prec + try: + ctx.prec += 10 + retval = f(ctx, *args, **kwargs) + finally: + ctx.prec = prec + return +retval + else: + f_wrapped = f + setattr(cls, name, f_wrapped) + + def _set_prec(ctx, n): + ctx._prec[0] = max(1, int(n)) + ctx._dps = prec_to_dps(n) + + def _set_dps(ctx, n): + ctx._prec[0] = dps_to_prec(n) + ctx._dps = max(1, int(n)) + + prec = property(lambda ctx: ctx._prec[0], _set_prec) + dps = property(lambda ctx: 
ctx._dps, _set_dps) + + def make_mpf(ctx, v): + a = new(ctx.mpf) + a._mpi_ = v + return a + + def make_mpc(ctx, v): + a = new(ctx.mpc) + a._mpci_ = v + return a + + def _mpq(ctx, pq): + p, q = pq + a = libmp.from_rational(p, q, ctx.prec, round_floor) + b = libmp.from_rational(p, q, ctx.prec, round_ceiling) + return ctx.make_mpf((a, b)) + + def convert(ctx, x): + if isinstance(x, (ctx.mpf, ctx.mpc)): + return x + if isinstance(x, ctx._constant): + return +x + if isinstance(x, complex) or hasattr(x, "_mpc_"): + re = ctx.convert(x.real) + im = ctx.convert(x.imag) + return ctx.mpc(re,im) + if isinstance(x, basestring): + v = mpi_from_str(x, ctx.prec) + return ctx.make_mpf(v) + if hasattr(x, "_mpi_"): + a, b = x._mpi_ + else: + try: + a, b = x + except (TypeError, ValueError): + a = b = x + if hasattr(a, "_mpi_"): + a = a._mpi_[0] + else: + a = convert_mpf_(a, ctx.prec, round_floor) + if hasattr(b, "_mpi_"): + b = b._mpi_[1] + else: + b = convert_mpf_(b, ctx.prec, round_ceiling) + if a == fnan or b == fnan: + a = fninf + b = finf + assert mpf_le(a, b), "endpoints must be properly ordered" + return ctx.make_mpf((a, b)) + + def nstr(ctx, x, n=5, **kwargs): + x = ctx.convert(x) + if hasattr(x, "_mpi_"): + return libmp.mpi_to_str(x._mpi_, n, **kwargs) + if hasattr(x, "_mpci_"): + re = libmp.mpi_to_str(x._mpci_[0], n, **kwargs) + im = libmp.mpi_to_str(x._mpci_[1], n, **kwargs) + return "(%s + %s*j)" % (re, im) + + def mag(ctx, x): + x = ctx.convert(x) + if isinstance(x, ctx.mpc): + return max(ctx.mag(x.real), ctx.mag(x.imag)) + 1 + a, b = libmp.mpi_abs(x._mpi_) + sign, man, exp, bc = b + if man: + return exp+bc + if b == fzero: + return ctx.ninf + if b == fnan: + return ctx.nan + return ctx.inf + + def isnan(ctx, x): + return False + + def isinf(ctx, x): + return x == ctx.inf + + def isint(ctx, x): + x = ctx.convert(x) + a, b = x._mpi_ + if a == b: + sign, man, exp, bc = a + if man: + return exp >= 0 + return a == fzero + return None + + def ldexp(ctx, x, n): + a, b = 
ctx.convert(x)._mpi_ + a = libmp.mpf_shift(a, n) + b = libmp.mpf_shift(b, n) + return ctx.make_mpf((a,b)) + + def absmin(ctx, x): + return abs(ctx.convert(x)).a + + def absmax(ctx, x): + return abs(ctx.convert(x)).b + + def atan2(ctx, y, x): + y = ctx.convert(y)._mpi_ + x = ctx.convert(x)._mpi_ + return ctx.make_mpf(libmp.mpi_atan2(y,x,ctx.prec)) + + def _convert_param(ctx, x): + if isinstance(x, libmp.int_types): + return x, 'Z' + if isinstance(x, tuple): + p, q = x + return (ctx.mpf(p) / ctx.mpf(q), 'R') + x = ctx.convert(x) + if isinstance(x, ctx.mpf): + return x, 'R' + if isinstance(x, ctx.mpc): + return x, 'C' + raise ValueError + + def _is_real_type(ctx, z): + return isinstance(z, ctx.mpf) or isinstance(z, int_types) + + def _is_complex_type(ctx, z): + return isinstance(z, ctx.mpc) + + def hypsum(ctx, p, q, types, coeffs, z, maxterms=6000, **kwargs): + coeffs = list(coeffs) + num = range(p) + den = range(p,p+q) + #tol = ctx.eps + s = t = ctx.one + k = 0 + while 1: + for i in num: t *= (coeffs[i]+k) + for i in den: t /= (coeffs[i]+k) + k += 1; t /= k; t *= z; s += t + if t == 0: + return s + #if abs(t) < tol: + # return s + if k > maxterms: + raise ctx.NoConvergence + + +# Register with "numbers" ABC +# We do not subclass, hence we do not use the @abstractmethod checks. While +# this is less invasive it may turn out that we do not actually support +# parts of the expected interfaces. See +# http://docs.python.org/2/library/numbers.html for list of abstract +# methods. 
+try: + import numbers + numbers.Complex.register(ivmpc) + numbers.Real.register(ivmpf) +except ImportError: + pass diff --git a/.venv/lib/python3.11/site-packages/mpmath/ctx_mp.py b/.venv/lib/python3.11/site-packages/mpmath/ctx_mp.py new file mode 100644 index 0000000000000000000000000000000000000000..93594dd44474a415c74e4b0beb83bd7012666c9d --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/ctx_mp.py @@ -0,0 +1,1339 @@ +""" +This module defines the mpf, mpc classes, and standard functions for +operating with them. +""" +__docformat__ = 'plaintext' + +import functools + +import re + +from .ctx_base import StandardBaseContext + +from .libmp.backend import basestring, BACKEND + +from . import libmp + +from .libmp import (MPZ, MPZ_ZERO, MPZ_ONE, int_types, repr_dps, + round_floor, round_ceiling, dps_to_prec, round_nearest, prec_to_dps, + ComplexResult, to_pickable, from_pickable, normalize, + from_int, from_float, from_str, to_int, to_float, to_str, + from_rational, from_man_exp, + fone, fzero, finf, fninf, fnan, + mpf_abs, mpf_pos, mpf_neg, mpf_add, mpf_sub, mpf_mul, mpf_mul_int, + mpf_div, mpf_rdiv_int, mpf_pow_int, mpf_mod, + mpf_eq, mpf_cmp, mpf_lt, mpf_gt, mpf_le, mpf_ge, + mpf_hash, mpf_rand, + mpf_sum, + bitcount, to_fixed, + mpc_to_str, + mpc_to_complex, mpc_hash, mpc_pos, mpc_is_nonzero, mpc_neg, mpc_conjugate, + mpc_abs, mpc_add, mpc_add_mpf, mpc_sub, mpc_sub_mpf, mpc_mul, mpc_mul_mpf, + mpc_mul_int, mpc_div, mpc_div_mpf, mpc_pow, mpc_pow_mpf, mpc_pow_int, + mpc_mpf_div, + mpf_pow, + mpf_pi, mpf_degree, mpf_e, mpf_phi, mpf_ln2, mpf_ln10, + mpf_euler, mpf_catalan, mpf_apery, mpf_khinchin, + mpf_glaisher, mpf_twinprime, mpf_mertens, + int_types) + +from . import function_docs +from . import rational + +new = object.__new__ + +get_complex = re.compile(r'^\(?(?P[\+\-]?\d*(\.\d*)?(e[\+\-]?\d+)?)??' 
+ r'(?P[\+\-]?\d*(\.\d*)?(e[\+\-]?\d+)?j)?\)?$') + +if BACKEND == 'sage': + from sage.libs.mpmath.ext_main import Context as BaseMPContext + # pickle hack + import sage.libs.mpmath.ext_main as _mpf_module +else: + from .ctx_mp_python import PythonMPContext as BaseMPContext + from . import ctx_mp_python as _mpf_module + +from .ctx_mp_python import _mpf, _mpc, mpnumeric + +class MPContext(BaseMPContext, StandardBaseContext): + """ + Context for multiprecision arithmetic with a global precision. + """ + + def __init__(ctx): + BaseMPContext.__init__(ctx) + ctx.trap_complex = False + ctx.pretty = False + ctx.types = [ctx.mpf, ctx.mpc, ctx.constant] + ctx._mpq = rational.mpq + ctx.default() + StandardBaseContext.__init__(ctx) + + ctx.mpq = rational.mpq + ctx.init_builtins() + + ctx.hyp_summators = {} + + ctx._init_aliases() + + # XXX: automate + try: + ctx.bernoulli.im_func.func_doc = function_docs.bernoulli + ctx.primepi.im_func.func_doc = function_docs.primepi + ctx.psi.im_func.func_doc = function_docs.psi + ctx.atan2.im_func.func_doc = function_docs.atan2 + except AttributeError: + # python 3 + ctx.bernoulli.__func__.func_doc = function_docs.bernoulli + ctx.primepi.__func__.func_doc = function_docs.primepi + ctx.psi.__func__.func_doc = function_docs.psi + ctx.atan2.__func__.func_doc = function_docs.atan2 + + ctx.digamma.func_doc = function_docs.digamma + ctx.cospi.func_doc = function_docs.cospi + ctx.sinpi.func_doc = function_docs.sinpi + + def init_builtins(ctx): + + mpf = ctx.mpf + mpc = ctx.mpc + + # Exact constants + ctx.one = ctx.make_mpf(fone) + ctx.zero = ctx.make_mpf(fzero) + ctx.j = ctx.make_mpc((fzero,fone)) + ctx.inf = ctx.make_mpf(finf) + ctx.ninf = ctx.make_mpf(fninf) + ctx.nan = ctx.make_mpf(fnan) + + eps = ctx.constant(lambda prec, rnd: (0, MPZ_ONE, 1-prec, 1), + "epsilon of working precision", "eps") + ctx.eps = eps + + # Approximate constants + ctx.pi = ctx.constant(mpf_pi, "pi", "pi") + ctx.ln2 = ctx.constant(mpf_ln2, "ln(2)", "ln2") + ctx.ln10 = 
ctx.constant(mpf_ln10, "ln(10)", "ln10") + ctx.phi = ctx.constant(mpf_phi, "Golden ratio phi", "phi") + ctx.e = ctx.constant(mpf_e, "e = exp(1)", "e") + ctx.euler = ctx.constant(mpf_euler, "Euler's constant", "euler") + ctx.catalan = ctx.constant(mpf_catalan, "Catalan's constant", "catalan") + ctx.khinchin = ctx.constant(mpf_khinchin, "Khinchin's constant", "khinchin") + ctx.glaisher = ctx.constant(mpf_glaisher, "Glaisher's constant", "glaisher") + ctx.apery = ctx.constant(mpf_apery, "Apery's constant", "apery") + ctx.degree = ctx.constant(mpf_degree, "1 deg = pi / 180", "degree") + ctx.twinprime = ctx.constant(mpf_twinprime, "Twin prime constant", "twinprime") + ctx.mertens = ctx.constant(mpf_mertens, "Mertens' constant", "mertens") + + # Standard functions + ctx.sqrt = ctx._wrap_libmp_function(libmp.mpf_sqrt, libmp.mpc_sqrt) + ctx.cbrt = ctx._wrap_libmp_function(libmp.mpf_cbrt, libmp.mpc_cbrt) + ctx.ln = ctx._wrap_libmp_function(libmp.mpf_log, libmp.mpc_log) + ctx.atan = ctx._wrap_libmp_function(libmp.mpf_atan, libmp.mpc_atan) + ctx.exp = ctx._wrap_libmp_function(libmp.mpf_exp, libmp.mpc_exp) + ctx.expj = ctx._wrap_libmp_function(libmp.mpf_expj, libmp.mpc_expj) + ctx.expjpi = ctx._wrap_libmp_function(libmp.mpf_expjpi, libmp.mpc_expjpi) + ctx.sin = ctx._wrap_libmp_function(libmp.mpf_sin, libmp.mpc_sin) + ctx.cos = ctx._wrap_libmp_function(libmp.mpf_cos, libmp.mpc_cos) + ctx.tan = ctx._wrap_libmp_function(libmp.mpf_tan, libmp.mpc_tan) + ctx.sinh = ctx._wrap_libmp_function(libmp.mpf_sinh, libmp.mpc_sinh) + ctx.cosh = ctx._wrap_libmp_function(libmp.mpf_cosh, libmp.mpc_cosh) + ctx.tanh = ctx._wrap_libmp_function(libmp.mpf_tanh, libmp.mpc_tanh) + ctx.asin = ctx._wrap_libmp_function(libmp.mpf_asin, libmp.mpc_asin) + ctx.acos = ctx._wrap_libmp_function(libmp.mpf_acos, libmp.mpc_acos) + ctx.atan = ctx._wrap_libmp_function(libmp.mpf_atan, libmp.mpc_atan) + ctx.asinh = ctx._wrap_libmp_function(libmp.mpf_asinh, libmp.mpc_asinh) + ctx.acosh = 
ctx._wrap_libmp_function(libmp.mpf_acosh, libmp.mpc_acosh) + ctx.atanh = ctx._wrap_libmp_function(libmp.mpf_atanh, libmp.mpc_atanh) + ctx.sinpi = ctx._wrap_libmp_function(libmp.mpf_sin_pi, libmp.mpc_sin_pi) + ctx.cospi = ctx._wrap_libmp_function(libmp.mpf_cos_pi, libmp.mpc_cos_pi) + ctx.floor = ctx._wrap_libmp_function(libmp.mpf_floor, libmp.mpc_floor) + ctx.ceil = ctx._wrap_libmp_function(libmp.mpf_ceil, libmp.mpc_ceil) + ctx.nint = ctx._wrap_libmp_function(libmp.mpf_nint, libmp.mpc_nint) + ctx.frac = ctx._wrap_libmp_function(libmp.mpf_frac, libmp.mpc_frac) + ctx.fib = ctx.fibonacci = ctx._wrap_libmp_function(libmp.mpf_fibonacci, libmp.mpc_fibonacci) + + ctx.gamma = ctx._wrap_libmp_function(libmp.mpf_gamma, libmp.mpc_gamma) + ctx.rgamma = ctx._wrap_libmp_function(libmp.mpf_rgamma, libmp.mpc_rgamma) + ctx.loggamma = ctx._wrap_libmp_function(libmp.mpf_loggamma, libmp.mpc_loggamma) + ctx.fac = ctx.factorial = ctx._wrap_libmp_function(libmp.mpf_factorial, libmp.mpc_factorial) + + ctx.digamma = ctx._wrap_libmp_function(libmp.mpf_psi0, libmp.mpc_psi0) + ctx.harmonic = ctx._wrap_libmp_function(libmp.mpf_harmonic, libmp.mpc_harmonic) + ctx.ei = ctx._wrap_libmp_function(libmp.mpf_ei, libmp.mpc_ei) + ctx.e1 = ctx._wrap_libmp_function(libmp.mpf_e1, libmp.mpc_e1) + ctx._ci = ctx._wrap_libmp_function(libmp.mpf_ci, libmp.mpc_ci) + ctx._si = ctx._wrap_libmp_function(libmp.mpf_si, libmp.mpc_si) + ctx.ellipk = ctx._wrap_libmp_function(libmp.mpf_ellipk, libmp.mpc_ellipk) + ctx._ellipe = ctx._wrap_libmp_function(libmp.mpf_ellipe, libmp.mpc_ellipe) + ctx.agm1 = ctx._wrap_libmp_function(libmp.mpf_agm1, libmp.mpc_agm1) + ctx._erf = ctx._wrap_libmp_function(libmp.mpf_erf, None) + ctx._erfc = ctx._wrap_libmp_function(libmp.mpf_erfc, None) + ctx._zeta = ctx._wrap_libmp_function(libmp.mpf_zeta, libmp.mpc_zeta) + ctx._altzeta = ctx._wrap_libmp_function(libmp.mpf_altzeta, libmp.mpc_altzeta) + + # Faster versions + ctx.sqrt = getattr(ctx, "_sage_sqrt", ctx.sqrt) + ctx.exp = getattr(ctx, 
"_sage_exp", ctx.exp) + ctx.ln = getattr(ctx, "_sage_ln", ctx.ln) + ctx.cos = getattr(ctx, "_sage_cos", ctx.cos) + ctx.sin = getattr(ctx, "_sage_sin", ctx.sin) + + def to_fixed(ctx, x, prec): + return x.to_fixed(prec) + + def hypot(ctx, x, y): + r""" + Computes the Euclidean norm of the vector `(x, y)`, equal + to `\sqrt{x^2 + y^2}`. Both `x` and `y` must be real.""" + x = ctx.convert(x) + y = ctx.convert(y) + return ctx.make_mpf(libmp.mpf_hypot(x._mpf_, y._mpf_, *ctx._prec_rounding)) + + def _gamma_upper_int(ctx, n, z): + n = int(ctx._re(n)) + if n == 0: + return ctx.e1(z) + if not hasattr(z, '_mpf_'): + raise NotImplementedError + prec, rounding = ctx._prec_rounding + real, imag = libmp.mpf_expint(n, z._mpf_, prec, rounding, gamma=True) + if imag is None: + return ctx.make_mpf(real) + else: + return ctx.make_mpc((real, imag)) + + def _expint_int(ctx, n, z): + n = int(n) + if n == 1: + return ctx.e1(z) + if not hasattr(z, '_mpf_'): + raise NotImplementedError + prec, rounding = ctx._prec_rounding + real, imag = libmp.mpf_expint(n, z._mpf_, prec, rounding) + if imag is None: + return ctx.make_mpf(real) + else: + return ctx.make_mpc((real, imag)) + + def _nthroot(ctx, x, n): + if hasattr(x, '_mpf_'): + try: + return ctx.make_mpf(libmp.mpf_nthroot(x._mpf_, n, *ctx._prec_rounding)) + except ComplexResult: + if ctx.trap_complex: + raise + x = (x._mpf_, libmp.fzero) + else: + x = x._mpc_ + return ctx.make_mpc(libmp.mpc_nthroot(x, n, *ctx._prec_rounding)) + + def _besselj(ctx, n, z): + prec, rounding = ctx._prec_rounding + if hasattr(z, '_mpf_'): + return ctx.make_mpf(libmp.mpf_besseljn(n, z._mpf_, prec, rounding)) + elif hasattr(z, '_mpc_'): + return ctx.make_mpc(libmp.mpc_besseljn(n, z._mpc_, prec, rounding)) + + def _agm(ctx, a, b=1): + prec, rounding = ctx._prec_rounding + if hasattr(a, '_mpf_') and hasattr(b, '_mpf_'): + try: + v = libmp.mpf_agm(a._mpf_, b._mpf_, prec, rounding) + return ctx.make_mpf(v) + except ComplexResult: + pass + if hasattr(a, '_mpf_'): a = 
(a._mpf_, libmp.fzero) + else: a = a._mpc_ + if hasattr(b, '_mpf_'): b = (b._mpf_, libmp.fzero) + else: b = b._mpc_ + return ctx.make_mpc(libmp.mpc_agm(a, b, prec, rounding)) + + def bernoulli(ctx, n): + return ctx.make_mpf(libmp.mpf_bernoulli(int(n), *ctx._prec_rounding)) + + def _zeta_int(ctx, n): + return ctx.make_mpf(libmp.mpf_zeta_int(int(n), *ctx._prec_rounding)) + + def atan2(ctx, y, x): + x = ctx.convert(x) + y = ctx.convert(y) + return ctx.make_mpf(libmp.mpf_atan2(y._mpf_, x._mpf_, *ctx._prec_rounding)) + + def psi(ctx, m, z): + z = ctx.convert(z) + m = int(m) + if ctx._is_real_type(z): + return ctx.make_mpf(libmp.mpf_psi(m, z._mpf_, *ctx._prec_rounding)) + else: + return ctx.make_mpc(libmp.mpc_psi(m, z._mpc_, *ctx._prec_rounding)) + + def cos_sin(ctx, x, **kwargs): + if type(x) not in ctx.types: + x = ctx.convert(x) + prec, rounding = ctx._parse_prec(kwargs) + if hasattr(x, '_mpf_'): + c, s = libmp.mpf_cos_sin(x._mpf_, prec, rounding) + return ctx.make_mpf(c), ctx.make_mpf(s) + elif hasattr(x, '_mpc_'): + c, s = libmp.mpc_cos_sin(x._mpc_, prec, rounding) + return ctx.make_mpc(c), ctx.make_mpc(s) + else: + return ctx.cos(x, **kwargs), ctx.sin(x, **kwargs) + + def cospi_sinpi(ctx, x, **kwargs): + if type(x) not in ctx.types: + x = ctx.convert(x) + prec, rounding = ctx._parse_prec(kwargs) + if hasattr(x, '_mpf_'): + c, s = libmp.mpf_cos_sin_pi(x._mpf_, prec, rounding) + return ctx.make_mpf(c), ctx.make_mpf(s) + elif hasattr(x, '_mpc_'): + c, s = libmp.mpc_cos_sin_pi(x._mpc_, prec, rounding) + return ctx.make_mpc(c), ctx.make_mpc(s) + else: + return ctx.cos(x, **kwargs), ctx.sin(x, **kwargs) + + def clone(ctx): + """ + Create a copy of the context, with the same working precision. + """ + a = ctx.__class__() + a.prec = ctx.prec + return a + + # Several helper methods + # TODO: add more of these, make consistent, write docstrings, ... 
+ + def _is_real_type(ctx, x): + if hasattr(x, '_mpc_') or type(x) is complex: + return False + return True + + def _is_complex_type(ctx, x): + if hasattr(x, '_mpc_') or type(x) is complex: + return True + return False + + def isnan(ctx, x): + """ + Return *True* if *x* is a NaN (not-a-number), or for a complex + number, whether either the real or complex part is NaN; + otherwise return *False*:: + + >>> from mpmath import * + >>> isnan(3.14) + False + >>> isnan(nan) + True + >>> isnan(mpc(3.14,2.72)) + False + >>> isnan(mpc(3.14,nan)) + True + + """ + if hasattr(x, "_mpf_"): + return x._mpf_ == fnan + if hasattr(x, "_mpc_"): + return fnan in x._mpc_ + if isinstance(x, int_types) or isinstance(x, rational.mpq): + return False + x = ctx.convert(x) + if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'): + return ctx.isnan(x) + raise TypeError("isnan() needs a number as input") + + def isfinite(ctx, x): + """ + Return *True* if *x* is a finite number, i.e. neither + an infinity or a NaN. + + >>> from mpmath import * + >>> isfinite(inf) + False + >>> isfinite(-inf) + False + >>> isfinite(3) + True + >>> isfinite(nan) + False + >>> isfinite(3+4j) + True + >>> isfinite(mpc(3,inf)) + False + >>> isfinite(mpc(nan,3)) + False + + """ + if ctx.isinf(x) or ctx.isnan(x): + return False + return True + + def isnpint(ctx, x): + """ + Determine if *x* is a nonpositive integer. 
+ """ + if not x: + return True + if hasattr(x, '_mpf_'): + sign, man, exp, bc = x._mpf_ + return sign and exp >= 0 + if hasattr(x, '_mpc_'): + return not x.imag and ctx.isnpint(x.real) + if type(x) in int_types: + return x <= 0 + if isinstance(x, ctx.mpq): + p, q = x._mpq_ + if not p: + return True + return q == 1 and p <= 0 + return ctx.isnpint(ctx.convert(x)) + + def __str__(ctx): + lines = ["Mpmath settings:", + (" mp.prec = %s" % ctx.prec).ljust(30) + "[default: 53]", + (" mp.dps = %s" % ctx.dps).ljust(30) + "[default: 15]", + (" mp.trap_complex = %s" % ctx.trap_complex).ljust(30) + "[default: False]", + ] + return "\n".join(lines) + + @property + def _repr_digits(ctx): + return repr_dps(ctx._prec) + + @property + def _str_digits(ctx): + return ctx._dps + + def extraprec(ctx, n, normalize_output=False): + """ + The block + + with extraprec(n): + + + increases the precision n bits, executes , and then + restores the precision. + + extraprec(n)(f) returns a decorated version of the function f + that increases the working precision by n bits before execution, + and restores the parent precision afterwards. With + normalize_output=True, it rounds the return value to the parent + precision. + """ + return PrecisionManager(ctx, lambda p: p + n, None, normalize_output) + + def extradps(ctx, n, normalize_output=False): + """ + This function is analogous to extraprec (see documentation) + but changes the decimal precision instead of the number of bits. + """ + return PrecisionManager(ctx, None, lambda d: d + n, normalize_output) + + def workprec(ctx, n, normalize_output=False): + """ + The block + + with workprec(n): + + + sets the precision to n bits, executes , and then restores + the precision. + + workprec(n)(f) returns a decorated version of the function f + that sets the precision to n bits before execution, + and restores the precision afterwards. With normalize_output=True, + it rounds the return value to the parent precision. 
+ """ + return PrecisionManager(ctx, lambda p: n, None, normalize_output) + + def workdps(ctx, n, normalize_output=False): + """ + This function is analogous to workprec (see documentation) + but changes the decimal precision instead of the number of bits. + """ + return PrecisionManager(ctx, None, lambda d: n, normalize_output) + + def autoprec(ctx, f, maxprec=None, catch=(), verbose=False): + r""" + Return a wrapped copy of *f* that repeatedly evaluates *f* + with increasing precision until the result converges to the + full precision used at the point of the call. + + This heuristically protects against rounding errors, at the cost of + roughly a 2x slowdown compared to manually setting the optimal + precision. This method can, however, easily be fooled if the results + from *f* depend "discontinuously" on the precision, for instance + if catastrophic cancellation can occur. Therefore, :func:`~mpmath.autoprec` + should be used judiciously. + + **Examples** + + Many functions are sensitive to perturbations of the input arguments. + If the arguments are decimal numbers, they may have to be converted + to binary at a much higher precision. If the amount of required + extra precision is unknown, :func:`~mpmath.autoprec` is convenient:: + + >>> from mpmath import * + >>> mp.dps = 15 + >>> mp.pretty = True + >>> besselj(5, 125 * 10**28) # Exact input + -8.03284785591801e-17 + >>> besselj(5, '1.25e30') # Bad + 7.12954868316652e-16 + >>> autoprec(besselj)(5, '1.25e30') # Good + -8.03284785591801e-17 + + The following fails to converge because `\sin(\pi) = 0` whereas all + finite-precision approximations of `\pi` give nonzero values:: + + >>> autoprec(sin)(pi) # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... 
+ NoConvergence: autoprec: prec increased to 2910 without convergence + + As the following example shows, :func:`~mpmath.autoprec` can protect against + cancellation, but is fooled by too severe cancellation:: + + >>> x = 1e-10 + >>> exp(x)-1; expm1(x); autoprec(lambda t: exp(t)-1)(x) + 1.00000008274037e-10 + 1.00000000005e-10 + 1.00000000005e-10 + >>> x = 1e-50 + >>> exp(x)-1; expm1(x); autoprec(lambda t: exp(t)-1)(x) + 0.0 + 1.0e-50 + 0.0 + + With *catch*, an exception or list of exceptions to intercept + may be specified. The raised exception is interpreted + as signaling insufficient precision. This permits, for example, + evaluating a function where a too low precision results in a + division by zero:: + + >>> f = lambda x: 1/(exp(x)-1) + >>> f(1e-30) + Traceback (most recent call last): + ... + ZeroDivisionError + >>> autoprec(f, catch=ZeroDivisionError)(1e-30) + 1.0e+30 + + + """ + def f_autoprec_wrapped(*args, **kwargs): + prec = ctx.prec + if maxprec is None: + maxprec2 = ctx._default_hyper_maxprec(prec) + else: + maxprec2 = maxprec + try: + ctx.prec = prec + 10 + try: + v1 = f(*args, **kwargs) + except catch: + v1 = ctx.nan + prec2 = prec + 20 + while 1: + ctx.prec = prec2 + try: + v2 = f(*args, **kwargs) + except catch: + v2 = ctx.nan + if v1 == v2: + break + err = ctx.mag(v2-v1) - ctx.mag(v2) + if err < (-prec): + break + if verbose: + print("autoprec: target=%s, prec=%s, accuracy=%s" \ + % (prec, prec2, -err)) + v1 = v2 + if prec2 >= maxprec2: + raise ctx.NoConvergence(\ + "autoprec: prec increased to %i without convergence"\ + % prec2) + prec2 += int(prec2*2) + prec2 = min(prec2, maxprec2) + finally: + ctx.prec = prec + return +v2 + return f_autoprec_wrapped + + def nstr(ctx, x, n=6, **kwargs): + """ + Convert an ``mpf`` or ``mpc`` to a decimal string literal with *n* + significant digits. The small default value for *n* is chosen to + make this function useful for printing collections of numbers + (lists, matrices, etc). 
+ + If *x* is a list or tuple, :func:`~mpmath.nstr` is applied recursively + to each element. For unrecognized classes, :func:`~mpmath.nstr` + simply returns ``str(x)``. + + The companion function :func:`~mpmath.nprint` prints the result + instead of returning it. + + The keyword arguments *strip_zeros*, *min_fixed*, *max_fixed* + and *show_zero_exponent* are forwarded to :func:`~mpmath.libmp.to_str`. + + The number will be printed in fixed-point format if the position + of the leading digit is strictly between min_fixed + (default = min(-dps/3,-5)) and max_fixed (default = dps). + + To force fixed-point format always, set min_fixed = -inf, + max_fixed = +inf. To force floating-point format, set + min_fixed >= max_fixed. + + >>> from mpmath import * + >>> nstr([+pi, ldexp(1,-500)]) + '[3.14159, 3.05494e-151]' + >>> nprint([+pi, ldexp(1,-500)]) + [3.14159, 3.05494e-151] + >>> nstr(mpf("5e-10"), 5) + '5.0e-10' + >>> nstr(mpf("5e-10"), 5, strip_zeros=False) + '5.0000e-10' + >>> nstr(mpf("5e-10"), 5, strip_zeros=False, min_fixed=-11) + '0.00000000050000' + >>> nstr(mpf(0), 5, show_zero_exponent=True) + '0.0e+0' + + """ + if isinstance(x, list): + return "[%s]" % (", ".join(ctx.nstr(c, n, **kwargs) for c in x)) + if isinstance(x, tuple): + return "(%s)" % (", ".join(ctx.nstr(c, n, **kwargs) for c in x)) + if hasattr(x, '_mpf_'): + return to_str(x._mpf_, n, **kwargs) + if hasattr(x, '_mpc_'): + return "(" + mpc_to_str(x._mpc_, n, **kwargs) + ")" + if isinstance(x, basestring): + return repr(x) + if isinstance(x, ctx.matrix): + return x.__nstr__(n, **kwargs) + return str(x) + + def _convert_fallback(ctx, x, strings): + if strings and isinstance(x, basestring): + if 'j' in x.lower(): + x = x.lower().replace(' ', '') + match = get_complex.match(x) + re = match.group('re') + if not re: + re = 0 + im = match.group('im').rstrip('j') + return ctx.mpc(ctx.convert(re), ctx.convert(im)) + if hasattr(x, "_mpi_"): + a, b = x._mpi_ + if a == b: + return ctx.make_mpf(a) + else: + 
raise ValueError("can only create mpf from zero-width interval") + raise TypeError("cannot create mpf from " + repr(x)) + + def mpmathify(ctx, *args, **kwargs): + return ctx.convert(*args, **kwargs) + + def _parse_prec(ctx, kwargs): + if kwargs: + if kwargs.get('exact'): + return 0, 'f' + prec, rounding = ctx._prec_rounding + if 'rounding' in kwargs: + rounding = kwargs['rounding'] + if 'prec' in kwargs: + prec = kwargs['prec'] + if prec == ctx.inf: + return 0, 'f' + else: + prec = int(prec) + elif 'dps' in kwargs: + dps = kwargs['dps'] + if dps == ctx.inf: + return 0, 'f' + prec = dps_to_prec(dps) + return prec, rounding + return ctx._prec_rounding + + _exact_overflow_msg = "the exact result does not fit in memory" + + _hypsum_msg = """hypsum() failed to converge to the requested %i bits of accuracy +using a working precision of %i bits. Try with a higher maxprec, +maxterms, or set zeroprec.""" + + def hypsum(ctx, p, q, flags, coeffs, z, accurate_small=True, **kwargs): + if hasattr(z, "_mpf_"): + key = p, q, flags, 'R' + v = z._mpf_ + elif hasattr(z, "_mpc_"): + key = p, q, flags, 'C' + v = z._mpc_ + if key not in ctx.hyp_summators: + ctx.hyp_summators[key] = libmp.make_hyp_summator(key)[1] + summator = ctx.hyp_summators[key] + prec = ctx.prec + maxprec = kwargs.get('maxprec', ctx._default_hyper_maxprec(prec)) + extraprec = 50 + epsshift = 25 + # Jumps in magnitude occur when parameters are close to negative + # integers. 
We must ensure that these terms are included in + # the sum and added accurately + magnitude_check = {} + max_total_jump = 0 + for i, c in enumerate(coeffs): + if flags[i] == 'Z': + if i >= p and c <= 0: + ok = False + for ii, cc in enumerate(coeffs[:p]): + # Note: c <= cc or c < cc, depending on convention + if flags[ii] == 'Z' and cc <= 0 and c <= cc: + ok = True + if not ok: + raise ZeroDivisionError("pole in hypergeometric series") + continue + n, d = ctx.nint_distance(c) + n = -int(n) + d = -d + if i >= p and n >= 0 and d > 4: + if n in magnitude_check: + magnitude_check[n] += d + else: + magnitude_check[n] = d + extraprec = max(extraprec, d - prec + 60) + max_total_jump += abs(d) + while 1: + if extraprec > maxprec: + raise ValueError(ctx._hypsum_msg % (prec, prec+extraprec)) + wp = prec + extraprec + if magnitude_check: + mag_dict = dict((n,None) for n in magnitude_check) + else: + mag_dict = {} + zv, have_complex, magnitude = summator(coeffs, v, prec, wp, \ + epsshift, mag_dict, **kwargs) + cancel = -magnitude + jumps_resolved = True + if extraprec < max_total_jump: + for n in mag_dict.values(): + if (n is None) or (n < prec): + jumps_resolved = False + break + accurate = (cancel < extraprec-25-5 or not accurate_small) + if jumps_resolved: + if accurate: + break + # zero? + zeroprec = kwargs.get('zeroprec') + if zeroprec is not None: + if cancel > zeroprec: + if have_complex: + return ctx.mpc(0) + else: + return ctx.zero + + # Some near-singularities were not included, so increase + # precision and repeat until they are + extraprec *= 2 + # Possible workaround for bad roundoff in fixed-point arithmetic + epsshift += 5 + extraprec += 5 + + if type(zv) is tuple: + if have_complex: + return ctx.make_mpc(zv) + else: + return ctx.make_mpf(zv) + else: + return zv + + def ldexp(ctx, x, n): + r""" + Computes `x 2^n` efficiently. No rounding is performed. 
+ The argument `x` must be a real floating-point number (or + possible to convert into one) and `n` must be a Python ``int``. + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> ldexp(1, 10) + mpf('1024.0') + >>> ldexp(1, -3) + mpf('0.125') + + """ + x = ctx.convert(x) + return ctx.make_mpf(libmp.mpf_shift(x._mpf_, n)) + + def frexp(ctx, x): + r""" + Given a real number `x`, returns `(y, n)` with `y \in [0.5, 1)`, + `n` a Python integer, and such that `x = y 2^n`. No rounding is + performed. + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> frexp(7.5) + (mpf('0.9375'), 3) + + """ + x = ctx.convert(x) + y, n = libmp.mpf_frexp(x._mpf_) + return ctx.make_mpf(y), n + + def fneg(ctx, x, **kwargs): + """ + Negates the number *x*, giving a floating-point result, optionally + using a custom precision and rounding mode. + + See the documentation of :func:`~mpmath.fadd` for a detailed description + of how to specify precision and rounding. + + **Examples** + + An mpmath number is returned:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> fneg(2.5) + mpf('-2.5') + >>> fneg(-5+2j) + mpc(real='5.0', imag='-2.0') + + Precise control over rounding is possible:: + + >>> x = fadd(2, 1e-100, exact=True) + >>> fneg(x) + mpf('-2.0') + >>> fneg(x, rounding='f') + mpf('-2.0000000000000004') + + Negating with and without roundoff:: + + >>> n = 200000000000000000000001 + >>> print(int(-mpf(n))) + -200000000000000016777216 + >>> print(int(fneg(n))) + -200000000000000016777216 + >>> print(int(fneg(n, prec=log(n,2)+1))) + -200000000000000000000001 + >>> print(int(fneg(n, dps=log(n,10)+1))) + -200000000000000000000001 + >>> print(int(fneg(n, prec=inf))) + -200000000000000000000001 + >>> print(int(fneg(n, dps=inf))) + -200000000000000000000001 + >>> print(int(fneg(n, exact=True))) + -200000000000000000000001 + + """ + prec, rounding = ctx._parse_prec(kwargs) + x = ctx.convert(x) + if hasattr(x, '_mpf_'): + return 
ctx.make_mpf(mpf_neg(x._mpf_, prec, rounding)) + if hasattr(x, '_mpc_'): + return ctx.make_mpc(mpc_neg(x._mpc_, prec, rounding)) + raise ValueError("Arguments need to be mpf or mpc compatible numbers") + + def fadd(ctx, x, y, **kwargs): + """ + Adds the numbers *x* and *y*, giving a floating-point result, + optionally using a custom precision and rounding mode. + + The default precision is the working precision of the context. + You can specify a custom precision in bits by passing the *prec* keyword + argument, or by providing an equivalent decimal precision with the *dps* + keyword argument. If the precision is set to ``+inf``, or if the flag + *exact=True* is passed, an exact addition with no rounding is performed. + + When the precision is finite, the optional *rounding* keyword argument + specifies the direction of rounding. Valid options are ``'n'`` for + nearest (default), ``'f'`` for floor, ``'c'`` for ceiling, ``'d'`` + for down, ``'u'`` for up. + + **Examples** + + Using :func:`~mpmath.fadd` with precision and rounding control:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> fadd(2, 1e-20) + mpf('2.0') + >>> fadd(2, 1e-20, rounding='u') + mpf('2.0000000000000004') + >>> nprint(fadd(2, 1e-20, prec=100), 25) + 2.00000000000000000001 + >>> nprint(fadd(2, 1e-20, dps=15), 25) + 2.0 + >>> nprint(fadd(2, 1e-20, dps=25), 25) + 2.00000000000000000001 + >>> nprint(fadd(2, 1e-20, exact=True), 25) + 2.00000000000000000001 + + Exact addition avoids cancellation errors, enforcing familiar laws + of numbers such as `x+y-x = y`, which don't hold in floating-point + arithmetic with finite precision:: + + >>> x, y = mpf(2), mpf('1e-1000') + >>> print(x + y - x) + 0.0 + >>> print(fadd(x, y, prec=inf) - x) + 1.0e-1000 + >>> print(fadd(x, y, exact=True) - x) + 1.0e-1000 + + Exact addition can be inefficient and may be impossible to perform + with large magnitude differences:: + + >>> fadd(1, '1e-100000000000000000000', prec=inf) + Traceback (most 
recent call last): + ... + OverflowError: the exact result does not fit in memory + + """ + prec, rounding = ctx._parse_prec(kwargs) + x = ctx.convert(x) + y = ctx.convert(y) + try: + if hasattr(x, '_mpf_'): + if hasattr(y, '_mpf_'): + return ctx.make_mpf(mpf_add(x._mpf_, y._mpf_, prec, rounding)) + if hasattr(y, '_mpc_'): + return ctx.make_mpc(mpc_add_mpf(y._mpc_, x._mpf_, prec, rounding)) + if hasattr(x, '_mpc_'): + if hasattr(y, '_mpf_'): + return ctx.make_mpc(mpc_add_mpf(x._mpc_, y._mpf_, prec, rounding)) + if hasattr(y, '_mpc_'): + return ctx.make_mpc(mpc_add(x._mpc_, y._mpc_, prec, rounding)) + except (ValueError, OverflowError): + raise OverflowError(ctx._exact_overflow_msg) + raise ValueError("Arguments need to be mpf or mpc compatible numbers") + + def fsub(ctx, x, y, **kwargs): + """ + Subtracts the numbers *x* and *y*, giving a floating-point result, + optionally using a custom precision and rounding mode. + + See the documentation of :func:`~mpmath.fadd` for a detailed description + of how to specify precision and rounding. 
+ + **Examples** + + Using :func:`~mpmath.fsub` with precision and rounding control:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> fsub(2, 1e-20) + mpf('2.0') + >>> fsub(2, 1e-20, rounding='d') + mpf('1.9999999999999998') + >>> nprint(fsub(2, 1e-20, prec=100), 25) + 1.99999999999999999999 + >>> nprint(fsub(2, 1e-20, dps=15), 25) + 2.0 + >>> nprint(fsub(2, 1e-20, dps=25), 25) + 1.99999999999999999999 + >>> nprint(fsub(2, 1e-20, exact=True), 25) + 1.99999999999999999999 + + Exact subtraction avoids cancellation errors, enforcing familiar laws + of numbers such as `x-y+y = x`, which don't hold in floating-point + arithmetic with finite precision:: + + >>> x, y = mpf(2), mpf('1e1000') + >>> print(x - y + y) + 0.0 + >>> print(fsub(x, y, prec=inf) + y) + 2.0 + >>> print(fsub(x, y, exact=True) + y) + 2.0 + + Exact addition can be inefficient and may be impossible to perform + with large magnitude differences:: + + >>> fsub(1, '1e-100000000000000000000', prec=inf) + Traceback (most recent call last): + ... + OverflowError: the exact result does not fit in memory + + """ + prec, rounding = ctx._parse_prec(kwargs) + x = ctx.convert(x) + y = ctx.convert(y) + try: + if hasattr(x, '_mpf_'): + if hasattr(y, '_mpf_'): + return ctx.make_mpf(mpf_sub(x._mpf_, y._mpf_, prec, rounding)) + if hasattr(y, '_mpc_'): + return ctx.make_mpc(mpc_sub((x._mpf_, fzero), y._mpc_, prec, rounding)) + if hasattr(x, '_mpc_'): + if hasattr(y, '_mpf_'): + return ctx.make_mpc(mpc_sub_mpf(x._mpc_, y._mpf_, prec, rounding)) + if hasattr(y, '_mpc_'): + return ctx.make_mpc(mpc_sub(x._mpc_, y._mpc_, prec, rounding)) + except (ValueError, OverflowError): + raise OverflowError(ctx._exact_overflow_msg) + raise ValueError("Arguments need to be mpf or mpc compatible numbers") + + def fmul(ctx, x, y, **kwargs): + """ + Multiplies the numbers *x* and *y*, giving a floating-point result, + optionally using a custom precision and rounding mode. 
+ + See the documentation of :func:`~mpmath.fadd` for a detailed description + of how to specify precision and rounding. + + **Examples** + + The result is an mpmath number:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> fmul(2, 5.0) + mpf('10.0') + >>> fmul(0.5j, 0.5) + mpc(real='0.0', imag='0.25') + + Avoiding roundoff:: + + >>> x, y = 10**10+1, 10**15+1 + >>> print(x*y) + 10000000001000010000000001 + >>> print(mpf(x) * mpf(y)) + 1.0000000001e+25 + >>> print(int(mpf(x) * mpf(y))) + 10000000001000011026399232 + >>> print(int(fmul(x, y))) + 10000000001000011026399232 + >>> print(int(fmul(x, y, dps=25))) + 10000000001000010000000001 + >>> print(int(fmul(x, y, exact=True))) + 10000000001000010000000001 + + Exact multiplication with complex numbers can be inefficient and may + be impossible to perform with large magnitude differences between + real and imaginary parts:: + + >>> x = 1+2j + >>> y = mpc(2, '1e-100000000000000000000') + >>> fmul(x, y) + mpc(real='2.0', imag='4.0') + >>> fmul(x, y, rounding='u') + mpc(real='2.0', imag='4.0000000000000009') + >>> fmul(x, y, exact=True) + Traceback (most recent call last): + ... 
+ OverflowError: the exact result does not fit in memory + + """ + prec, rounding = ctx._parse_prec(kwargs) + x = ctx.convert(x) + y = ctx.convert(y) + try: + if hasattr(x, '_mpf_'): + if hasattr(y, '_mpf_'): + return ctx.make_mpf(mpf_mul(x._mpf_, y._mpf_, prec, rounding)) + if hasattr(y, '_mpc_'): + return ctx.make_mpc(mpc_mul_mpf(y._mpc_, x._mpf_, prec, rounding)) + if hasattr(x, '_mpc_'): + if hasattr(y, '_mpf_'): + return ctx.make_mpc(mpc_mul_mpf(x._mpc_, y._mpf_, prec, rounding)) + if hasattr(y, '_mpc_'): + return ctx.make_mpc(mpc_mul(x._mpc_, y._mpc_, prec, rounding)) + except (ValueError, OverflowError): + raise OverflowError(ctx._exact_overflow_msg) + raise ValueError("Arguments need to be mpf or mpc compatible numbers") + + def fdiv(ctx, x, y, **kwargs): + """ + Divides the numbers *x* and *y*, giving a floating-point result, + optionally using a custom precision and rounding mode. + + See the documentation of :func:`~mpmath.fadd` for a detailed description + of how to specify precision and rounding. + + **Examples** + + The result is an mpmath number:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> fdiv(3, 2) + mpf('1.5') + >>> fdiv(2, 3) + mpf('0.66666666666666663') + >>> fdiv(2+4j, 0.5) + mpc(real='4.0', imag='8.0') + + The rounding direction and precision can be controlled:: + + >>> fdiv(2, 3, dps=3) # Should be accurate to at least 3 digits + mpf('0.6666259765625') + >>> fdiv(2, 3, rounding='d') + mpf('0.66666666666666663') + >>> fdiv(2, 3, prec=60) + mpf('0.66666666666666667') + >>> fdiv(2, 3, rounding='u') + mpf('0.66666666666666674') + + Checking the error of a division by performing it at higher precision:: + + >>> fdiv(2, 3) - fdiv(2, 3, prec=100) + mpf('-3.7007434154172148e-17') + + Unlike :func:`~mpmath.fadd`, :func:`~mpmath.fmul`, etc., exact division is not + allowed since the quotient of two floating-point numbers generally + does not have an exact floating-point representation. 
(In the + future this might be changed to allow the case where the division + is actually exact.) + + >>> fdiv(2, 3, exact=True) + Traceback (most recent call last): + ... + ValueError: division is not an exact operation + + """ + prec, rounding = ctx._parse_prec(kwargs) + if not prec: + raise ValueError("division is not an exact operation") + x = ctx.convert(x) + y = ctx.convert(y) + if hasattr(x, '_mpf_'): + if hasattr(y, '_mpf_'): + return ctx.make_mpf(mpf_div(x._mpf_, y._mpf_, prec, rounding)) + if hasattr(y, '_mpc_'): + return ctx.make_mpc(mpc_div((x._mpf_, fzero), y._mpc_, prec, rounding)) + if hasattr(x, '_mpc_'): + if hasattr(y, '_mpf_'): + return ctx.make_mpc(mpc_div_mpf(x._mpc_, y._mpf_, prec, rounding)) + if hasattr(y, '_mpc_'): + return ctx.make_mpc(mpc_div(x._mpc_, y._mpc_, prec, rounding)) + raise ValueError("Arguments need to be mpf or mpc compatible numbers") + + def nint_distance(ctx, x): + r""" + Return `(n,d)` where `n` is the nearest integer to `x` and `d` is + an estimate of `\log_2(|x-n|)`. If `d < 0`, `-d` gives the precision + (measured in bits) lost to cancellation when computing `x-n`. 
+ + >>> from mpmath import * + >>> n, d = nint_distance(5) + >>> print(n); print(d) + 5 + -inf + >>> n, d = nint_distance(mpf(5)) + >>> print(n); print(d) + 5 + -inf + >>> n, d = nint_distance(mpf(5.00000001)) + >>> print(n); print(d) + 5 + -26 + >>> n, d = nint_distance(mpf(4.99999999)) + >>> print(n); print(d) + 5 + -26 + >>> n, d = nint_distance(mpc(5,10)) + >>> print(n); print(d) + 5 + 4 + >>> n, d = nint_distance(mpc(5,0.000001)) + >>> print(n); print(d) + 5 + -19 + + """ + typx = type(x) + if typx in int_types: + return int(x), ctx.ninf + elif typx is rational.mpq: + p, q = x._mpq_ + n, r = divmod(p, q) + if 2*r >= q: + n += 1 + elif not r: + return n, ctx.ninf + # log(p/q-n) = log((p-nq)/q) = log(p-nq) - log(q) + d = bitcount(abs(p-n*q)) - bitcount(q) + return n, d + if hasattr(x, "_mpf_"): + re = x._mpf_ + im_dist = ctx.ninf + elif hasattr(x, "_mpc_"): + re, im = x._mpc_ + isign, iman, iexp, ibc = im + if iman: + im_dist = iexp + ibc + elif im == fzero: + im_dist = ctx.ninf + else: + raise ValueError("requires a finite number") + else: + x = ctx.convert(x) + if hasattr(x, "_mpf_") or hasattr(x, "_mpc_"): + return ctx.nint_distance(x) + else: + raise TypeError("requires an mpf/mpc") + sign, man, exp, bc = re + mag = exp+bc + # |x| < 0.5 + if mag < 0: + n = 0 + re_dist = mag + elif man: + # exact integer + if exp >= 0: + n = man << exp + re_dist = ctx.ninf + # exact half-integer + elif exp == -1: + n = (man>>1)+1 + re_dist = 0 + else: + d = (-exp-1) + t = man >> d + if t & 1: + t += 1 + man = (t<>1 # int(t)>>1 + re_dist = exp+bitcount(man) + if sign: + n = -n + elif re == fzero: + re_dist = ctx.ninf + n = 0 + else: + raise ValueError("requires a finite number") + return n, max(re_dist, im_dist) + + def fprod(ctx, factors): + r""" + Calculates a product containing a finite number of factors (for + infinite products, see :func:`~mpmath.nprod`). The factors will be + converted to mpmath numbers. 
+ + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> fprod([1, 2, 0.5, 7]) + mpf('7.0') + + """ + orig = ctx.prec + try: + v = ctx.one + for p in factors: + v *= p + finally: + ctx.prec = orig + return +v + + def rand(ctx): + """ + Returns an ``mpf`` with value chosen randomly from `[0, 1)`. + The number of randomly generated bits in the mantissa is equal + to the working precision. + """ + return ctx.make_mpf(mpf_rand(ctx._prec)) + + def fraction(ctx, p, q): + """ + Given Python integers `(p, q)`, returns a lazy ``mpf`` representing + the fraction `p/q`. The value is updated with the precision. + + >>> from mpmath import * + >>> mp.dps = 15 + >>> a = fraction(1,100) + >>> b = mpf(1)/100 + >>> print(a); print(b) + 0.01 + 0.01 + >>> mp.dps = 30 + >>> print(a); print(b) # a will be accurate + 0.01 + 0.0100000000000000002081668171172 + >>> mp.dps = 15 + """ + return ctx.constant(lambda prec, rnd: from_rational(p, q, prec, rnd), + '%s/%s' % (p, q)) + + def absmin(ctx, x): + return abs(ctx.convert(x)) + + def absmax(ctx, x): + return abs(ctx.convert(x)) + + def _as_points(ctx, x): + # XXX: remove this? + if hasattr(x, '_mpi_'): + a, b = x._mpi_ + return [ctx.make_mpf(a), ctx.make_mpf(b)] + return x + + ''' + def _zetasum(ctx, s, a, b): + """ + Computes sum of k^(-s) for k = a, a+1, ..., b with a, b both small + integers. 
+ """ + a = int(a) + b = int(b) + s = ctx.convert(s) + prec, rounding = ctx._prec_rounding + if hasattr(s, '_mpf_'): + v = ctx.make_mpf(libmp.mpf_zetasum(s._mpf_, a, b, prec)) + elif hasattr(s, '_mpc_'): + v = ctx.make_mpc(libmp.mpc_zetasum(s._mpc_, a, b, prec)) + return v + ''' + + def _zetasum_fast(ctx, s, a, n, derivatives=[0], reflect=False): + if not (ctx.isint(a) and hasattr(s, "_mpc_")): + raise NotImplementedError + a = int(a) + prec = ctx._prec + xs, ys = libmp.mpc_zetasum(s._mpc_, a, n, derivatives, reflect, prec) + xs = [ctx.make_mpc(x) for x in xs] + ys = [ctx.make_mpc(y) for y in ys] + return xs, ys + +class PrecisionManager: + def __init__(self, ctx, precfun, dpsfun, normalize_output=False): + self.ctx = ctx + self.precfun = precfun + self.dpsfun = dpsfun + self.normalize_output = normalize_output + def __call__(self, f): + @functools.wraps(f) + def g(*args, **kwargs): + orig = self.ctx.prec + try: + if self.precfun: + self.ctx.prec = self.precfun(self.ctx.prec) + else: + self.ctx.dps = self.dpsfun(self.ctx.dps) + if self.normalize_output: + v = f(*args, **kwargs) + if type(v) is tuple: + return tuple([+a for a in v]) + return +v + else: + return f(*args, **kwargs) + finally: + self.ctx.prec = orig + return g + def __enter__(self): + self.origp = self.ctx.prec + if self.precfun: + self.ctx.prec = self.precfun(self.ctx.prec) + else: + self.ctx.dps = self.dpsfun(self.ctx.dps) + def __exit__(self, exc_type, exc_val, exc_tb): + self.ctx.prec = self.origp + return False + + +if __name__ == '__main__': + import doctest + doctest.testmod() diff --git a/.venv/lib/python3.11/site-packages/mpmath/ctx_mp_python.py b/.venv/lib/python3.11/site-packages/mpmath/ctx_mp_python.py new file mode 100644 index 0000000000000000000000000000000000000000..cfbd72fb8300bf840069c38529b7b41418d26eeb --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/ctx_mp_python.py @@ -0,0 +1,1149 @@ +#from ctx_base import StandardBaseContext + +from .libmp.backend import basestring, 
from .libmp.backend import basestring, exec_

# Low-level arithmetic kernels and constants.  Note: `int_types` was
# previously listed twice in this import; the duplicate has been removed.
from .libmp import (MPZ, MPZ_ZERO, MPZ_ONE, int_types, repr_dps,
    round_floor, round_ceiling, dps_to_prec, round_nearest, prec_to_dps,
    ComplexResult, to_pickable, from_pickable, normalize,
    from_int, from_float, from_npfloat, from_Decimal, from_str, to_int, to_float, to_str,
    from_rational, from_man_exp,
    fone, fzero, finf, fninf, fnan,
    mpf_abs, mpf_pos, mpf_neg, mpf_add, mpf_sub, mpf_mul, mpf_mul_int,
    mpf_div, mpf_rdiv_int, mpf_pow_int, mpf_mod,
    mpf_eq, mpf_cmp, mpf_lt, mpf_gt, mpf_le, mpf_ge,
    mpf_hash, mpf_rand,
    mpf_sum,
    bitcount, to_fixed,
    mpc_to_str,
    mpc_to_complex, mpc_hash, mpc_pos, mpc_is_nonzero, mpc_neg, mpc_conjugate,
    mpc_abs, mpc_add, mpc_add_mpf, mpc_sub, mpc_sub_mpf, mpc_mul, mpc_mul_mpf,
    mpc_mul_int, mpc_div, mpc_div_mpf, mpc_pow, mpc_pow_mpf, mpc_pow_int,
    mpc_mpf_div,
    mpf_pow,
    mpf_pi, mpf_degree, mpf_e, mpf_phi, mpf_ln2, mpf_ln10,
    mpf_euler, mpf_catalan, mpf_apery, mpf_khinchin,
    mpf_glaisher, mpf_twinprime, mpf_mertens)

from . import rational
from . import function_docs

# Shortcut used throughout to allocate instances without __init__.
new = object.__new__

class mpnumeric(object):
    """Base class for mpf and mpc."""
    __slots__ = []
    def __new__(cls, val):
        raise NotImplementedError

class _mpf(mpnumeric):
    """
    An mpf instance holds a real-valued floating-point number. mpf:s
    work analogously to Python floats, but support arbitrary-precision
    arithmetic.
    """
    __slots__ = ['_mpf_']

    def __new__(cls, val=fzero, **kwargs):
        """A new mpf can be created from a Python float, an int, or a
        decimal string representing a number in floating-point format.
        The precision/rounding used for the conversion may be overridden
        with the *prec* (bits), *dps* (decimal digits) and *rounding*
        keyword arguments."""
        prec, rounding = cls.context._prec_rounding
        if kwargs:
            prec = kwargs.get('prec', prec)
            if 'dps' in kwargs:
                prec = dps_to_prec(kwargs['dps'])
            rounding = kwargs.get('rounding', rounding)
        if type(val) is cls:
            sign, man, exp, bc = val._mpf_
            # Special values (inf/nan have man==0, exp nonzero) pass
            # through unchanged.
            if (not man) and exp:
                return val
            v = new(cls)
            v._mpf_ = normalize(sign, man, exp, bc, prec, rounding)
            return v
        elif type(val) is tuple:
            if len(val) == 2:
                # (man, exp) pair.
                v = new(cls)
                v._mpf_ = from_man_exp(val[0], val[1], prec, rounding)
                return v
            if len(val) == 4:
                # Raw (sign, man, exp, bc) tuple.
                if val not in (finf, fninf, fnan):
                    sign, man, exp, bc = val
                    val = normalize(sign, MPZ(man), exp, bc, prec, rounding)
                v = new(cls)
                v._mpf_ = val
                return v
            raise ValueError
        else:
            v = new(cls)
            v._mpf_ = mpf_pos(cls.mpf_convert_arg(val, prec, rounding), prec, rounding)
            return v

    @classmethod
    def mpf_convert_arg(cls, x, prec, rounding):
        # Convert a constructor argument to raw mpf data.
        if isinstance(x, int_types): return from_int(x)
        if isinstance(x, float): return from_float(x)
        if isinstance(x, basestring): return from_str(x, prec, rounding)
        if isinstance(x, cls.context.constant): return x.func(prec, rounding)
        if hasattr(x, '_mpf_'): return x._mpf_
        if hasattr(x, '_mpmath_'):
            t = cls.context.convert(x._mpmath_(prec, rounding))
            if hasattr(t, '_mpf_'):
                return t._mpf_
        if hasattr(x, '_mpi_'):
            a, b = x._mpi_
            if a == b:
                return a
            raise ValueError("can only create mpf from zero-width interval")
        raise TypeError("cannot create mpf from " + repr(x))

    @classmethod
    def mpf_convert_rhs(cls, x):
        # Convert the right-hand operand of a binary operation; returns
        # NotImplemented for unsupported types so Python can try the
        # reflected operation.
        if isinstance(x, int_types): return from_int(x)
        if isinstance(x, float): return from_float(x)
        if isinstance(x, complex_types): return cls.context.mpc(x)
        if isinstance(x, rational.mpq):
            p, q = x._mpq_
            return from_rational(p, q, cls.context.prec)
        if hasattr(x, '_mpf_'): return x._mpf_
        if hasattr(x, '_mpmath_'):
            t = cls.context.convert(x._mpmath_(*cls.context._prec_rounding))
            if hasattr(t, '_mpf_'):
                return t._mpf_
            return t
        return NotImplemented

    @classmethod
    def mpf_convert_lhs(cls, x):
        x = cls.mpf_convert_rhs(x)
        if type(x) is tuple:
            return cls.context.make_mpf(x)
        return x

    man_exp = property(lambda self: self._mpf_[1:3])
    man = property(lambda self: self._mpf_[1])
    exp = property(lambda self: self._mpf_[2])
    bc = property(lambda self: self._mpf_[3])

    real = property(lambda self: self)
    imag = property(lambda self: self.context.zero)

    conjugate = lambda self: self

    def __getstate__(self): return to_pickable(self._mpf_)
    def __setstate__(self, val): self._mpf_ = from_pickable(val)

    def __repr__(s):
        if s.context.pretty:
            return str(s)
        return "mpf('%s')" % to_str(s._mpf_, s.context._repr_digits)

    def __str__(s): return to_str(s._mpf_, s.context._str_digits)
    def __hash__(s): return mpf_hash(s._mpf_)
    def __int__(s): return int(to_int(s._mpf_))
    def __long__(s): return long(to_int(s._mpf_))
    def __float__(s): return to_float(s._mpf_, rnd=s.context._prec_rounding[1])
    def __complex__(s): return complex(float(s))
    def __nonzero__(s): return s._mpf_ != fzero

    __bool__ = __nonzero__

    def __abs__(s):
        cls, new, (prec, rounding) = s._ctxdata
        v = new(cls)
        v._mpf_ = mpf_abs(s._mpf_, prec, rounding)
        return v

    def __pos__(s):
        cls, new, (prec, rounding) = s._ctxdata
        v = new(cls)
        v._mpf_ = mpf_pos(s._mpf_, prec, rounding)
        return v

    def __neg__(s):
        cls, new, (prec, rounding) = s._ctxdata
        v = new(cls)
        v._mpf_ = mpf_neg(s._mpf_, prec, rounding)
        return v

    def _cmp(s, t, func):
        # Shared comparison helper: convert t and apply the raw kernel.
        if hasattr(t, '_mpf_'):
            t = t._mpf_
        else:
            t = s.mpf_convert_rhs(t)
            if t is NotImplemented:
                return t
        return func(s._mpf_, t)

    def __cmp__(s, t): return s._cmp(t, mpf_cmp)
    def __lt__(s, t): return s._cmp(t, mpf_lt)
    def __gt__(s, t): return s._cmp(t, mpf_gt)
s._cmp(t, mpf_gt) + def __le__(s, t): return s._cmp(t, mpf_le) + def __ge__(s, t): return s._cmp(t, mpf_ge) + + def __ne__(s, t): + v = s.__eq__(t) + if v is NotImplemented: + return v + return not v + + def __rsub__(s, t): + cls, new, (prec, rounding) = s._ctxdata + if type(t) in int_types: + v = new(cls) + v._mpf_ = mpf_sub(from_int(t), s._mpf_, prec, rounding) + return v + t = s.mpf_convert_lhs(t) + if t is NotImplemented: + return t + return t - s + + def __rdiv__(s, t): + cls, new, (prec, rounding) = s._ctxdata + if isinstance(t, int_types): + v = new(cls) + v._mpf_ = mpf_rdiv_int(t, s._mpf_, prec, rounding) + return v + t = s.mpf_convert_lhs(t) + if t is NotImplemented: + return t + return t / s + + def __rpow__(s, t): + t = s.mpf_convert_lhs(t) + if t is NotImplemented: + return t + return t ** s + + def __rmod__(s, t): + t = s.mpf_convert_lhs(t) + if t is NotImplemented: + return t + return t % s + + def sqrt(s): + return s.context.sqrt(s) + + def ae(s, t, rel_eps=None, abs_eps=None): + return s.context.almosteq(s, t, rel_eps, abs_eps) + + def to_fixed(self, prec): + return to_fixed(self._mpf_, prec) + + def __round__(self, *args): + return round(float(self), *args) + +mpf_binary_op = """ +def %NAME%(self, other): + mpf, new, (prec, rounding) = self._ctxdata + sval = self._mpf_ + if hasattr(other, '_mpf_'): + tval = other._mpf_ + %WITH_MPF% + ttype = type(other) + if ttype in int_types: + %WITH_INT% + elif ttype is float: + tval = from_float(other) + %WITH_MPF% + elif hasattr(other, '_mpc_'): + tval = other._mpc_ + mpc = type(other) + %WITH_MPC% + elif ttype is complex: + tval = from_float(other.real), from_float(other.imag) + mpc = self.context.mpc + %WITH_MPC% + if isinstance(other, mpnumeric): + return NotImplemented + try: + other = mpf.context.convert(other, strings=False) + except TypeError: + return NotImplemented + return self.%NAME%(other) +""" + +return_mpf = "; obj = new(mpf); obj._mpf_ = val; return obj" +return_mpc = "; obj = new(mpc); 
obj._mpc_ = val; return obj" + +mpf_pow_same = """ + try: + val = mpf_pow(sval, tval, prec, rounding) %s + except ComplexResult: + if mpf.context.trap_complex: + raise + mpc = mpf.context.mpc + val = mpc_pow((sval, fzero), (tval, fzero), prec, rounding) %s +""" % (return_mpf, return_mpc) + +def binary_op(name, with_mpf='', with_int='', with_mpc=''): + code = mpf_binary_op + code = code.replace("%WITH_INT%", with_int) + code = code.replace("%WITH_MPC%", with_mpc) + code = code.replace("%WITH_MPF%", with_mpf) + code = code.replace("%NAME%", name) + np = {} + exec_(code, globals(), np) + return np[name] + +_mpf.__eq__ = binary_op('__eq__', + 'return mpf_eq(sval, tval)', + 'return mpf_eq(sval, from_int(other))', + 'return (tval[1] == fzero) and mpf_eq(tval[0], sval)') + +_mpf.__add__ = binary_op('__add__', + 'val = mpf_add(sval, tval, prec, rounding)' + return_mpf, + 'val = mpf_add(sval, from_int(other), prec, rounding)' + return_mpf, + 'val = mpc_add_mpf(tval, sval, prec, rounding)' + return_mpc) + +_mpf.__sub__ = binary_op('__sub__', + 'val = mpf_sub(sval, tval, prec, rounding)' + return_mpf, + 'val = mpf_sub(sval, from_int(other), prec, rounding)' + return_mpf, + 'val = mpc_sub((sval, fzero), tval, prec, rounding)' + return_mpc) + +_mpf.__mul__ = binary_op('__mul__', + 'val = mpf_mul(sval, tval, prec, rounding)' + return_mpf, + 'val = mpf_mul_int(sval, other, prec, rounding)' + return_mpf, + 'val = mpc_mul_mpf(tval, sval, prec, rounding)' + return_mpc) + +_mpf.__div__ = binary_op('__div__', + 'val = mpf_div(sval, tval, prec, rounding)' + return_mpf, + 'val = mpf_div(sval, from_int(other), prec, rounding)' + return_mpf, + 'val = mpc_mpf_div(sval, tval, prec, rounding)' + return_mpc) + +_mpf.__mod__ = binary_op('__mod__', + 'val = mpf_mod(sval, tval, prec, rounding)' + return_mpf, + 'val = mpf_mod(sval, from_int(other), prec, rounding)' + return_mpf, + 'raise NotImplementedError("complex modulo")') + +_mpf.__pow__ = binary_op('__pow__', + mpf_pow_same, + 'val = 
mpf_pow_int(sval, other, prec, rounding)' + return_mpf, + 'val = mpc_pow((sval, fzero), tval, prec, rounding)' + return_mpc) + +_mpf.__radd__ = _mpf.__add__ +_mpf.__rmul__ = _mpf.__mul__ +_mpf.__truediv__ = _mpf.__div__ +_mpf.__rtruediv__ = _mpf.__rdiv__ + + +class _constant(_mpf): + """Represents a mathematical constant with dynamic precision. + When printed or used in an arithmetic operation, a constant + is converted to a regular mpf at the working precision. A + regular mpf can also be obtained using the operation +x.""" + + def __new__(cls, func, name, docname=''): + a = object.__new__(cls) + a.name = name + a.func = func + a.__doc__ = getattr(function_docs, docname, '') + return a + + def __call__(self, prec=None, dps=None, rounding=None): + prec2, rounding2 = self.context._prec_rounding + if not prec: prec = prec2 + if not rounding: rounding = rounding2 + if dps: prec = dps_to_prec(dps) + return self.context.make_mpf(self.func(prec, rounding)) + + @property + def _mpf_(self): + prec, rounding = self.context._prec_rounding + return self.func(prec, rounding) + + def __repr__(self): + return "<%s: %s~>" % (self.name, self.context.nstr(self(dps=15))) + + +class _mpc(mpnumeric): + """ + An mpc represents a complex number using a pair of mpf:s (one + for the real part and another for the imaginary part.) The mpc + class behaves fairly similarly to Python's complex type. 
+ """ + + __slots__ = ['_mpc_'] + + def __new__(cls, real=0, imag=0): + s = object.__new__(cls) + if isinstance(real, complex_types): + real, imag = real.real, real.imag + elif hasattr(real, '_mpc_'): + s._mpc_ = real._mpc_ + return s + real = cls.context.mpf(real) + imag = cls.context.mpf(imag) + s._mpc_ = (real._mpf_, imag._mpf_) + return s + + real = property(lambda self: self.context.make_mpf(self._mpc_[0])) + imag = property(lambda self: self.context.make_mpf(self._mpc_[1])) + + def __getstate__(self): + return to_pickable(self._mpc_[0]), to_pickable(self._mpc_[1]) + + def __setstate__(self, val): + self._mpc_ = from_pickable(val[0]), from_pickable(val[1]) + + def __repr__(s): + if s.context.pretty: + return str(s) + r = repr(s.real)[4:-1] + i = repr(s.imag)[4:-1] + return "%s(real=%s, imag=%s)" % (type(s).__name__, r, i) + + def __str__(s): + return "(%s)" % mpc_to_str(s._mpc_, s.context._str_digits) + + def __complex__(s): + return mpc_to_complex(s._mpc_, rnd=s.context._prec_rounding[1]) + + def __pos__(s): + cls, new, (prec, rounding) = s._ctxdata + v = new(cls) + v._mpc_ = mpc_pos(s._mpc_, prec, rounding) + return v + + def __abs__(s): + prec, rounding = s.context._prec_rounding + v = new(s.context.mpf) + v._mpf_ = mpc_abs(s._mpc_, prec, rounding) + return v + + def __neg__(s): + cls, new, (prec, rounding) = s._ctxdata + v = new(cls) + v._mpc_ = mpc_neg(s._mpc_, prec, rounding) + return v + + def conjugate(s): + cls, new, (prec, rounding) = s._ctxdata + v = new(cls) + v._mpc_ = mpc_conjugate(s._mpc_, prec, rounding) + return v + + def __nonzero__(s): + return mpc_is_nonzero(s._mpc_) + + __bool__ = __nonzero__ + + def __hash__(s): + return mpc_hash(s._mpc_) + + @classmethod + def mpc_convert_lhs(cls, x): + try: + y = cls.context.convert(x) + return y + except TypeError: + return NotImplemented + + def __eq__(s, t): + if not hasattr(t, '_mpc_'): + if isinstance(t, str): + return False + t = s.mpc_convert_lhs(t) + if t is NotImplemented: + return t + return 
s.real == t.real and s.imag == t.imag + + def __ne__(s, t): + b = s.__eq__(t) + if b is NotImplemented: + return b + return not b + + def _compare(*args): + raise TypeError("no ordering relation is defined for complex numbers") + + __gt__ = _compare + __le__ = _compare + __gt__ = _compare + __ge__ = _compare + + def __add__(s, t): + cls, new, (prec, rounding) = s._ctxdata + if not hasattr(t, '_mpc_'): + t = s.mpc_convert_lhs(t) + if t is NotImplemented: + return t + if hasattr(t, '_mpf_'): + v = new(cls) + v._mpc_ = mpc_add_mpf(s._mpc_, t._mpf_, prec, rounding) + return v + v = new(cls) + v._mpc_ = mpc_add(s._mpc_, t._mpc_, prec, rounding) + return v + + def __sub__(s, t): + cls, new, (prec, rounding) = s._ctxdata + if not hasattr(t, '_mpc_'): + t = s.mpc_convert_lhs(t) + if t is NotImplemented: + return t + if hasattr(t, '_mpf_'): + v = new(cls) + v._mpc_ = mpc_sub_mpf(s._mpc_, t._mpf_, prec, rounding) + return v + v = new(cls) + v._mpc_ = mpc_sub(s._mpc_, t._mpc_, prec, rounding) + return v + + def __mul__(s, t): + cls, new, (prec, rounding) = s._ctxdata + if not hasattr(t, '_mpc_'): + if isinstance(t, int_types): + v = new(cls) + v._mpc_ = mpc_mul_int(s._mpc_, t, prec, rounding) + return v + t = s.mpc_convert_lhs(t) + if t is NotImplemented: + return t + if hasattr(t, '_mpf_'): + v = new(cls) + v._mpc_ = mpc_mul_mpf(s._mpc_, t._mpf_, prec, rounding) + return v + t = s.mpc_convert_lhs(t) + v = new(cls) + v._mpc_ = mpc_mul(s._mpc_, t._mpc_, prec, rounding) + return v + + def __div__(s, t): + cls, new, (prec, rounding) = s._ctxdata + if not hasattr(t, '_mpc_'): + t = s.mpc_convert_lhs(t) + if t is NotImplemented: + return t + if hasattr(t, '_mpf_'): + v = new(cls) + v._mpc_ = mpc_div_mpf(s._mpc_, t._mpf_, prec, rounding) + return v + v = new(cls) + v._mpc_ = mpc_div(s._mpc_, t._mpc_, prec, rounding) + return v + + def __pow__(s, t): + cls, new, (prec, rounding) = s._ctxdata + if isinstance(t, int_types): + v = new(cls) + v._mpc_ = mpc_pow_int(s._mpc_, t, prec, 
rounding) + return v + t = s.mpc_convert_lhs(t) + if t is NotImplemented: + return t + v = new(cls) + if hasattr(t, '_mpf_'): + v._mpc_ = mpc_pow_mpf(s._mpc_, t._mpf_, prec, rounding) + else: + v._mpc_ = mpc_pow(s._mpc_, t._mpc_, prec, rounding) + return v + + __radd__ = __add__ + + def __rsub__(s, t): + t = s.mpc_convert_lhs(t) + if t is NotImplemented: + return t + return t - s + + def __rmul__(s, t): + cls, new, (prec, rounding) = s._ctxdata + if isinstance(t, int_types): + v = new(cls) + v._mpc_ = mpc_mul_int(s._mpc_, t, prec, rounding) + return v + t = s.mpc_convert_lhs(t) + if t is NotImplemented: + return t + return t * s + + def __rdiv__(s, t): + t = s.mpc_convert_lhs(t) + if t is NotImplemented: + return t + return t / s + + def __rpow__(s, t): + t = s.mpc_convert_lhs(t) + if t is NotImplemented: + return t + return t ** s + + __truediv__ = __div__ + __rtruediv__ = __rdiv__ + + def ae(s, t, rel_eps=None, abs_eps=None): + return s.context.almosteq(s, t, rel_eps, abs_eps) + + +complex_types = (complex, _mpc) + + +class PythonMPContext(object): + + def __init__(ctx): + ctx._prec_rounding = [53, round_nearest] + ctx.mpf = type('mpf', (_mpf,), {}) + ctx.mpc = type('mpc', (_mpc,), {}) + ctx.mpf._ctxdata = [ctx.mpf, new, ctx._prec_rounding] + ctx.mpc._ctxdata = [ctx.mpc, new, ctx._prec_rounding] + ctx.mpf.context = ctx + ctx.mpc.context = ctx + ctx.constant = type('constant', (_constant,), {}) + ctx.constant._ctxdata = [ctx.mpf, new, ctx._prec_rounding] + ctx.constant.context = ctx + + def make_mpf(ctx, v): + a = new(ctx.mpf) + a._mpf_ = v + return a + + def make_mpc(ctx, v): + a = new(ctx.mpc) + a._mpc_ = v + return a + + def default(ctx): + ctx._prec = ctx._prec_rounding[0] = 53 + ctx._dps = 15 + ctx.trap_complex = False + + def _set_prec(ctx, n): + ctx._prec = ctx._prec_rounding[0] = max(1, int(n)) + ctx._dps = prec_to_dps(n) + + def _set_dps(ctx, n): + ctx._prec = ctx._prec_rounding[0] = dps_to_prec(n) + ctx._dps = max(1, int(n)) + + prec = property(lambda 
ctx: ctx._prec, _set_prec) + dps = property(lambda ctx: ctx._dps, _set_dps) + + def convert(ctx, x, strings=True): + """ + Converts *x* to an ``mpf`` or ``mpc``. If *x* is of type ``mpf``, + ``mpc``, ``int``, ``float``, ``complex``, the conversion + will be performed losslessly. + + If *x* is a string, the result will be rounded to the present + working precision. Strings representing fractions or complex + numbers are permitted. + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> mpmathify(3.5) + mpf('3.5') + >>> mpmathify('2.1') + mpf('2.1000000000000001') + >>> mpmathify('3/4') + mpf('0.75') + >>> mpmathify('2+3j') + mpc(real='2.0', imag='3.0') + + """ + if type(x) in ctx.types: return x + if isinstance(x, int_types): return ctx.make_mpf(from_int(x)) + if isinstance(x, float): return ctx.make_mpf(from_float(x)) + if isinstance(x, complex): + return ctx.make_mpc((from_float(x.real), from_float(x.imag))) + if type(x).__module__ == 'numpy': return ctx.npconvert(x) + if isinstance(x, numbers.Rational): # e.g. Fraction + try: x = rational.mpq(int(x.numerator), int(x.denominator)) + except: pass + prec, rounding = ctx._prec_rounding + if isinstance(x, rational.mpq): + p, q = x._mpq_ + return ctx.make_mpf(from_rational(p, q, prec)) + if strings and isinstance(x, basestring): + try: + _mpf_ = from_str(x, prec, rounding) + return ctx.make_mpf(_mpf_) + except ValueError: + pass + if hasattr(x, '_mpf_'): return ctx.make_mpf(x._mpf_) + if hasattr(x, '_mpc_'): return ctx.make_mpc(x._mpc_) + if hasattr(x, '_mpmath_'): + return ctx.convert(x._mpmath_(prec, rounding)) + if type(x).__module__ == 'decimal': + try: return ctx.make_mpf(from_Decimal(x, prec, rounding)) + except: pass + return ctx._convert_fallback(x, strings) + + def npconvert(ctx, x): + """ + Converts *x* to an ``mpf`` or ``mpc``. *x* should be a numpy + scalar. 
+ """ + import numpy as np + if isinstance(x, np.integer): return ctx.make_mpf(from_int(int(x))) + if isinstance(x, np.floating): return ctx.make_mpf(from_npfloat(x)) + if isinstance(x, np.complexfloating): + return ctx.make_mpc((from_npfloat(x.real), from_npfloat(x.imag))) + raise TypeError("cannot create mpf from " + repr(x)) + + def isnan(ctx, x): + """ + Return *True* if *x* is a NaN (not-a-number), or for a complex + number, whether either the real or complex part is NaN; + otherwise return *False*:: + + >>> from mpmath import * + >>> isnan(3.14) + False + >>> isnan(nan) + True + >>> isnan(mpc(3.14,2.72)) + False + >>> isnan(mpc(3.14,nan)) + True + + """ + if hasattr(x, "_mpf_"): + return x._mpf_ == fnan + if hasattr(x, "_mpc_"): + return fnan in x._mpc_ + if isinstance(x, int_types) or isinstance(x, rational.mpq): + return False + x = ctx.convert(x) + if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'): + return ctx.isnan(x) + raise TypeError("isnan() needs a number as input") + + def isinf(ctx, x): + """ + Return *True* if the absolute value of *x* is infinite; + otherwise return *False*:: + + >>> from mpmath import * + >>> isinf(inf) + True + >>> isinf(-inf) + True + >>> isinf(3) + False + >>> isinf(3+4j) + False + >>> isinf(mpc(3,inf)) + True + >>> isinf(mpc(inf,3)) + True + + """ + if hasattr(x, "_mpf_"): + return x._mpf_ in (finf, fninf) + if hasattr(x, "_mpc_"): + re, im = x._mpc_ + return re in (finf, fninf) or im in (finf, fninf) + if isinstance(x, int_types) or isinstance(x, rational.mpq): + return False + x = ctx.convert(x) + if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'): + return ctx.isinf(x) + raise TypeError("isinf() needs a number as input") + + def isnormal(ctx, x): + """ + Determine whether *x* is "normal" in the sense of floating-point + representation; that is, return *False* if *x* is zero, an + infinity or NaN; otherwise return *True*. 
By extension, a + complex number *x* is considered "normal" if its magnitude is + normal:: + + >>> from mpmath import * + >>> isnormal(3) + True + >>> isnormal(0) + False + >>> isnormal(inf); isnormal(-inf); isnormal(nan) + False + False + False + >>> isnormal(0+0j) + False + >>> isnormal(0+3j) + True + >>> isnormal(mpc(2,nan)) + False + """ + if hasattr(x, "_mpf_"): + return bool(x._mpf_[1]) + if hasattr(x, "_mpc_"): + re, im = x._mpc_ + re_normal = bool(re[1]) + im_normal = bool(im[1]) + if re == fzero: return im_normal + if im == fzero: return re_normal + return re_normal and im_normal + if isinstance(x, int_types) or isinstance(x, rational.mpq): + return bool(x) + x = ctx.convert(x) + if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'): + return ctx.isnormal(x) + raise TypeError("isnormal() needs a number as input") + + def isint(ctx, x, gaussian=False): + """ + Return *True* if *x* is integer-valued; otherwise return + *False*:: + + >>> from mpmath import * + >>> isint(3) + True + >>> isint(mpf(3)) + True + >>> isint(3.2) + False + >>> isint(inf) + False + + Optionally, Gaussian integers can be checked for:: + + >>> isint(3+0j) + True + >>> isint(3+2j) + False + >>> isint(3+2j, gaussian=True) + True + + """ + if isinstance(x, int_types): + return True + if hasattr(x, "_mpf_"): + sign, man, exp, bc = xval = x._mpf_ + return bool((man and exp >= 0) or xval == fzero) + if hasattr(x, "_mpc_"): + re, im = x._mpc_ + rsign, rman, rexp, rbc = re + isign, iman, iexp, ibc = im + re_isint = (rman and rexp >= 0) or re == fzero + if gaussian: + im_isint = (iman and iexp >= 0) or im == fzero + return re_isint and im_isint + return re_isint and im == fzero + if isinstance(x, rational.mpq): + p, q = x._mpq_ + return p % q == 0 + x = ctx.convert(x) + if hasattr(x, '_mpf_') or hasattr(x, '_mpc_'): + return ctx.isint(x, gaussian) + raise TypeError("isint() needs a number as input") + + def fsum(ctx, terms, absolute=False, squared=False): + """ + Calculates a sum containing a finite 
number of terms (for infinite + series, see :func:`~mpmath.nsum`). The terms will be converted to + mpmath numbers. For len(terms) > 2, this function is generally + faster and produces more accurate results than the builtin + Python function :func:`sum`. + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> fsum([1, 2, 0.5, 7]) + mpf('10.5') + + With squared=True each term is squared, and with absolute=True + the absolute value of each term is used. + """ + prec, rnd = ctx._prec_rounding + real = [] + imag = [] + for term in terms: + reval = imval = 0 + if hasattr(term, "_mpf_"): + reval = term._mpf_ + elif hasattr(term, "_mpc_"): + reval, imval = term._mpc_ + else: + term = ctx.convert(term) + if hasattr(term, "_mpf_"): + reval = term._mpf_ + elif hasattr(term, "_mpc_"): + reval, imval = term._mpc_ + else: + raise NotImplementedError + if imval: + if squared: + if absolute: + real.append(mpf_mul(reval,reval)) + real.append(mpf_mul(imval,imval)) + else: + reval, imval = mpc_pow_int((reval,imval),2,prec+10) + real.append(reval) + imag.append(imval) + elif absolute: + real.append(mpc_abs((reval,imval), prec)) + else: + real.append(reval) + imag.append(imval) + else: + if squared: + reval = mpf_mul(reval, reval) + elif absolute: + reval = mpf_abs(reval) + real.append(reval) + s = mpf_sum(real, prec, rnd, absolute) + if imag: + s = ctx.make_mpc((s, mpf_sum(imag, prec, rnd))) + else: + s = ctx.make_mpf(s) + return s + + def fdot(ctx, A, B=None, conjugate=False): + r""" + Computes the dot product of the iterables `A` and `B`, + + .. math :: + + \sum_{k=0} A_k B_k. + + Alternatively, :func:`~mpmath.fdot` accepts a single iterable of pairs. + In other words, ``fdot(A,B)`` and ``fdot(zip(A,B))`` are equivalent. + The elements are automatically converted to mpmath numbers. + + With ``conjugate=True``, the elements in the second vector + will be conjugated: + + .. 
math :: + + \sum_{k=0} A_k \overline{B_k} + + **Examples** + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> A = [2, 1.5, 3] + >>> B = [1, -1, 2] + >>> fdot(A, B) + mpf('6.5') + >>> list(zip(A, B)) + [(2, 1), (1.5, -1), (3, 2)] + >>> fdot(_) + mpf('6.5') + >>> A = [2, 1.5, 3j] + >>> B = [1+j, 3, -1-j] + >>> fdot(A, B) + mpc(real='9.5', imag='-1.0') + >>> fdot(A, B, conjugate=True) + mpc(real='3.5', imag='-5.0') + + """ + if B is not None: + A = zip(A, B) + prec, rnd = ctx._prec_rounding + real = [] + imag = [] + hasattr_ = hasattr + types = (ctx.mpf, ctx.mpc) + for a, b in A: + if type(a) not in types: a = ctx.convert(a) + if type(b) not in types: b = ctx.convert(b) + a_real = hasattr_(a, "_mpf_") + b_real = hasattr_(b, "_mpf_") + if a_real and b_real: + real.append(mpf_mul(a._mpf_, b._mpf_)) + continue + a_complex = hasattr_(a, "_mpc_") + b_complex = hasattr_(b, "_mpc_") + if a_real and b_complex: + aval = a._mpf_ + bre, bim = b._mpc_ + if conjugate: + bim = mpf_neg(bim) + real.append(mpf_mul(aval, bre)) + imag.append(mpf_mul(aval, bim)) + elif b_real and a_complex: + are, aim = a._mpc_ + bval = b._mpf_ + real.append(mpf_mul(are, bval)) + imag.append(mpf_mul(aim, bval)) + elif a_complex and b_complex: + #re, im = mpc_mul(a._mpc_, b._mpc_, prec+20) + are, aim = a._mpc_ + bre, bim = b._mpc_ + if conjugate: + bim = mpf_neg(bim) + real.append(mpf_mul(are, bre)) + real.append(mpf_neg(mpf_mul(aim, bim))) + imag.append(mpf_mul(are, bim)) + imag.append(mpf_mul(aim, bre)) + else: + raise NotImplementedError + s = mpf_sum(real, prec, rnd) + if imag: + s = ctx.make_mpc((s, mpf_sum(imag, prec, rnd))) + else: + s = ctx.make_mpf(s) + return s + + def _wrap_libmp_function(ctx, mpf_f, mpc_f=None, mpi_f=None, doc=""): + """ + Given a low-level mpf_ function, and optionally similar functions + for mpc_ and mpi_, defines the function as a context method. 
+ + It is assumed that the return type is the same as that of + the input; the exception is that propagation from mpf to mpc is possible + by raising ComplexResult. + + """ + def f(x, **kwargs): + if type(x) not in ctx.types: + x = ctx.convert(x) + prec, rounding = ctx._prec_rounding + if kwargs: + prec = kwargs.get('prec', prec) + if 'dps' in kwargs: + prec = dps_to_prec(kwargs['dps']) + rounding = kwargs.get('rounding', rounding) + if hasattr(x, '_mpf_'): + try: + return ctx.make_mpf(mpf_f(x._mpf_, prec, rounding)) + except ComplexResult: + # Handle propagation to complex + if ctx.trap_complex: + raise + return ctx.make_mpc(mpc_f((x._mpf_, fzero), prec, rounding)) + elif hasattr(x, '_mpc_'): + return ctx.make_mpc(mpc_f(x._mpc_, prec, rounding)) + raise NotImplementedError("%s of a %s" % (name, type(x))) + name = mpf_f.__name__[4:] + f.__doc__ = function_docs.__dict__.get(name, "Computes the %s of x" % doc) + return f + + # Called by SpecialFunctions.__init__() + @classmethod + def _wrap_specfun(cls, name, f, wrap): + if wrap: + def f_wrapped(ctx, *args, **kwargs): + convert = ctx.convert + args = [convert(a) for a in args] + prec = ctx.prec + try: + ctx.prec += 10 + retval = f(ctx, *args, **kwargs) + finally: + ctx.prec = prec + return +retval + else: + f_wrapped = f + f_wrapped.__doc__ = function_docs.__dict__.get(name, f.__doc__) + setattr(cls, name, f_wrapped) + + def _convert_param(ctx, x): + if hasattr(x, "_mpc_"): + v, im = x._mpc_ + if im != fzero: + return x, 'C' + elif hasattr(x, "_mpf_"): + v = x._mpf_ + else: + if type(x) in int_types: + return int(x), 'Z' + p = None + if isinstance(x, tuple): + p, q = x + elif hasattr(x, '_mpq_'): + p, q = x._mpq_ + elif isinstance(x, basestring) and '/' in x: + p, q = x.split('/') + p = int(p) + q = int(q) + if p is not None: + if not p % q: + return p // q, 'Z' + return ctx.mpq(p,q), 'Q' + x = ctx.convert(x) + if hasattr(x, "_mpc_"): + v, im = x._mpc_ + if im != fzero: + return x, 'C' + elif hasattr(x, "_mpf_"): + v 
= x._mpf_ + else: + return x, 'U' + sign, man, exp, bc = v + if man: + if exp >= -4: + if sign: + man = -man + if exp >= 0: + return int(man) << exp, 'Z' + if exp >= -4: + p, q = int(man), (1<<(-exp)) + return ctx.mpq(p,q), 'Q' + x = ctx.make_mpf(v) + return x, 'R' + elif not exp: + return 0, 'Z' + else: + return x, 'U' + + def _mpf_mag(ctx, x): + sign, man, exp, bc = x + if man: + return exp+bc + if x == fzero: + return ctx.ninf + if x == finf or x == fninf: + return ctx.inf + return ctx.nan + + def mag(ctx, x): + """ + Quick logarithmic magnitude estimate of a number. Returns an + integer or infinity `m` such that `|x| <= 2^m`. It is not + guaranteed that `m` is an optimal bound, but it will never + be too large by more than 2 (and probably not more than 1). + + **Examples** + + >>> from mpmath import * + >>> mp.pretty = True + >>> mag(10), mag(10.0), mag(mpf(10)), int(ceil(log(10,2))) + (4, 4, 4, 4) + >>> mag(10j), mag(10+10j) + (4, 5) + >>> mag(0.01), int(ceil(log(0.01,2))) + (-6, -6) + >>> mag(0), mag(inf), mag(-inf), mag(nan) + (-inf, +inf, +inf, nan) + + """ + if hasattr(x, "_mpf_"): + return ctx._mpf_mag(x._mpf_) + elif hasattr(x, "_mpc_"): + r, i = x._mpc_ + if r == fzero: + return ctx._mpf_mag(i) + if i == fzero: + return ctx._mpf_mag(r) + return 1+max(ctx._mpf_mag(r), ctx._mpf_mag(i)) + elif isinstance(x, int_types): + if x: + return bitcount(abs(x)) + return ctx.ninf + elif isinstance(x, rational.mpq): + p, q = x._mpq_ + if p: + return 1 + bitcount(abs(p)) - bitcount(q) + return ctx.ninf + else: + x = ctx.convert(x) + if hasattr(x, "_mpf_") or hasattr(x, "_mpc_"): + return ctx.mag(x) + else: + raise TypeError("requires an mpf/mpc") + + +# Register with "numbers" ABC +# We do not subclass, hence we do not use the @abstractmethod checks. While +# this is less invasive it may turn out that we do not actually support +# parts of the expected interfaces. See +# http://docs.python.org/2/library/numbers.html for list of abstract +# methods. 
+try: + import numbers + numbers.Complex.register(_mpc) + numbers.Real.register(_mpf) +except ImportError: + pass diff --git a/.venv/lib/python3.11/site-packages/mpmath/function_docs.py b/.venv/lib/python3.11/site-packages/mpmath/function_docs.py new file mode 100644 index 0000000000000000000000000000000000000000..73c071dc30a25c0ea1366e06a407a20206bd18a2 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/function_docs.py @@ -0,0 +1,10201 @@ +""" +Extended docstrings for functions.py +""" + + +pi = r""" +`\pi`, roughly equal to 3.141592654, represents the area of the unit +circle, the half-period of trigonometric functions, and many other +things in mathematics. + +Mpmath can evaluate `\pi` to arbitrary precision:: + + >>> from mpmath import * + >>> mp.dps = 50; mp.pretty = True + >>> +pi + 3.1415926535897932384626433832795028841971693993751 + +This shows digits 99991-100000 of `\pi` (the last digit is actually +a 4 when the decimal expansion is truncated, but here the nearest +rounding is used):: + + >>> mp.dps = 100000 + >>> str(pi)[-10:] + '5549362465' + +**Possible issues** + +:data:`pi` always rounds to the nearest floating-point +number when used. This means that exact mathematical identities +involving `\pi` will generally not be preserved in floating-point +arithmetic. In particular, multiples of :data:`pi` (except for +the trivial case ``0*pi``) are *not* the exact roots of +:func:`~mpmath.sin`, but differ roughly by the current epsilon:: + + >>> mp.dps = 15 + >>> sin(pi) + 1.22464679914735e-16 + +One solution is to use the :func:`~mpmath.sinpi` function instead:: + + >>> sinpi(1) + 0.0 + +See the documentation of trigonometric functions for additional +details. + +**References** + +* [BorweinBorwein]_ + +""" + +degree = r""" +Represents one degree of angle, `1^{\circ} = \pi/180`, or +about 0.01745329. 
This constant may be evaluated to arbitrary +precision:: + + >>> from mpmath import * + >>> mp.dps = 50; mp.pretty = True + >>> +degree + 0.017453292519943295769236907684886127134428718885417 + +The :data:`degree` object is convenient for conversion +to radians:: + + >>> sin(30 * degree) + 0.5 + >>> asin(0.5) / degree + 30.0 +""" + +e = r""" +The transcendental number `e` = 2.718281828... is the base of the +natural logarithm (:func:`~mpmath.ln`) and of the exponential function +(:func:`~mpmath.exp`). + +Mpmath can be evaluate `e` to arbitrary precision:: + + >>> from mpmath import * + >>> mp.dps = 50; mp.pretty = True + >>> +e + 2.7182818284590452353602874713526624977572470937 + +This shows digits 99991-100000 of `e` (the last digit is actually +a 5 when the decimal expansion is truncated, but here the nearest +rounding is used):: + + >>> mp.dps = 100000 + >>> str(e)[-10:] + '2100427166' + +**Possible issues** + +:data:`e` always rounds to the nearest floating-point number +when used, and mathematical identities involving `e` may not +hold in floating-point arithmetic. For example, ``ln(e)`` +might not evaluate exactly to 1. + +In particular, don't use ``e**x`` to compute the exponential +function. Use ``exp(x)`` instead; this is both faster and more +accurate. +""" + +phi = r""" +Represents the golden ratio `\phi = (1+\sqrt 5)/2`, +approximately equal to 1.6180339887. To high precision, +its value is:: + + >>> from mpmath import * + >>> mp.dps = 50; mp.pretty = True + >>> +phi + 1.6180339887498948482045868343656381177203091798058 + +Formulas for the golden ratio include the following:: + + >>> (1+sqrt(5))/2 + 1.6180339887498948482045868343656381177203091798058 + >>> findroot(lambda x: x**2-x-1, 1) + 1.6180339887498948482045868343656381177203091798058 + >>> limit(lambda n: fib(n+1)/fib(n), inf) + 1.6180339887498948482045868343656381177203091798058 +""" + +euler = r""" +Euler's constant or the Euler-Mascheroni constant `\gamma` += 0.57721566... 
is a number of central importance to +number theory and special functions. It is defined as the limit + +.. math :: + + \gamma = \lim_{n\to\infty} H_n - \log n + +where `H_n = 1 + \frac{1}{2} + \ldots + \frac{1}{n}` is a harmonic +number (see :func:`~mpmath.harmonic`). + +Evaluation of `\gamma` is supported at arbitrary precision:: + + >>> from mpmath import * + >>> mp.dps = 50; mp.pretty = True + >>> +euler + 0.57721566490153286060651209008240243104215933593992 + +We can also compute `\gamma` directly from the definition, +although this is less efficient:: + + >>> limit(lambda n: harmonic(n)-log(n), inf) + 0.57721566490153286060651209008240243104215933593992 + +This shows digits 9991-10000 of `\gamma` (the last digit is actually +a 5 when the decimal expansion is truncated, but here the nearest +rounding is used):: + + >>> mp.dps = 10000 + >>> str(euler)[-10:] + '4679858166' + +Integrals, series, and representations for `\gamma` in terms of +special functions include the following (there are many others):: + + >>> mp.dps = 25 + >>> -quad(lambda x: exp(-x)*log(x), [0,inf]) + 0.5772156649015328606065121 + >>> quad(lambda x,y: (x-1)/(1-x*y)/log(x*y), [0,1], [0,1]) + 0.5772156649015328606065121 + >>> nsum(lambda k: 1/k-log(1+1/k), [1,inf]) + 0.5772156649015328606065121 + >>> nsum(lambda k: (-1)**k*zeta(k)/k, [2,inf]) + 0.5772156649015328606065121 + >>> -diff(gamma, 1) + 0.5772156649015328606065121 + >>> limit(lambda x: 1/x-gamma(x), 0) + 0.5772156649015328606065121 + >>> limit(lambda x: zeta(x)-1/(x-1), 1) + 0.5772156649015328606065121 + >>> (log(2*pi*nprod(lambda n: + ... exp(-2+2/n)*(1+2/n)**n, [1,inf]))-3)/2 + 0.5772156649015328606065121 + +For generalizations of the identities `\gamma = -\Gamma'(1)` +and `\gamma = \lim_{x\to1} \zeta(x)-1/(x-1)`, see +:func:`~mpmath.psi` and :func:`~mpmath.stieltjes` respectively. + +**References** + +* [BorweinBailey]_ + +""" + +catalan = r""" +Catalan's constant `K` = 0.91596559... is given by the infinite +series + +.. 
math :: + + K = \sum_{k=0}^{\infty} \frac{(-1)^k}{(2k+1)^2}. + +Mpmath can evaluate it to arbitrary precision:: + + >>> from mpmath import * + >>> mp.dps = 50; mp.pretty = True + >>> +catalan + 0.91596559417721901505460351493238411077414937428167 + +One can also compute `K` directly from the definition, although +this is significantly less efficient:: + + >>> nsum(lambda k: (-1)**k/(2*k+1)**2, [0, inf]) + 0.91596559417721901505460351493238411077414937428167 + +This shows digits 9991-10000 of `K` (the last digit is actually +a 3 when the decimal expansion is truncated, but here the nearest +rounding is used):: + + >>> mp.dps = 10000 + >>> str(catalan)[-10:] + '9537871504' + +Catalan's constant has numerous integral representations:: + + >>> mp.dps = 50 + >>> quad(lambda x: -log(x)/(1+x**2), [0, 1]) + 0.91596559417721901505460351493238411077414937428167 + >>> quad(lambda x: atan(x)/x, [0, 1]) + 0.91596559417721901505460351493238411077414937428167 + >>> quad(lambda x: ellipk(x**2)/2, [0, 1]) + 0.91596559417721901505460351493238411077414937428167 + >>> quad(lambda x,y: 1/(1+(x*y)**2), [0, 1], [0, 1]) + 0.91596559417721901505460351493238411077414937428167 + +As well as series representations:: + + >>> pi*log(sqrt(3)+2)/8 + 3*nsum(lambda n: + ... (fac(n)/(2*n+1))**2/fac(2*n), [0, inf])/8 + 0.91596559417721901505460351493238411077414937428167 + >>> 1-nsum(lambda n: n*zeta(2*n+1)/16**n, [1,inf]) + 0.91596559417721901505460351493238411077414937428167 +""" + +khinchin = r""" +Khinchin's constant `K` = 2.68542... is a number that +appears in the theory of continued fractions. 
Mpmath can evaluate +it to arbitrary precision:: + + >>> from mpmath import * + >>> mp.dps = 50; mp.pretty = True + >>> +khinchin + 2.6854520010653064453097148354817956938203822939945 + +An integral representation is:: + + >>> I = quad(lambda x: log((1-x**2)/sincpi(x))/x/(1+x), [0, 1]) + >>> 2*exp(1/log(2)*I) + 2.6854520010653064453097148354817956938203822939945 + +The computation of ``khinchin`` is based on an efficient +implementation of the following series:: + + >>> f = lambda n: (zeta(2*n)-1)/n*sum((-1)**(k+1)/mpf(k) + ... for k in range(1,2*int(n))) + >>> exp(nsum(f, [1,inf])/log(2)) + 2.6854520010653064453097148354817956938203822939945 +""" + +glaisher = r""" +Glaisher's constant `A`, also known as the Glaisher-Kinkelin +constant, is a number approximately equal to 1.282427129 that +sometimes appears in formulas related to gamma and zeta functions. +It is also related to the Barnes G-function (see :func:`~mpmath.barnesg`). + +The constant is defined as `A = \exp(1/12-\zeta'(-1))` where +`\zeta'(s)` denotes the derivative of the Riemann zeta function +(see :func:`~mpmath.zeta`). + +Mpmath can evaluate Glaisher's constant to arbitrary precision: + + >>> from mpmath import * + >>> mp.dps = 50; mp.pretty = True + >>> +glaisher + 1.282427129100622636875342568869791727767688927325 + +We can verify that the value computed by :data:`glaisher` is +correct using mpmath's facilities for numerical +differentiation and arbitrary evaluation of the zeta function: + + >>> exp(mpf(1)/12 - diff(zeta, -1)) + 1.282427129100622636875342568869791727767688927325 + +Here is an example of an integral that can be evaluated in +terms of Glaisher's constant: + + >>> mp.dps = 15 + >>> quad(lambda x: log(gamma(x)), [1, 1.5]) + -0.0428537406502909 + >>> -0.5 - 7*log(2)/24 + log(pi)/4 + 3*log(glaisher)/2 + -0.042853740650291 + +Mpmath computes Glaisher's constant by applying Euler-Maclaurin +summation to a slowly convergent series. 
The implementation is +reasonably efficient up to about 10,000 digits. See the source +code for additional details. + +References: +http://mathworld.wolfram.com/Glaisher-KinkelinConstant.html +""" + +apery = r""" +Represents Apery's constant, which is the irrational number +approximately equal to 1.2020569 given by + +.. math :: + + \zeta(3) = \sum_{k=1}^\infty\frac{1}{k^3}. + +The calculation is based on an efficient hypergeometric +series. To 50 decimal places, the value is given by:: + + >>> from mpmath import * + >>> mp.dps = 50; mp.pretty = True + >>> +apery + 1.2020569031595942853997381615114499907649862923405 + +Other ways to evaluate Apery's constant using mpmath +include:: + + >>> zeta(3) + 1.2020569031595942853997381615114499907649862923405 + >>> -psi(2,1)/2 + 1.2020569031595942853997381615114499907649862923405 + >>> 8*nsum(lambda k: 1/(2*k+1)**3, [0,inf])/7 + 1.2020569031595942853997381615114499907649862923405 + >>> f = lambda k: 2/k**3/(exp(2*pi*k)-1) + >>> 7*pi**3/180 - nsum(f, [1,inf]) + 1.2020569031595942853997381615114499907649862923405 + +This shows digits 9991-10000 of Apery's constant:: + + >>> mp.dps = 10000 + >>> str(apery)[-10:] + '3189504235' +""" + +mertens = r""" +Represents the Mertens or Meissel-Mertens constant, which is the +prime number analog of Euler's constant: + +.. math :: + + B_1 = \lim_{N\to\infty} + \left(\sum_{p_k \le N} \frac{1}{p_k} - \log \log N \right) + +Here `p_k` denotes the `k`-th prime number. Other names for this +constant include the Hadamard-de la Vallee-Poussin constant or +the prime reciprocal constant. 
+ +The following gives the Mertens constant to 50 digits:: + + >>> from mpmath import * + >>> mp.dps = 50; mp.pretty = True + >>> +mertens + 0.2614972128476427837554268386086958590515666482612 + +References: +http://mathworld.wolfram.com/MertensConstant.html +""" + +twinprime = r""" +Represents the twin prime constant, which is the factor `C_2` +featuring in the Hardy-Littlewood conjecture for the growth of the +twin prime counting function, + +.. math :: + + \pi_2(n) \sim 2 C_2 \frac{n}{\log^2 n}. + +It is given by the product over primes + +.. math :: + + C_2 = \prod_{p\ge3} \frac{p(p-2)}{(p-1)^2} \approx 0.66016 + +Computing `C_2` to 50 digits:: + + >>> from mpmath import * + >>> mp.dps = 50; mp.pretty = True + >>> +twinprime + 0.66016181584686957392781211001455577843262336028473 + +References: +http://mathworld.wolfram.com/TwinPrimesConstant.html +""" + +ln = r""" +Computes the natural logarithm of `x`, `\ln x`. +See :func:`~mpmath.log` for additional documentation.""" + +sqrt = r""" +``sqrt(x)`` gives the principal square root of `x`, `\sqrt x`. +For positive real numbers, the principal root is simply the +positive square root. For arbitrary complex numbers, the principal +square root is defined to satisfy `\sqrt x = \exp(\log(x)/2)`. +The function thus has a branch cut along the negative half real axis. + +For all mpmath numbers ``x``, calling ``sqrt(x)`` is equivalent to +performing ``x**0.5``. 
+ +**Examples** + +Basic examples and limits:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> sqrt(10) + 3.16227766016838 + >>> sqrt(100) + 10.0 + >>> sqrt(-4) + (0.0 + 2.0j) + >>> sqrt(1+1j) + (1.09868411346781 + 0.455089860562227j) + >>> sqrt(inf) + +inf + +Square root evaluation is fast at huge precision:: + + >>> mp.dps = 50000 + >>> a = sqrt(3) + >>> str(a)[-10:] + '9329332815' + +:func:`mpmath.iv.sqrt` supports interval arguments:: + + >>> iv.dps = 15; iv.pretty = True + >>> iv.sqrt([16,100]) + [4.0, 10.0] + >>> iv.sqrt(2) + [1.4142135623730949234, 1.4142135623730951455] + >>> iv.sqrt(2) ** 2 + [1.9999999999999995559, 2.0000000000000004441] + +""" + +cbrt = r""" +``cbrt(x)`` computes the cube root of `x`, `x^{1/3}`. This +function is faster and more accurate than raising to a floating-point +fraction:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> 125**(mpf(1)/3) + mpf('4.9999999999999991') + >>> cbrt(125) + mpf('5.0') + +Every nonzero complex number has three cube roots. This function +returns the cube root defined by `\exp(\log(x)/3)` where the +principal branch of the natural logarithm is used. Note that this +does not give a real cube root for negative real numbers:: + + >>> mp.pretty = True + >>> cbrt(-1) + (0.5 + 0.866025403784439j) +""" + +exp = r""" +Computes the exponential function, + +.. math :: + + \exp(x) = e^x = \sum_{k=0}^{\infty} \frac{x^k}{k!}. + +For complex numbers, the exponential function also satisfies + +.. math :: + + \exp(x+yi) = e^x (\cos y + i \sin y). 
+ +**Basic examples** + +Some values of the exponential function:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> exp(0) + 1.0 + >>> exp(1) + 2.718281828459045235360287 + >>> exp(-1) + 0.3678794411714423215955238 + >>> exp(inf) + +inf + >>> exp(-inf) + 0.0 + +Arguments can be arbitrarily large:: + + >>> exp(10000) + 8.806818225662921587261496e+4342 + >>> exp(-10000) + 1.135483865314736098540939e-4343 + +Evaluation is supported for interval arguments via +:func:`mpmath.iv.exp`:: + + >>> iv.dps = 25; iv.pretty = True + >>> iv.exp([-inf,0]) + [0.0, 1.0] + >>> iv.exp([0,1]) + [1.0, 2.71828182845904523536028749558] + +The exponential function can be evaluated efficiently to arbitrary +precision:: + + >>> mp.dps = 10000 + >>> exp(pi) #doctest: +ELLIPSIS + 23.140692632779269005729...8984304016040616 + +**Functional properties** + +Numerical verification of Euler's identity for the complex +exponential function:: + + >>> mp.dps = 15 + >>> exp(j*pi)+1 + (0.0 + 1.22464679914735e-16j) + >>> chop(exp(j*pi)+1) + 0.0 + +This recovers the coefficients (reciprocal factorials) in the +Maclaurin series expansion of exp:: + + >>> nprint(taylor(exp, 0, 5)) + [1.0, 1.0, 0.5, 0.166667, 0.0416667, 0.00833333] + +The exponential function is its own derivative and antiderivative:: + + >>> exp(pi) + 23.1406926327793 + >>> diff(exp, pi) + 23.1406926327793 + >>> quad(exp, [-inf, pi]) + 23.1406926327793 + +The exponential function can be evaluated using various methods, +including direct summation of the series, limits, and solving +the defining differential equation:: + + >>> nsum(lambda k: pi**k/fac(k), [0,inf]) + 23.1406926327793 + >>> limit(lambda k: (1+pi/k)**k, inf) + 23.1406926327793 + >>> odefun(lambda t, x: x, 0, 1)(pi) + 23.1406926327793 +""" + +cosh = r""" +Computes the hyperbolic cosine of `x`, +`\cosh(x) = (e^x + e^{-x})/2`. 
Values and limits include:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> cosh(0) + 1.0 + >>> cosh(1) + 1.543080634815243778477906 + >>> cosh(-inf), cosh(+inf) + (+inf, +inf) + +The hyperbolic cosine is an even, convex function with +a global minimum at `x = 0`, having a Maclaurin series +that starts:: + + >>> nprint(chop(taylor(cosh, 0, 5))) + [1.0, 0.0, 0.5, 0.0, 0.0416667, 0.0] + +Generalized to complex numbers, the hyperbolic cosine is +equivalent to a cosine with the argument rotated +in the imaginary direction, or `\cosh x = \cos ix`:: + + >>> cosh(2+3j) + (-3.724545504915322565473971 + 0.5118225699873846088344638j) + >>> cos(3-2j) + (-3.724545504915322565473971 + 0.5118225699873846088344638j) +""" + +sinh = r""" +Computes the hyperbolic sine of `x`, +`\sinh(x) = (e^x - e^{-x})/2`. Values and limits include:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> sinh(0) + 0.0 + >>> sinh(1) + 1.175201193643801456882382 + >>> sinh(-inf), sinh(+inf) + (-inf, +inf) + +The hyperbolic sine is an odd function, with a Maclaurin +series that starts:: + + >>> nprint(chop(taylor(sinh, 0, 5))) + [0.0, 1.0, 0.0, 0.166667, 0.0, 0.00833333] + +Generalized to complex numbers, the hyperbolic sine is +essentially a sine with a rotation `i` applied to +the argument; more precisely, `\sinh x = -i \sin ix`:: + + >>> sinh(2+3j) + (-3.590564589985779952012565 + 0.5309210862485198052670401j) + >>> j*sin(3-2j) + (-3.590564589985779952012565 + 0.5309210862485198052670401j) +""" + +tanh = r""" +Computes the hyperbolic tangent of `x`, +`\tanh(x) = \sinh(x)/\cosh(x)`. Values and limits include:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> tanh(0) + 0.0 + >>> tanh(1) + 0.7615941559557648881194583 + >>> tanh(-inf), tanh(inf) + (-1.0, 1.0) + +The hyperbolic tangent is an odd, sigmoidal function, similar +to the inverse tangent and error function. 
Its Maclaurin +series is:: + + >>> nprint(chop(taylor(tanh, 0, 5))) + [0.0, 1.0, 0.0, -0.333333, 0.0, 0.133333] + +Generalized to complex numbers, the hyperbolic tangent is +essentially a tangent with a rotation `i` applied to +the argument; more precisely, `\tanh x = -i \tan ix`:: + + >>> tanh(2+3j) + (0.9653858790221331242784803 - 0.009884375038322493720314034j) + >>> j*tan(3-2j) + (0.9653858790221331242784803 - 0.009884375038322493720314034j) +""" + +cos = r""" +Computes the cosine of `x`, `\cos(x)`. + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> cos(pi/3) + 0.5 + >>> cos(100000001) + -0.9802850113244713353133243 + >>> cos(2+3j) + (-4.189625690968807230132555 - 9.109227893755336597979197j) + >>> cos(inf) + nan + >>> nprint(chop(taylor(cos, 0, 6))) + [1.0, 0.0, -0.5, 0.0, 0.0416667, 0.0, -0.00138889] + +Intervals are supported via :func:`mpmath.iv.cos`:: + + >>> iv.dps = 25; iv.pretty = True + >>> iv.cos([0,1]) + [0.540302305868139717400936602301, 1.0] + >>> iv.cos([0,2]) + [-0.41614683654714238699756823214, 1.0] +""" + +sin = r""" +Computes the sine of `x`, `\sin(x)`. + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> sin(pi/3) + 0.8660254037844386467637232 + >>> sin(100000001) + 0.1975887055794968911438743 + >>> sin(2+3j) + (9.1544991469114295734673 - 4.168906959966564350754813j) + >>> sin(inf) + nan + >>> nprint(chop(taylor(sin, 0, 6))) + [0.0, 1.0, 0.0, -0.166667, 0.0, 0.00833333, 0.0] + +Intervals are supported via :func:`mpmath.iv.sin`:: + + >>> iv.dps = 25; iv.pretty = True + >>> iv.sin([0,1]) + [0.0, 0.841470984807896506652502331201] + >>> iv.sin([0,2]) + [0.0, 1.0] +""" + +tan = r""" +Computes the tangent of `x`, `\tan(x) = \frac{\sin(x)}{\cos(x)}`. +The tangent function is singular at `x = (n+1/2)\pi`, but +``tan(x)`` always returns a finite result since `(n+1/2)\pi` +cannot be represented exactly using floating-point arithmetic. 
+ + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> tan(pi/3) + 1.732050807568877293527446 + >>> tan(100000001) + -0.2015625081449864533091058 + >>> tan(2+3j) + (-0.003764025641504248292751221 + 1.003238627353609801446359j) + >>> tan(inf) + nan + >>> nprint(chop(taylor(tan, 0, 6))) + [0.0, 1.0, 0.0, 0.333333, 0.0, 0.133333, 0.0] + +Intervals are supported via :func:`mpmath.iv.tan`:: + + >>> iv.dps = 25; iv.pretty = True + >>> iv.tan([0,1]) + [0.0, 1.55740772465490223050697482944] + >>> iv.tan([0,2]) # Interval includes a singularity + [-inf, +inf] +""" + +sec = r""" +Computes the secant of `x`, `\mathrm{sec}(x) = \frac{1}{\cos(x)}`. +The secant function is singular at `x = (n+1/2)\pi`, but +``sec(x)`` always returns a finite result since `(n+1/2)\pi` +cannot be represented exactly using floating-point arithmetic. + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> sec(pi/3) + 2.0 + >>> sec(10000001) + -1.184723164360392819100265 + >>> sec(2+3j) + (-0.04167496441114427004834991 + 0.0906111371962375965296612j) + >>> sec(inf) + nan + >>> nprint(chop(taylor(sec, 0, 6))) + [1.0, 0.0, 0.5, 0.0, 0.208333, 0.0, 0.0847222] + +Intervals are supported via :func:`mpmath.iv.sec`:: + + >>> iv.dps = 25; iv.pretty = True + >>> iv.sec([0,1]) + [1.0, 1.85081571768092561791175326276] + >>> iv.sec([0,2]) # Interval includes a singularity + [-inf, +inf] +""" + +csc = r""" +Computes the cosecant of `x`, `\mathrm{csc}(x) = \frac{1}{\sin(x)}`. +This cosecant function is singular at `x = n \pi`, but with the +exception of the point `x = 0`, ``csc(x)`` returns a finite result +since `n \pi` cannot be represented exactly using floating-point +arithmetic. 
+ + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> csc(pi/3) + 1.154700538379251529018298 + >>> csc(10000001) + -1.864910497503629858938891 + >>> csc(2+3j) + (0.09047320975320743980579048 + 0.04120098628857412646300981j) + >>> csc(inf) + nan + +Intervals are supported via :func:`mpmath.iv.csc`:: + + >>> iv.dps = 25; iv.pretty = True + >>> iv.csc([0,1]) # Interval includes a singularity + [1.18839510577812121626159943988, +inf] + >>> iv.csc([0,2]) + [1.0, +inf] +""" + +cot = r""" +Computes the cotangent of `x`, +`\mathrm{cot}(x) = \frac{1}{\tan(x)} = \frac{\cos(x)}{\sin(x)}`. +This cotangent function is singular at `x = n \pi`, but with the +exception of the point `x = 0`, ``cot(x)`` returns a finite result +since `n \pi` cannot be represented exactly using floating-point +arithmetic. + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> cot(pi/3) + 0.5773502691896257645091488 + >>> cot(10000001) + 1.574131876209625656003562 + >>> cot(2+3j) + (-0.003739710376336956660117409 - 0.9967577965693583104609688j) + >>> cot(inf) + nan + +Intervals are supported via :func:`mpmath.iv.cot`:: + + >>> iv.dps = 25; iv.pretty = True + >>> iv.cot([0,1]) # Interval includes a singularity + [0.642092615934330703006419974862, +inf] + >>> iv.cot([1,2]) + [-inf, +inf] +""" + +acos = r""" +Computes the inverse cosine or arccosine of `x`, `\cos^{-1}(x)`. +Since `-1 \le \cos(x) \le 1` for real `x`, the inverse +cosine is real-valued only for `-1 \le x \le 1`. On this interval, +:func:`~mpmath.acos` is defined to be a monotonically decreasing +function assuming values between `+\pi` and `0`. 
+ +Basic values are:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> acos(-1) + 3.141592653589793238462643 + >>> acos(0) + 1.570796326794896619231322 + >>> acos(1) + 0.0 + >>> nprint(chop(taylor(acos, 0, 6))) + [1.5708, -1.0, 0.0, -0.166667, 0.0, -0.075, 0.0] + +:func:`~mpmath.acos` is defined so as to be a proper inverse function of +`\cos(\theta)` for `0 \le \theta < \pi`. +We have `\cos(\cos^{-1}(x)) = x` for all `x`, but +`\cos^{-1}(\cos(x)) = x` only for `0 \le \Re[x] < \pi`:: + + >>> for x in [1, 10, -1, 2+3j, 10+3j]: + ... print("%s %s" % (cos(acos(x)), acos(cos(x)))) + ... + 1.0 1.0 + (10.0 + 0.0j) 2.566370614359172953850574 + -1.0 1.0 + (2.0 + 3.0j) (2.0 + 3.0j) + (10.0 + 3.0j) (2.566370614359172953850574 - 3.0j) + +The inverse cosine has two branch points: `x = \pm 1`. :func:`~mpmath.acos` +places the branch cuts along the line segments `(-\infty, -1)` and +`(+1, +\infty)`. In general, + +.. math :: + + \cos^{-1}(x) = \frac{\pi}{2} + i \log\left(ix + \sqrt{1-x^2} \right) + +where the principal-branch log and square root are implied. +""" + +asin = r""" +Computes the inverse sine or arcsine of `x`, `\sin^{-1}(x)`. +Since `-1 \le \sin(x) \le 1` for real `x`, the inverse +sine is real-valued only for `-1 \le x \le 1`. +On this interval, it is defined to be a monotonically increasing +function assuming values between `-\pi/2` and `\pi/2`. + +Basic values are:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> asin(-1) + -1.570796326794896619231322 + >>> asin(0) + 0.0 + >>> asin(1) + 1.570796326794896619231322 + >>> nprint(chop(taylor(asin, 0, 6))) + [0.0, 1.0, 0.0, 0.166667, 0.0, 0.075, 0.0] + +:func:`~mpmath.asin` is defined so as to be a proper inverse function of +`\sin(\theta)` for `-\pi/2 < \theta < \pi/2`. +We have `\sin(\sin^{-1}(x)) = x` for all `x`, but +`\sin^{-1}(\sin(x)) = x` only for `-\pi/2 < \Re[x] < \pi/2`:: + + >>> for x in [1, 10, -1, 1+3j, -2+3j]: + ... 
print("%s %s" % (chop(sin(asin(x))), asin(sin(x)))) + ... + 1.0 1.0 + 10.0 -0.5752220392306202846120698 + -1.0 -1.0 + (1.0 + 3.0j) (1.0 + 3.0j) + (-2.0 + 3.0j) (-1.141592653589793238462643 - 3.0j) + +The inverse sine has two branch points: `x = \pm 1`. :func:`~mpmath.asin` +places the branch cuts along the line segments `(-\infty, -1)` and +`(+1, +\infty)`. In general, + +.. math :: + + \sin^{-1}(x) = -i \log\left(ix + \sqrt{1-x^2} \right) + +where the principal-branch log and square root are implied. +""" + +atan = r""" +Computes the inverse tangent or arctangent of `x`, `\tan^{-1}(x)`. +This is a real-valued function for all real `x`, with range +`(-\pi/2, \pi/2)`. + +Basic values are:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> atan(-inf) + -1.570796326794896619231322 + >>> atan(-1) + -0.7853981633974483096156609 + >>> atan(0) + 0.0 + >>> atan(1) + 0.7853981633974483096156609 + >>> atan(inf) + 1.570796326794896619231322 + >>> nprint(chop(taylor(atan, 0, 6))) + [0.0, 1.0, 0.0, -0.333333, 0.0, 0.2, 0.0] + +The inverse tangent is often used to compute angles. However, +the atan2 function is often better for this as it preserves sign +(see :func:`~mpmath.atan2`). + +:func:`~mpmath.atan` is defined so as to be a proper inverse function of +`\tan(\theta)` for `-\pi/2 < \theta < \pi/2`. +We have `\tan(\tan^{-1}(x)) = x` for all `x`, but +`\tan^{-1}(\tan(x)) = x` only for `-\pi/2 < \Re[x] < \pi/2`:: + + >>> mp.dps = 25 + >>> for x in [1, 10, -1, 1+3j, -2+3j]: + ... print("%s %s" % (tan(atan(x)), atan(tan(x)))) + ... + 1.0 1.0 + 10.0 0.5752220392306202846120698 + -1.0 -1.0 + (1.0 + 3.0j) (1.000000000000000000000001 + 3.0j) + (-2.0 + 3.0j) (1.141592653589793238462644 + 3.0j) + +The inverse tangent has two branch points: `x = \pm i`. :func:`~mpmath.atan` +places the branch cuts along the line segments `(-i \infty, -i)` and +`(+i, +i \infty)`. In general, + +.. 
math :: + + \tan^{-1}(x) = \frac{i}{2}\left(\log(1-ix)-\log(1+ix)\right) + +where the principal-branch log is implied. +""" + +acot = r"""Computes the inverse cotangent of `x`, +`\mathrm{cot}^{-1}(x) = \tan^{-1}(1/x)`.""" + +asec = r"""Computes the inverse secant of `x`, +`\mathrm{sec}^{-1}(x) = \cos^{-1}(1/x)`.""" + +acsc = r"""Computes the inverse cosecant of `x`, +`\mathrm{csc}^{-1}(x) = \sin^{-1}(1/x)`.""" + +coth = r"""Computes the hyperbolic cotangent of `x`, +`\mathrm{coth}(x) = \frac{\cosh(x)}{\sinh(x)}`. +""" + +sech = r"""Computes the hyperbolic secant of `x`, +`\mathrm{sech}(x) = \frac{1}{\cosh(x)}`. +""" + +csch = r"""Computes the hyperbolic cosecant of `x`, +`\mathrm{csch}(x) = \frac{1}{\sinh(x)}`. +""" + +acosh = r"""Computes the inverse hyperbolic cosine of `x`, +`\mathrm{cosh}^{-1}(x) = \log(x+\sqrt{x+1}\sqrt{x-1})`. +""" + +asinh = r"""Computes the inverse hyperbolic sine of `x`, +`\mathrm{sinh}^{-1}(x) = \log(x+\sqrt{1+x^2})`. +""" + +atanh = r"""Computes the inverse hyperbolic tangent of `x`, +`\mathrm{tanh}^{-1}(x) = \frac{1}{2}\left(\log(1+x)-\log(1-x)\right)`. 
+""" + +acoth = r"""Computes the inverse hyperbolic cotangent of `x`, +`\mathrm{coth}^{-1}(x) = \tanh^{-1}(1/x)`.""" + +asech = r"""Computes the inverse hyperbolic secant of `x`, +`\mathrm{sech}^{-1}(x) = \cosh^{-1}(1/x)`.""" + +acsch = r"""Computes the inverse hyperbolic cosecant of `x`, +`\mathrm{csch}^{-1}(x) = \sinh^{-1}(1/x)`.""" + + + +sinpi = r""" +Computes `\sin(\pi x)`, more accurately than the expression +``sin(pi*x)``:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> sinpi(10**10), sin(pi*(10**10)) + (0.0, -2.23936276195592e-6) + >>> sinpi(10**10+0.5), sin(pi*(10**10+0.5)) + (1.0, 0.999999999998721) +""" + +cospi = r""" +Computes `\cos(\pi x)`, more accurately than the expression +``cos(pi*x)``:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> cospi(10**10), cos(pi*(10**10)) + (1.0, 0.999999999997493) + >>> cospi(10**10+0.5), cos(pi*(10**10+0.5)) + (0.0, 1.59960492420134e-6) +""" + +sinc = r""" +``sinc(x)`` computes the unnormalized sinc function, defined as + +.. math :: + + \mathrm{sinc}(x) = \begin{cases} + \sin(x)/x, & \mbox{if } x \ne 0 \\ + 1, & \mbox{if } x = 0. + \end{cases} + +See :func:`~mpmath.sincpi` for the normalized sinc function. + +Simple values and limits include:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> sinc(0) + 1.0 + >>> sinc(1) + 0.841470984807897 + >>> sinc(inf) + 0.0 + +The integral of the sinc function is the sine integral Si:: + + >>> quad(sinc, [0, 1]) + 0.946083070367183 + >>> si(1) + 0.946083070367183 +""" + +sincpi = r""" +``sincpi(x)`` computes the normalized sinc function, defined as + +.. math :: + + \mathrm{sinc}_{\pi}(x) = \begin{cases} + \sin(\pi x)/(\pi x), & \mbox{if } x \ne 0 \\ + 1, & \mbox{if } x = 0. + \end{cases} + +Equivalently, we have +`\mathrm{sinc}_{\pi}(x) = \mathrm{sinc}(\pi x)`. 
+ +The normalization entails that the function integrates +to unity over the entire real line:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> quadosc(sincpi, [-inf, inf], period=2.0) + 1.0 + +Like, :func:`~mpmath.sinpi`, :func:`~mpmath.sincpi` is evaluated accurately +at its roots:: + + >>> sincpi(10) + 0.0 +""" + +expj = r""" +Convenience function for computing `e^{ix}`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> expj(0) + (1.0 + 0.0j) + >>> expj(-1) + (0.5403023058681397174009366 - 0.8414709848078965066525023j) + >>> expj(j) + (0.3678794411714423215955238 + 0.0j) + >>> expj(1+j) + (0.1987661103464129406288032 + 0.3095598756531121984439128j) +""" + +expjpi = r""" +Convenience function for computing `e^{i \pi x}`. +Evaluation is accurate near zeros (see also :func:`~mpmath.cospi`, +:func:`~mpmath.sinpi`):: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> expjpi(0) + (1.0 + 0.0j) + >>> expjpi(1) + (-1.0 + 0.0j) + >>> expjpi(0.5) + (0.0 + 1.0j) + >>> expjpi(-1) + (-1.0 + 0.0j) + >>> expjpi(j) + (0.04321391826377224977441774 + 0.0j) + >>> expjpi(1+j) + (-0.04321391826377224977441774 + 0.0j) +""" + +floor = r""" +Computes the floor of `x`, `\lfloor x \rfloor`, defined as +the largest integer less than or equal to `x`:: + + >>> from mpmath import * + >>> mp.pretty = False + >>> floor(3.5) + mpf('3.0') + +.. note :: + + :func:`~mpmath.floor`, :func:`~mpmath.ceil` and :func:`~mpmath.nint` return a + floating-point number, not a Python ``int``. If `\lfloor x \rfloor` is + too large to be represented exactly at the present working precision, + the result will be rounded, not necessarily in the direction + implied by the mathematical definition of the function. 
+ +To avoid rounding, use *prec=0*:: + + >>> mp.dps = 15 + >>> print(int(floor(10**30+1))) + 1000000000000000019884624838656 + >>> print(int(floor(10**30+1, prec=0))) + 1000000000000000000000000000001 + +The floor function is defined for complex numbers and +acts on the real and imaginary parts separately:: + + >>> floor(3.25+4.75j) + mpc(real='3.0', imag='4.0') +""" + +ceil = r""" +Computes the ceiling of `x`, `\lceil x \rceil`, defined as +the smallest integer greater than or equal to `x`:: + + >>> from mpmath import * + >>> mp.pretty = False + >>> ceil(3.5) + mpf('4.0') + +The ceiling function is defined for complex numbers and +acts on the real and imaginary parts separately:: + + >>> ceil(3.25+4.75j) + mpc(real='4.0', imag='5.0') + +See notes about rounding for :func:`~mpmath.floor`. +""" + +nint = r""" +Evaluates the nearest integer function, `\mathrm{nint}(x)`. +This gives the nearest integer to `x`; on a tie, it +gives the nearest even integer:: + + >>> from mpmath import * + >>> mp.pretty = False + >>> nint(3.2) + mpf('3.0') + >>> nint(3.8) + mpf('4.0') + >>> nint(3.5) + mpf('4.0') + >>> nint(4.5) + mpf('4.0') + +The nearest integer function is defined for complex numbers and +acts on the real and imaginary parts separately:: + + >>> nint(3.25+4.75j) + mpc(real='3.0', imag='5.0') + +See notes about rounding for :func:`~mpmath.floor`. +""" + +frac = r""" +Gives the fractional part of `x`, defined as +`\mathrm{frac}(x) = x - \lfloor x \rfloor` (see :func:`~mpmath.floor`). +In effect, this computes `x` modulo 1, or `x+n` where +`n \in \mathbb{Z}` is such that `x+n \in [0,1)`:: + + >>> from mpmath import * + >>> mp.pretty = False + >>> frac(1.25) + mpf('0.25') + >>> frac(3) + mpf('0.0') + >>> frac(-1.25) + mpf('0.75') + +For a complex number, the fractional part function applies to +the real and imaginary parts separately:: + + >>> frac(2.25+3.75j) + mpc(real='0.25', imag='0.75') + +Plotted, the fractional part function gives a sawtooth +wave. 
The Fourier series coefficients have a simple +form:: + + >>> mp.dps = 15 + >>> nprint(fourier(lambda x: frac(x)-0.5, [0,1], 4)) + ([0.0, 0.0, 0.0, 0.0, 0.0], [0.0, -0.31831, -0.159155, -0.106103, -0.0795775]) + >>> nprint([-1/(pi*k) for k in range(1,5)]) + [-0.31831, -0.159155, -0.106103, -0.0795775] + +.. note:: + + The fractional part is sometimes defined as a symmetric + function, i.e. returning `-\mathrm{frac}(-x)` if `x < 0`. + This convention is used, for instance, by Mathematica's + ``FractionalPart``. + +""" + +sign = r""" +Returns the sign of `x`, defined as `\mathrm{sign}(x) = x / |x|` +(with the special case `\mathrm{sign}(0) = 0`):: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> sign(10) + mpf('1.0') + >>> sign(-10) + mpf('-1.0') + >>> sign(0) + mpf('0.0') + +Note that the sign function is also defined for complex numbers, +for which it gives the projection onto the unit circle:: + + >>> mp.dps = 15; mp.pretty = True + >>> sign(1+j) + (0.707106781186547 + 0.707106781186547j) + +""" + +arg = r""" +Computes the complex argument (phase) of `x`, defined as the +signed angle between the positive real axis and `x` in the +complex plane:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> arg(3) + 0.0 + >>> arg(3+3j) + 0.785398163397448 + >>> arg(3j) + 1.5707963267949 + >>> arg(-3) + 3.14159265358979 + >>> arg(-3j) + -1.5707963267949 + +The angle is defined to satisfy `-\pi < \arg(x) \le \pi` and +with the sign convention that a nonnegative imaginary part +results in a nonnegative argument. + +The value returned by :func:`~mpmath.arg` is an ``mpf`` instance. +""" + +fabs = r""" +Returns the absolute value of `x`, `|x|`. 
Unlike :func:`abs`, +:func:`~mpmath.fabs` converts non-mpmath numbers (such as ``int``) +into mpmath numbers:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> fabs(3) + mpf('3.0') + >>> fabs(-3) + mpf('3.0') + >>> fabs(3+4j) + mpf('5.0') +""" + +re = r""" +Returns the real part of `x`, `\Re(x)`. :func:`~mpmath.re` +converts a non-mpmath number to an mpmath number:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> re(3) + mpf('3.0') + >>> re(-1+4j) + mpf('-1.0') +""" + +im = r""" +Returns the imaginary part of `x`, `\Im(x)`. :func:`~mpmath.im` +converts a non-mpmath number to an mpmath number:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> im(3) + mpf('0.0') + >>> im(-1+4j) + mpf('4.0') +""" + +conj = r""" +Returns the complex conjugate of `x`, `\overline{x}`. Unlike +``x.conjugate()``, :func:`~mpmath.im` converts `x` to a mpmath number:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> conj(3) + mpf('3.0') + >>> conj(-1+4j) + mpc(real='-1.0', imag='-4.0') +""" + +polar = r""" +Returns the polar representation of the complex number `z` +as a pair `(r, \phi)` such that `z = r e^{i \phi}`:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> polar(-2) + (2.0, 3.14159265358979) + >>> polar(3-4j) + (5.0, -0.927295218001612) +""" + +rect = r""" +Returns the complex number represented by polar +coordinates `(r, \phi)`:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> chop(rect(2, pi)) + -2.0 + >>> rect(sqrt(2), -pi/4) + (1.0 - 1.0j) +""" + +expm1 = r""" +Computes `e^x - 1`, accurately for small `x`. 
+ +Unlike the expression ``exp(x) - 1``, ``expm1(x)`` does not suffer from +potentially catastrophic cancellation:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> exp(1e-10)-1; print(expm1(1e-10)) + 1.00000008274037e-10 + 1.00000000005e-10 + >>> exp(1e-20)-1; print(expm1(1e-20)) + 0.0 + 1.0e-20 + >>> 1/(exp(1e-20)-1) + Traceback (most recent call last): + ... + ZeroDivisionError + >>> 1/expm1(1e-20) + 1.0e+20 + +Evaluation works for extremely tiny values:: + + >>> expm1(0) + 0.0 + >>> expm1('1e-10000000') + 1.0e-10000000 + +""" + +log1p = r""" +Computes `\log(1+x)`, accurately for small `x`. + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> log(1+1e-10); print(mp.log1p(1e-10)) + 1.00000008269037e-10 + 9.9999999995e-11 + >>> mp.log1p(1e-100j) + (5.0e-201 + 1.0e-100j) + >>> mp.log1p(0) + 0.0 + +""" + + +powm1 = r""" +Computes `x^y - 1`, accurately when `x^y` is very close to 1. + +This avoids potentially catastrophic cancellation:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> power(0.99999995, 1e-10) - 1 + 0.0 + >>> powm1(0.99999995, 1e-10) + -5.00000012791934e-18 + +Powers exactly equal to 1, and only those powers, yield 0 exactly:: + + >>> powm1(-j, 4) + (0.0 + 0.0j) + >>> powm1(3, 0) + 0.0 + >>> powm1(fadd(-1, 1e-100, exact=True), 4) + -4.0e-100 + +Evaluation works for extremely tiny `y`:: + + >>> powm1(2, '1e-100000') + 6.93147180559945e-100001 + >>> powm1(j, '1e-1000') + (-1.23370055013617e-2000 + 1.5707963267949e-1000j) + +""" + +root = r""" +``root(z, n, k=0)`` computes an `n`-th root of `z`, i.e. returns a number +`r` that (up to possible approximation error) satisfies `r^n = z`. +(``nthroot`` is available as an alias for ``root``.) + +Every complex number `z \ne 0` has `n` distinct `n`-th roots, which are +equidistant points on a circle with radius `|z|^{1/n}`, centered around the +origin. A specific root may be selected using the optional index +`k`. 
The roots are indexed counterclockwise, starting with `k = 0` for the root +closest to the positive real half-axis. + +The `k = 0` root is the so-called principal `n`-th root, often denoted by +`\sqrt[n]{z}` or `z^{1/n}`, and also given by `\exp(\log(z) / n)`. If `z` is +a positive real number, the principal root is just the unique positive +`n`-th root of `z`. Under some circumstances, non-principal real roots exist: +for positive real `z`, `n` even, there is a negative root given by `k = n/2`; +for negative real `z`, `n` odd, there is a negative root given by `k = (n-1)/2`. + +To obtain all roots with a simple expression, use +``[root(z,n,k) for k in range(n)]``. + +An important special case, ``root(1, n, k)`` returns the `k`-th `n`-th root of +unity, `\zeta_k = e^{2 \pi i k / n}`. Alternatively, :func:`~mpmath.unitroots` +provides a slightly more convenient way to obtain the roots of unity, +including the option to compute only the primitive roots of unity. + +Both `k` and `n` should be integers; `k` outside of ``range(n)`` will be +reduced modulo `n`. If `n` is negative, `x^{-1/n} = 1/x^{1/n}` (or +the equivalent reciprocal for a non-principal root with `k \ne 0`) is computed. + +:func:`~mpmath.root` is implemented to use Newton's method for small +`n`. At high precision, this makes `x^{1/n}` not much more +expensive than the regular exponentiation, `x^n`. For very large +`n`, :func:`~mpmath.nthroot` falls back to use the exponential function. 
+ +**Examples** + +:func:`~mpmath.nthroot`/:func:`~mpmath.root` is faster and more accurate than raising to a +floating-point fraction:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> 16807 ** (mpf(1)/5) + mpf('7.0000000000000009') + >>> root(16807, 5) + mpf('7.0') + >>> nthroot(16807, 5) # Alias + mpf('7.0') + +A high-precision root:: + + >>> mp.dps = 50; mp.pretty = True + >>> nthroot(10, 5) + 1.584893192461113485202101373391507013269442133825 + >>> nthroot(10, 5) ** 5 + 10.0 + +Computing principal and non-principal square and cube roots:: + + >>> mp.dps = 15 + >>> root(10, 2) + 3.16227766016838 + >>> root(10, 2, 1) + -3.16227766016838 + >>> root(-10, 3) + (1.07721734501594 + 1.86579517236206j) + >>> root(-10, 3, 1) + -2.15443469003188 + >>> root(-10, 3, 2) + (1.07721734501594 - 1.86579517236206j) + +All the 7th roots of a complex number:: + + >>> for r in [root(3+4j, 7, k) for k in range(7)]: + ... print("%s %s" % (r, r**7)) + ... + (1.24747270589553 + 0.166227124177353j) (3.0 + 4.0j) + (0.647824911301003 + 1.07895435170559j) (3.0 + 4.0j) + (-0.439648254723098 + 1.17920694574172j) (3.0 + 4.0j) + (-1.19605731775069 + 0.391492658196305j) (3.0 + 4.0j) + (-1.05181082538903 - 0.691023585965793j) (3.0 + 4.0j) + (-0.115529328478668 - 1.25318497558335j) (3.0 + 4.0j) + (0.907748109144957 - 0.871672518271819j) (3.0 + 4.0j) + +Cube roots of unity:: + + >>> for k in range(3): print(root(1, 3, k)) + ... + 1.0 + (-0.5 + 0.866025403784439j) + (-0.5 - 0.866025403784439j) + +Some exact high order roots:: + + >>> root(75**210, 105) + 5625.0 + >>> root(1, 128, 96) + (0.0 - 1.0j) + >>> root(4**128, 128, 96) + (0.0 - 4.0j) + +""" + +unitroots = r""" +``unitroots(n)`` returns `\zeta_0, \zeta_1, \ldots, \zeta_{n-1}`, +all the distinct `n`-th roots of unity, as a list. If the option +*primitive=True* is passed, only the primitive roots are returned. + +Every `n`-th root of unity satisfies `(\zeta_k)^n = 1`. 
There are `n` distinct +roots for each `n` (`\zeta_k` and `\zeta_j` are the same when +`k = j \pmod n`), which form a regular polygon with vertices on the unit +circle. They are ordered counterclockwise with increasing `k`, starting +with `\zeta_0 = 1`. + +**Examples** + +The roots of unity up to `n = 4`:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> nprint(unitroots(1)) + [1.0] + >>> nprint(unitroots(2)) + [1.0, -1.0] + >>> nprint(unitroots(3)) + [1.0, (-0.5 + 0.866025j), (-0.5 - 0.866025j)] + >>> nprint(unitroots(4)) + [1.0, (0.0 + 1.0j), -1.0, (0.0 - 1.0j)] + +Roots of unity form a geometric series that sums to 0:: + + >>> mp.dps = 50 + >>> chop(fsum(unitroots(25))) + 0.0 + +Primitive roots up to `n = 4`:: + + >>> mp.dps = 15 + >>> nprint(unitroots(1, primitive=True)) + [1.0] + >>> nprint(unitroots(2, primitive=True)) + [-1.0] + >>> nprint(unitroots(3, primitive=True)) + [(-0.5 + 0.866025j), (-0.5 - 0.866025j)] + >>> nprint(unitroots(4, primitive=True)) + [(0.0 + 1.0j), (0.0 - 1.0j)] + +There are only four primitive 12th roots:: + + >>> nprint(unitroots(12, primitive=True)) + [(0.866025 + 0.5j), (-0.866025 + 0.5j), (-0.866025 - 0.5j), (0.866025 - 0.5j)] + +The `n`-th roots of unity form a group, the cyclic group of order `n`. +Any primitive root `r` is a generator for this group, meaning that +`r^0, r^1, \ldots, r^{n-1}` gives the whole set of unit roots (in +some permuted order):: + + >>> for r in unitroots(6): print(r) + ... + 1.0 + (0.5 + 0.866025403784439j) + (-0.5 + 0.866025403784439j) + -1.0 + (-0.5 - 0.866025403784439j) + (0.5 - 0.866025403784439j) + >>> r = unitroots(6, primitive=True)[1] + >>> for k in range(6): print(chop(r**k)) + ... 
+ 1.0 + (0.5 - 0.866025403784439j) + (-0.5 - 0.866025403784439j) + -1.0 + (-0.5 + 0.866025403784438j) + (0.5 + 0.866025403784438j) + +The number of primitive roots equals the Euler totient function `\phi(n)`:: + + >>> [len(unitroots(n, primitive=True)) for n in range(1,20)] + [1, 1, 2, 2, 4, 2, 6, 4, 6, 4, 10, 4, 12, 6, 8, 8, 16, 6, 18] + +""" + + +log = r""" +Computes the base-`b` logarithm of `x`, `\log_b(x)`. If `b` is +unspecified, :func:`~mpmath.log` computes the natural (base `e`) logarithm +and is equivalent to :func:`~mpmath.ln`. In general, the base `b` logarithm +is defined in terms of the natural logarithm as +`\log_b(x) = \ln(x)/\ln(b)`. + +By convention, we take `\log(0) = -\infty`. + +The natural logarithm is real if `x > 0` and complex if `x < 0` or if +`x` is complex. The principal branch of the complex logarithm is +used, meaning that `\Im(\ln(x)) = -\pi < \arg(x) \le \pi`. + +**Examples** + +Some basic values and limits:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> log(1) + 0.0 + >>> log(2) + 0.693147180559945 + >>> log(1000,10) + 3.0 + >>> log(4, 16) + 0.5 + >>> log(j) + (0.0 + 1.5707963267949j) + >>> log(-1) + (0.0 + 3.14159265358979j) + >>> log(0) + -inf + >>> log(inf) + +inf + +The natural logarithm is the antiderivative of `1/x`:: + + >>> quad(lambda x: 1/x, [1, 5]) + 1.6094379124341 + >>> log(5) + 1.6094379124341 + >>> diff(log, 10) + 0.1 + +The Taylor series expansion of the natural logarithm around +`x = 1` has coefficients `(-1)^{n+1}/n`:: + + >>> nprint(taylor(log, 1, 7)) + [0.0, 1.0, -0.5, 0.333333, -0.25, 0.2, -0.166667, 0.142857] + +:func:`~mpmath.log` supports arbitrary precision evaluation:: + + >>> mp.dps = 50 + >>> log(pi) + 1.1447298858494001741434273513530587116472948129153 + >>> log(pi, pi**3) + 0.33333333333333333333333333333333333333333333333333 + >>> mp.dps = 25 + >>> log(3+4j) + (1.609437912434100374600759 + 0.9272952180016122324285125j) +""" + +log10 = r""" +Computes the base-10 logarithm of `x`, 
`\log_{10}(x)`. ``log10(x)`` +is equivalent to ``log(x, 10)``. +""" + +fmod = r""" +Converts `x` and `y` to mpmath numbers and returns `x \mod y`. +For mpmath numbers, this is equivalent to ``x % y``. + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> fmod(100, pi) + 2.61062773871641 + +You can use :func:`~mpmath.fmod` to compute fractional parts of numbers:: + + >>> fmod(10.25, 1) + 0.25 + +""" + +radians = r""" +Converts the degree angle `x` to radians:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> radians(60) + 1.0471975511966 +""" + +degrees = r""" +Converts the radian angle `x` to a degree angle:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> degrees(pi/3) + 60.0 +""" + +atan2 = r""" +Computes the two-argument arctangent, `\mathrm{atan2}(y, x)`, +giving the signed angle between the positive `x`-axis and the +point `(x, y)` in the 2D plane. This function is defined for +real `x` and `y` only. + +The two-argument arctangent essentially computes +`\mathrm{atan}(y/x)`, but accounts for the signs of both +`x` and `y` to give the angle for the correct quadrant. The +following examples illustrate the difference:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> atan2(1,1), atan(1/1.) + (0.785398163397448, 0.785398163397448) + >>> atan2(1,-1), atan(1/-1.) + (2.35619449019234, -0.785398163397448) + >>> atan2(-1,1), atan(-1/1.) + (-0.785398163397448, -0.785398163397448) + >>> atan2(-1,-1), atan(-1/-1.) + (-2.35619449019234, 0.785398163397448) + +The angle convention is the same as that used for the complex +argument; see :func:`~mpmath.arg`. +""" + +fibonacci = r""" +``fibonacci(n)`` computes the `n`-th Fibonacci number, `F(n)`. The +Fibonacci numbers are defined by the recurrence `F(n) = F(n-1) + F(n-2)` +with the initial values `F(0) = 0`, `F(1) = 1`. :func:`~mpmath.fibonacci` +extends this definition to arbitrary real and complex arguments +using the formula + +.. 
math :: + + F(z) = \frac{\phi^z - \cos(\pi z) \phi^{-z}}{\sqrt 5} + +where `\phi` is the golden ratio. :func:`~mpmath.fibonacci` also uses this +continuous formula to compute `F(n)` for extremely large `n`, where +calculating the exact integer would be wasteful. + +For convenience, :func:`~mpmath.fib` is available as an alias for +:func:`~mpmath.fibonacci`. + +**Basic examples** + +Some small Fibonacci numbers are:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> for i in range(10): + ... print(fibonacci(i)) + ... + 0.0 + 1.0 + 1.0 + 2.0 + 3.0 + 5.0 + 8.0 + 13.0 + 21.0 + 34.0 + >>> fibonacci(50) + 12586269025.0 + +The recurrence for `F(n)` extends backwards to negative `n`:: + + >>> for i in range(10): + ... print(fibonacci(-i)) + ... + 0.0 + 1.0 + -1.0 + 2.0 + -3.0 + 5.0 + -8.0 + 13.0 + -21.0 + 34.0 + +Large Fibonacci numbers will be computed approximately unless +the precision is set high enough:: + + >>> fib(200) + 2.8057117299251e+41 + >>> mp.dps = 45 + >>> fib(200) + 280571172992510140037611932413038677189525.0 + +:func:`~mpmath.fibonacci` can compute approximate Fibonacci numbers +of stupendous size:: + + >>> mp.dps = 15 + >>> fibonacci(10**25) + 3.49052338550226e+2089876402499787337692720 + +**Real and complex arguments** + +The extended Fibonacci function is an analytic function. The +property `F(z) = F(z-1) + F(z-2)` holds for arbitrary `z`:: + + >>> mp.dps = 15 + >>> fib(pi) + 2.1170270579161 + >>> fib(pi-1) + fib(pi-2) + 2.1170270579161 + >>> fib(3+4j) + (-5248.51130728372 - 14195.962288353j) + >>> fib(2+4j) + fib(1+4j) + (-5248.51130728372 - 14195.962288353j) + +The Fibonacci function has infinitely many roots on the +negative half-real axis. 
The first root is at 0, the second is
+close to -0.18, and then there are infinitely many roots that
+asymptotically approach `-n+1/2`::
+
+    >>> findroot(fib, -0.2)
+    -0.183802359692956
+    >>> findroot(fib, -2)
+    -1.57077646820395
+    >>> findroot(fib, -17)
+    -16.4999999596115
+    >>> findroot(fib, -24)
+    -23.5000000000479
+
+**Mathematical relationships**
+
+For large `n`, `F(n+1)/F(n)` approaches the golden ratio::
+
+    >>> mp.dps = 50
+    >>> fibonacci(101)/fibonacci(100)
+    1.6180339887498948482045868343656381177203127439638
+    >>> +phi
+    1.6180339887498948482045868343656381177203091798058
+
+The sum of reciprocal Fibonacci numbers converges to an irrational
+number for which no closed form expression is known::
+
+    >>> mp.dps = 15
+    >>> nsum(lambda n: 1/fib(n), [1, inf])
+    3.35988566624318
+
+Amazingly, however, the sum of odd-index reciprocal Fibonacci
+numbers can be expressed in terms of a Jacobi theta function::
+
+    >>> nsum(lambda n: 1/fib(2*n+1), [0, inf])
+    1.82451515740692
+    >>> sqrt(5)*jtheta(2,0,(3-sqrt(5))/2)**2/4
+    1.82451515740692
+
+Some related sums can be done in closed form::
+
+    >>> nsum(lambda k: 1/(1+fib(2*k+1)), [0, inf])
+    1.11803398874989
+    >>> phi - 0.5
+    1.11803398874989
+    >>> f = lambda k:(-1)**(k+1) / sum(fib(n)**2 for n in range(1,int(k+1)))
+    >>> nsum(f, [1, inf])
+    0.618033988749895
+    >>> phi-1
+    0.618033988749895
+
+**References**
+
+1. http://mathworld.wolfram.com/FibonacciNumber.html
+"""
+
+altzeta = r"""
+Gives the Dirichlet eta function, `\eta(s)`, also known as the
+alternating zeta function. This function is defined in analogy
+with the Riemann zeta function as providing the sum of the
+alternating series
+
+.. math ::
+
+    \eta(s) = \sum_{k=0}^{\infty} \frac{(-1)^k}{(k+1)^s}
+        = 1-\frac{1}{2^s}+\frac{1}{3^s}-\frac{1}{4^s}+\ldots
+
+The eta function, unlike the Riemann zeta function, is an entire
+function, having a finite value for all complex `s`. The special case
+`\eta(1) = \log(2)` gives the value of the alternating harmonic series.
+
+The alternating zeta function may be expressed using the Riemann zeta function
+as `\eta(s) = (1 - 2^{1-s}) \zeta(s)`. It can also be expressed
+in terms of the Hurwitz zeta function, for example using
+:func:`~mpmath.dirichlet` (see documentation for that function).
+
+**Examples**
+
+Some special values are::
+
+    >>> from mpmath import *
+    >>> mp.dps = 15; mp.pretty = True
+    >>> altzeta(1)
+    0.693147180559945
+    >>> altzeta(0)
+    0.5
+    >>> altzeta(-1)
+    0.25
+    >>> altzeta(-2)
+    0.0
+
+An example of a sum that can be computed more accurately and
+efficiently via :func:`~mpmath.altzeta` than via numerical summation::
+
+    >>> sum(-(-1)**n / mpf(n)**2.5 for n in range(1, 100))
+    0.867204951503984
+    >>> altzeta(2.5)
+    0.867199889012184
+
+At positive even integers, the Dirichlet eta function
+evaluates to a rational multiple of a power of `\pi`::
+
+    >>> altzeta(2)
+    0.822467033424113
+    >>> pi**2/12
+    0.822467033424113
+
+Like the Riemann zeta function, `\eta(s)` approaches 1
+as `s` approaches positive infinity, although it does
+so from below rather than from above::
+
+    >>> altzeta(30)
+    0.999999999068682
+    >>> altzeta(inf)
+    1.0
+    >>> mp.pretty = False
+    >>> altzeta(1000, rounding='d')
+    mpf('0.99999999999999989')
+    >>> altzeta(1000, rounding='u')
+    mpf('1.0')
+
+**References**
+
+1. http://mathworld.wolfram.com/DirichletEtaFunction.html
+
+2. http://en.wikipedia.org/wiki/Dirichlet_eta_function
+"""
+
+factorial = r"""
+Computes the factorial, `x!`. For integers `n \ge 0`, we have
+`n! = 1 \cdot 2 \cdots (n-1) \cdot n` and more generally the factorial
+is defined for real or complex `x` by `x! = \Gamma(x+1)`.
+
+**Examples**
+
+Basic values and limits::
+
+    >>> from mpmath import *
+    >>> mp.dps = 15; mp.pretty = True
+    >>> for k in range(6):
+    ...     print("%s %s" % (k, fac(k)))
+    ...
+ 0 1.0 + 1 1.0 + 2 2.0 + 3 6.0 + 4 24.0 + 5 120.0 + >>> fac(inf) + +inf + >>> fac(0.5), sqrt(pi)/2 + (0.886226925452758, 0.886226925452758) + +For large positive `x`, `x!` can be approximated by +Stirling's formula:: + + >>> x = 10**10 + >>> fac(x) + 2.32579620567308e+95657055186 + >>> sqrt(2*pi*x)*(x/e)**x + 2.32579597597705e+95657055186 + +:func:`~mpmath.fac` supports evaluation for astronomically large values:: + + >>> fac(10**30) + 6.22311232304258e+29565705518096748172348871081098 + +Reciprocal factorials appear in the Taylor series of the +exponential function (among many other contexts):: + + >>> nsum(lambda k: 1/fac(k), [0, inf]), exp(1) + (2.71828182845905, 2.71828182845905) + >>> nsum(lambda k: pi**k/fac(k), [0, inf]), exp(pi) + (23.1406926327793, 23.1406926327793) + +""" + +gamma = r""" +Computes the gamma function, `\Gamma(x)`. The gamma function is a +shifted version of the ordinary factorial, satisfying +`\Gamma(n) = (n-1)!` for integers `n > 0`. More generally, it +is defined by + +.. math :: + + \Gamma(x) = \int_0^{\infty} t^{x-1} e^{-t}\, dt + +for any real or complex `x` with `\Re(x) > 0` and for `\Re(x) < 0` +by analytic continuation. + +**Examples** + +Basic values and limits:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> for k in range(1, 6): + ... print("%s %s" % (k, gamma(k))) + ... + 1 1.0 + 2 1.0 + 3 2.0 + 4 6.0 + 5 24.0 + >>> gamma(inf) + +inf + >>> gamma(0) + Traceback (most recent call last): + ... 
+ ValueError: gamma function pole + +The gamma function of a half-integer is a rational multiple of +`\sqrt{\pi}`:: + + >>> gamma(0.5), sqrt(pi) + (1.77245385090552, 1.77245385090552) + >>> gamma(1.5), sqrt(pi)/2 + (0.886226925452758, 0.886226925452758) + +We can check the integral definition:: + + >>> gamma(3.5) + 3.32335097044784 + >>> quad(lambda t: t**2.5*exp(-t), [0,inf]) + 3.32335097044784 + +:func:`~mpmath.gamma` supports arbitrary-precision evaluation and +complex arguments:: + + >>> mp.dps = 50 + >>> gamma(sqrt(3)) + 0.91510229697308632046045539308226554038315280564184 + >>> mp.dps = 25 + >>> gamma(2j) + (0.009902440080927490985955066 - 0.07595200133501806872408048j) + +Arguments can also be large. Note that the gamma function grows +very quickly:: + + >>> mp.dps = 15 + >>> gamma(10**20) + 1.9328495143101e+1956570551809674817225 + +**References** + +* [Spouge]_ + +""" + +psi = r""" +Gives the polygamma function of order `m` of `z`, `\psi^{(m)}(z)`. +Special cases are known as the *digamma function* (`\psi^{(0)}(z)`), +the *trigamma function* (`\psi^{(1)}(z)`), etc. The polygamma +functions are defined as the logarithmic derivatives of the gamma +function: + +.. math :: + + \psi^{(m)}(z) = \left(\frac{d}{dz}\right)^{m+1} \log \Gamma(z) + +In particular, `\psi^{(0)}(z) = \Gamma'(z)/\Gamma(z)`. In the +present implementation of :func:`~mpmath.psi`, the order `m` must be a +nonnegative integer, while the argument `z` may be an arbitrary +complex number (with exception for the polygamma function's poles +at `z = 0, -1, -2, \ldots`). 
+ +**Examples** + +For various rational arguments, the polygamma function reduces to +a combination of standard mathematical constants:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> psi(0, 1), -euler + (-0.5772156649015328606065121, -0.5772156649015328606065121) + >>> psi(1, '1/4'), pi**2+8*catalan + (17.19732915450711073927132, 17.19732915450711073927132) + >>> psi(2, '1/2'), -14*apery + (-16.82879664423431999559633, -16.82879664423431999559633) + +The polygamma functions are derivatives of each other:: + + >>> diff(lambda x: psi(3, x), pi), psi(4, pi) + (-0.1105749312578862734526952, -0.1105749312578862734526952) + >>> quad(lambda x: psi(4, x), [2, 3]), psi(3,3)-psi(3,2) + (-0.375, -0.375) + +The digamma function diverges logarithmically as `z \to \infty`, +while higher orders tend to zero:: + + >>> psi(0,inf), psi(1,inf), psi(2,inf) + (+inf, 0.0, 0.0) + +Evaluation for a complex argument:: + + >>> psi(2, -1-2j) + (0.03902435405364952654838445 + 0.1574325240413029954685366j) + +Evaluation is supported for large orders `m` and/or large +arguments `z`:: + + >>> psi(3, 10**100) + 2.0e-300 + >>> psi(250, 10**30+10**20*j) + (-1.293142504363642687204865e-7010 + 3.232856260909107391513108e-7018j) + +**Application to infinite series** + +Any infinite series where the summand is a rational function of +the index `k` can be evaluated in closed form in terms of polygamma +functions of the roots and poles of the summand:: + + >>> a = sqrt(2) + >>> b = sqrt(3) + >>> nsum(lambda k: 1/((k+a)**2*(k+b)), [0, inf]) + 0.4049668927517857061917531 + >>> (psi(0,a)-psi(0,b)-a*psi(1,a)+b*psi(1,a))/(a-b)**2 + 0.4049668927517857061917531 + +This follows from the series representation (`m > 0`) + +.. math :: + + \psi^{(m)}(z) = (-1)^{m+1} m! \sum_{k=0}^{\infty} + \frac{1}{(z+k)^{m+1}}. 
+ +Since the roots of a polynomial may be complex, it is sometimes +necessary to use the complex polygamma function to evaluate +an entirely real-valued sum:: + + >>> nsum(lambda k: 1/(k**2-2*k+3), [0, inf]) + 1.694361433907061256154665 + >>> nprint(polyroots([1,-2,3])) + [(1.0 - 1.41421j), (1.0 + 1.41421j)] + >>> r1 = 1-sqrt(2)*j + >>> r2 = r1.conjugate() + >>> (psi(0,-r2)-psi(0,-r1))/(r1-r2) + (1.694361433907061256154665 + 0.0j) + +""" + +digamma = r""" +Shortcut for ``psi(0,z)``. +""" + +harmonic = r""" +If `n` is an integer, ``harmonic(n)`` gives a floating-point +approximation of the `n`-th harmonic number `H(n)`, defined as + +.. math :: + + H(n) = 1 + \frac{1}{2} + \frac{1}{3} + \ldots + \frac{1}{n} + +The first few harmonic numbers are:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> for n in range(8): + ... print("%s %s" % (n, harmonic(n))) + ... + 0 0.0 + 1 1.0 + 2 1.5 + 3 1.83333333333333 + 4 2.08333333333333 + 5 2.28333333333333 + 6 2.45 + 7 2.59285714285714 + +The infinite harmonic series `1 + 1/2 + 1/3 + \ldots` diverges:: + + >>> harmonic(inf) + +inf + +:func:`~mpmath.harmonic` is evaluated using the digamma function rather +than by summing the harmonic series term by term. It can therefore +be computed quickly for arbitrarily large `n`, and even for +nonintegral arguments:: + + >>> harmonic(10**100) + 230.835724964306 + >>> harmonic(0.5) + 0.613705638880109 + >>> harmonic(3+4j) + (2.24757548223494 + 0.850502209186044j) + +:func:`~mpmath.harmonic` supports arbitrary precision evaluation:: + + >>> mp.dps = 50 + >>> harmonic(11) + 3.0198773448773448773448773448773448773448773448773 + >>> harmonic(pi) + 1.8727388590273302654363491032336134987519132374152 + +The harmonic series diverges, but at a glacial pace. 
It is possible +to calculate the exact number of terms required before the sum +exceeds a given amount, say 100:: + + >>> mp.dps = 50 + >>> v = 10**findroot(lambda x: harmonic(10**x) - 100, 10) + >>> v + 15092688622113788323693563264538101449859496.864101 + >>> v = int(ceil(v)) + >>> print(v) + 15092688622113788323693563264538101449859497 + >>> harmonic(v-1) + 99.999999999999999999999999999999999999999999942747 + >>> harmonic(v) + 100.000000000000000000000000000000000000000000009 + +""" + +bernoulli = r""" +Computes the nth Bernoulli number, `B_n`, for any integer `n \ge 0`. + +The Bernoulli numbers are rational numbers, but this function +returns a floating-point approximation. To obtain an exact +fraction, use :func:`~mpmath.bernfrac` instead. + +**Examples** + +Numerical values of the first few Bernoulli numbers:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> for n in range(15): + ... print("%s %s" % (n, bernoulli(n))) + ... + 0 1.0 + 1 -0.5 + 2 0.166666666666667 + 3 0.0 + 4 -0.0333333333333333 + 5 0.0 + 6 0.0238095238095238 + 7 0.0 + 8 -0.0333333333333333 + 9 0.0 + 10 0.0757575757575758 + 11 0.0 + 12 -0.253113553113553 + 13 0.0 + 14 1.16666666666667 + +Bernoulli numbers can be approximated with arbitrary precision:: + + >>> mp.dps = 50 + >>> bernoulli(100) + -2.8382249570693706959264156336481764738284680928013e+78 + +Arbitrarily large `n` are supported:: + + >>> mp.dps = 15 + >>> bernoulli(10**20 + 2) + 3.09136296657021e+1876752564973863312327 + +The Bernoulli numbers are related to the Riemann zeta function +at integer arguments:: + + >>> -bernoulli(8) * (2*pi)**8 / (2*fac(8)) + 1.00407735619794 + >>> zeta(8) + 1.00407735619794 + +**Algorithm** + +For small `n` (`n < 3000`) :func:`~mpmath.bernoulli` uses a recurrence +formula due to Ramanujan. All results in this range are cached, +so sequential computation of small Bernoulli numbers is +guaranteed to be fast. 
+
+For larger `n`, `B_n` is evaluated in terms of the Riemann zeta
+function.
+"""
+
+stieltjes = r"""
+For a nonnegative integer `n`, ``stieltjes(n)`` computes the
+`n`-th Stieltjes constant `\gamma_n`, defined as the
+`n`-th coefficient in the Laurent series expansion of the
+Riemann zeta function around the pole at `s = 1`. That is,
+we have:
+
+.. math ::
+
+    \zeta(s) = \frac{1}{s-1} + \sum_{n=0}^{\infty}
+        \frac{(-1)^n}{n!} \gamma_n (s-1)^n
+
+More generally, ``stieltjes(n, a)`` gives the corresponding
+coefficient `\gamma_n(a)` for the Hurwitz zeta function
+`\zeta(s,a)` (with `\gamma_n = \gamma_n(1)`).
+
+**Examples**
+
+The zeroth Stieltjes constant is just Euler's constant `\gamma`::
+
+    >>> from mpmath import *
+    >>> mp.dps = 15; mp.pretty = True
+    >>> stieltjes(0)
+    0.577215664901533
+
+Some more values are::
+
+    >>> stieltjes(1)
+    -0.0728158454836767
+    >>> stieltjes(10)
+    0.000205332814909065
+    >>> stieltjes(30)
+    0.00355772885557316
+    >>> stieltjes(1000)
+    -1.57095384420474e+486
+    >>> stieltjes(2000)
+    2.680424678918e+1109
+    >>> stieltjes(1, 2.5)
+    -0.23747539175716
+
+An alternative way to compute `\gamma_1`::
+
+    >>> diff(extradps(15)(lambda x: 1/(x-1) - zeta(x)), 1)
+    -0.0728158454836767
+
+:func:`~mpmath.stieltjes` supports arbitrary precision evaluation::
+
+    >>> mp.dps = 50
+    >>> stieltjes(2)
+    -0.0096903631928723184845303860352125293590658061013408
+
+**Algorithm**
+
+:func:`~mpmath.stieltjes` numerically evaluates the integral in
+the following representation due to Ainsworth, Howell and
+Coffey [1], [2]:
+
+.. math ::
+
+    \gamma_n(a) = \frac{\log^n a}{2a} - \frac{\log^{n+1}(a)}{n+1} +
+        \frac{2}{a} \Re \int_0^{\infty}
+        \frac{(x/a-i)\log^n(a-ix)}{(1+x^2/a^2)(e^{2\pi x}-1)} dx.
+
+For some reference values with `a = 1`, see e.g. [4].
+
+**References**
+
+1. O. R. Ainsworth & L. W. Howell, "An integral representation of
+   the generalized Euler-Mascheroni constants", NASA Technical
+   Paper 2456 (1985),
+   http://ntrs.nasa.gov/archive/nasa/casi.ntrs.nasa.gov/19850014994_1985014994.pdf
+
+2. M. W. Coffey, "The Stieltjes constants, their relation to the
+   `\eta_j` coefficients, and representation of the Hurwitz
+   zeta function", arXiv:0706.0343v1 http://arxiv.org/abs/0706.0343
+
+3. http://mathworld.wolfram.com/StieltjesConstants.html
+
+4. http://pi.lacim.uqam.ca/piDATA/stieltjesgamma.txt
+
+"""
+
+gammaprod = r"""
+Given iterables `a` and `b`, ``gammaprod(a, b)`` computes the
+product / quotient of gamma functions:
+
+.. math ::
+
+    \frac{\Gamma(a_0) \Gamma(a_1) \cdots \Gamma(a_p)}
+         {\Gamma(b_0) \Gamma(b_1) \cdots \Gamma(b_q)}
+
+Unlike direct calls to :func:`~mpmath.gamma`, :func:`~mpmath.gammaprod` considers
+the entire product as a limit and evaluates this limit properly if
+any of the numerator or denominator arguments are nonpositive
+integers such that poles of the gamma function are encountered.
+That is, :func:`~mpmath.gammaprod` evaluates
+
+.. math ::
+
+    \lim_{\epsilon \to 0}
+    \frac{\Gamma(a_0+\epsilon) \Gamma(a_1+\epsilon) \cdots
+        \Gamma(a_p+\epsilon)}
+        {\Gamma(b_0+\epsilon) \Gamma(b_1+\epsilon) \cdots
+        \Gamma(b_q+\epsilon)}
+
+In particular:
+
+* If there are equally many poles in the numerator and the
+  denominator, the limit is a rational number times the remaining,
+  regular part of the product.
+
+* If there are more poles in the numerator, :func:`~mpmath.gammaprod`
+  returns ``+inf``.
+
+* If there are more poles in the denominator, :func:`~mpmath.gammaprod`
+  returns 0.
+ +**Examples** + +The reciprocal gamma function `1/\Gamma(x)` evaluated at `x = 0`:: + + >>> from mpmath import * + >>> mp.dps = 15 + >>> gammaprod([], [0]) + 0.0 + +A limit:: + + >>> gammaprod([-4], [-3]) + -0.25 + >>> limit(lambda x: gamma(x-1)/gamma(x), -3, direction=1) + -0.25 + >>> limit(lambda x: gamma(x-1)/gamma(x), -3, direction=-1) + -0.25 + +""" + +beta = r""" +Computes the beta function, +`B(x,y) = \Gamma(x) \Gamma(y) / \Gamma(x+y)`. +The beta function is also commonly defined by the integral +representation + +.. math :: + + B(x,y) = \int_0^1 t^{x-1} (1-t)^{y-1} \, dt + +**Examples** + +For integer and half-integer arguments where all three gamma +functions are finite, the beta function becomes either rational +number or a rational multiple of `\pi`:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> beta(5, 2) + 0.0333333333333333 + >>> beta(1.5, 2) + 0.266666666666667 + >>> 16*beta(2.5, 1.5) + 3.14159265358979 + +Where appropriate, :func:`~mpmath.beta` evaluates limits. A pole +of the beta function is taken to result in ``+inf``:: + + >>> beta(-0.5, 0.5) + 0.0 + >>> beta(-3, 3) + -0.333333333333333 + >>> beta(-2, 3) + +inf + >>> beta(inf, 1) + 0.0 + >>> beta(inf, 0) + nan + +:func:`~mpmath.beta` supports complex numbers and arbitrary precision +evaluation:: + + >>> beta(1, 2+j) + (0.4 - 0.2j) + >>> mp.dps = 25 + >>> beta(j,0.5) + (1.079424249270925780135675 - 1.410032405664160838288752j) + >>> mp.dps = 50 + >>> beta(pi, e) + 0.037890298781212201348153837138927165984170287886464 + +Various integrals can be computed by means of the +beta function:: + + >>> mp.dps = 15 + >>> quad(lambda t: t**2.5*(1-t)**2, [0, 1]) + 0.0230880230880231 + >>> beta(3.5, 3) + 0.0230880230880231 + >>> quad(lambda t: sin(t)**4 * sqrt(cos(t)), [0, pi/2]) + 0.319504062596158 + >>> beta(2.5, 0.75)/2 + 0.319504062596158 + +""" + +betainc = r""" +``betainc(a, b, x1=0, x2=1, regularized=False)`` gives the generalized +incomplete beta function, + +.. 
math ::
+
+    I_{x_1}^{x_2}(a,b) = \int_{x_1}^{x_2} t^{a-1} (1-t)^{b-1} dt.
+
+When `x_1 = 0, x_2 = 1`, this reduces to the ordinary (complete)
+beta function `B(a,b)`; see :func:`~mpmath.beta`.
+
+With the keyword argument ``regularized=True``, :func:`~mpmath.betainc`
+computes the regularized incomplete beta function
+`I_{x_1}^{x_2}(a,b) / B(a,b)`. This is the cumulative distribution of the
+beta distribution with parameters `a`, `b`.
+
+.. note ::
+
+    Implementations of the incomplete beta function in some other
+    software uses a different argument order. For example, Mathematica uses the
+    reversed argument order ``Beta[x1,x2,a,b]``. For the equivalent of SciPy's
+    three-argument incomplete beta integral (implicitly with `x1 = 0`), use
+    ``betainc(a,b,0,x2,regularized=True)``.
+
+**Examples**
+
+Verifying that :func:`~mpmath.betainc` computes the integral in the
+definition::
+
+    >>> from mpmath import *
+    >>> mp.dps = 25; mp.pretty = True
+    >>> x,y,a,b = 3, 4, 0, 6
+    >>> betainc(x, y, a, b)
+    -4010.4
+    >>> quad(lambda t: t**(x-1) * (1-t)**(y-1), [a, b])
+    -4010.4
+
+The arguments may be arbitrary complex numbers::
+
+    >>> betainc(0.75, 1-4j, 0, 2+3j)
+    (0.2241657956955709603655887 + 0.3619619242700451992411724j)
+
+With regularization::
+
+    >>> betainc(1, 2, 0, 0.25, regularized=True)
+    0.4375
+    >>> betainc(pi, e, 0, 1, regularized=True)   # Complete
+    1.0
+
+The beta integral satisfies some simple argument transformation
+symmetries::
+
+    >>> mp.dps = 15
+    >>> betainc(2,3,4,5), -betainc(2,3,5,4), betainc(3,2,1-5,1-4)
+    (56.0833333333333, 56.0833333333333, 56.0833333333333)
+
+The beta integral can often be evaluated analytically. For integer and
+rational arguments, the incomplete beta function typically reduces to a
+simple algebraic-logarithmic expression::
+
+    >>> mp.dps = 25
+    >>> identify(chop(betainc(0, 0, 3, 4)))
+    '-(log((9/8)))'
+    >>> identify(betainc(2, 3, 4, 5))
+    '(673/12)'
+    >>> identify(betainc(1.5, 1, 1, 2))
+    '((-12+sqrt(1152))/18)'
+
+"""
+
+binomial = r"""
+Computes the binomial coefficient
+
+.. math ::
+
+    {n \choose k} = \frac{n!}{k!(n-k)!}.
+
+The binomial coefficient gives the number of ways that `k` items
+can be chosen from a set of `n` items. More generally, the binomial
+coefficient is a well-defined function of arbitrary real or
+complex `n` and `k`, via the gamma function.
+
+**Examples**
+
+Generate Pascal's triangle::
+
+    >>> from mpmath import *
+    >>> mp.dps = 15; mp.pretty = True
+    >>> for n in range(5):
+    ...     nprint([binomial(n,k) for k in range(n+1)])
+    ...
+    [1.0]
+    [1.0, 1.0]
+    [1.0, 2.0, 1.0]
+    [1.0, 3.0, 3.0, 1.0]
+    [1.0, 4.0, 6.0, 4.0, 1.0]
+
+There is 1 way to select 0 items from the empty set, and 0 ways to
+select 1 item from the empty set::
+
+    >>> binomial(0, 0)
+    1.0
+    >>> binomial(0, 1)
+    0.0
+
+:func:`~mpmath.binomial` supports large arguments::
+
+    >>> binomial(10**20, 10**20-5)
+    8.33333333333333e+97
+    >>> binomial(10**20, 10**10)
+    2.60784095465201e+104342944813
+
+Nonintegral binomial coefficients find use in series
+expansions::
+
+    >>> nprint(taylor(lambda x: (1+x)**0.25, 0, 4))
+    [1.0, 0.25, -0.09375, 0.0546875, -0.0375977]
+    >>> nprint([binomial(0.25, k) for k in range(5)])
+    [1.0, 0.25, -0.09375, 0.0546875, -0.0375977]
+
+An integral representation::
+
+    >>> n, k = 5, 3
+    >>> f = lambda t: exp(-j*k*t)*(1+exp(j*t))**n
+    >>> chop(quad(f, [-pi,pi])/(2*pi))
+    10.0
+    >>> binomial(n,k)
+    10.0
+
+"""
+
+rf = r"""
+Computes the rising factorial or Pochhammer symbol,
+
+.. math ::
+
+    x^{(n)} = x (x+1) \cdots (x+n-1) = \frac{\Gamma(x+n)}{\Gamma(x)}
+
+where the rightmost expression is valid for nonintegral `n`.
+ +**Examples** + +For integral `n`, the rising factorial is a polynomial:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> for n in range(5): + ... nprint(taylor(lambda x: rf(x,n), 0, n)) + ... + [1.0] + [0.0, 1.0] + [0.0, 1.0, 1.0] + [0.0, 2.0, 3.0, 1.0] + [0.0, 6.0, 11.0, 6.0, 1.0] + +Evaluation is supported for arbitrary arguments:: + + >>> rf(2+3j, 5.5) + (-7202.03920483347 - 3777.58810701527j) +""" + +ff = r""" +Computes the falling factorial, + +.. math :: + + (x)_n = x (x-1) \cdots (x-n+1) = \frac{\Gamma(x+1)}{\Gamma(x-n+1)} + +where the rightmost expression is valid for nonintegral `n`. + +**Examples** + +For integral `n`, the falling factorial is a polynomial:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> for n in range(5): + ... nprint(taylor(lambda x: ff(x,n), 0, n)) + ... + [1.0] + [0.0, 1.0] + [0.0, -1.0, 1.0] + [0.0, 2.0, -3.0, 1.0] + [0.0, -6.0, 11.0, -6.0, 1.0] + +Evaluation is supported for arbitrary arguments:: + + >>> ff(2+3j, 5.5) + (-720.41085888203 + 316.101124983878j) +""" + +fac2 = r""" +Computes the double factorial `x!!`, defined for integers +`x > 0` by + +.. math :: + + x!! = \begin{cases} + 1 \cdot 3 \cdots (x-2) \cdot x & x \;\mathrm{odd} \\ + 2 \cdot 4 \cdots (x-2) \cdot x & x \;\mathrm{even} + \end{cases} + +and more generally by [1] + +.. math :: + + x!! = 2^{x/2} \left(\frac{\pi}{2}\right)^{(\cos(\pi x)-1)/4} + \Gamma\left(\frac{x}{2}+1\right). + +**Examples** + +The integer sequence of double factorials begins:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> nprint([fac2(n) for n in range(10)]) + [1.0, 1.0, 2.0, 3.0, 8.0, 15.0, 48.0, 105.0, 384.0, 945.0] + +For large `x`, double factorials follow a Stirling-like asymptotic +approximation:: + + >>> x = mpf(10000) + >>> fac2(x) + 5.97272691416282e+17830 + >>> sqrt(pi)*x**((x+1)/2)*exp(-x/2) + 5.97262736954392e+17830 + +The recurrence formula `x!! 
= x (x-2)!!` can be reversed to +define the double factorial of negative odd integers (but +not negative even integers):: + + >>> fac2(-1), fac2(-3), fac2(-5), fac2(-7) + (1.0, -1.0, 0.333333333333333, -0.0666666666666667) + >>> fac2(-2) + Traceback (most recent call last): + ... + ValueError: gamma function pole + +With the exception of the poles at negative even integers, +:func:`~mpmath.fac2` supports evaluation for arbitrary complex arguments. +The recurrence formula is valid generally:: + + >>> fac2(pi+2j) + (-1.3697207890154e-12 + 3.93665300979176e-12j) + >>> (pi+2j)*fac2(pi-2+2j) + (-1.3697207890154e-12 + 3.93665300979176e-12j) + +Double factorials should not be confused with nested factorials, +which are immensely larger:: + + >>> fac(fac(20)) + 5.13805976125208e+43675043585825292774 + >>> fac2(20) + 3715891200.0 + +Double factorials appear, among other things, in series expansions +of Gaussian functions and the error function. Infinite series +include:: + + >>> nsum(lambda k: 1/fac2(k), [0, inf]) + 3.05940740534258 + >>> sqrt(e)*(1+sqrt(pi/2)*erf(sqrt(2)/2)) + 3.05940740534258 + >>> nsum(lambda k: 2**k/fac2(2*k-1), [1, inf]) + 4.06015693855741 + >>> e * erf(1) * sqrt(pi) + 4.06015693855741 + +A beautiful Ramanujan sum:: + + >>> nsum(lambda k: (-1)**k*(fac2(2*k-1)/fac2(2*k))**3, [0,inf]) + 0.90917279454693 + >>> (gamma('9/8')/gamma('5/4')/gamma('7/8'))**2 + 0.90917279454693 + +**References** + +1. http://functions.wolfram.com/GammaBetaErf/Factorial2/27/01/0002/ + +2. http://mathworld.wolfram.com/DoubleFactorial.html + +""" + +hyper = r""" +Evaluates the generalized hypergeometric function + +.. math :: + + \,_pF_q(a_1,\ldots,a_p; b_1,\ldots,b_q; z) = + \sum_{n=0}^\infty \frac{(a_1)_n (a_2)_n \ldots (a_p)_n} + {(b_1)_n(b_2)_n\ldots(b_q)_n} \frac{z^n}{n!} + +where `(x)_n` denotes the rising factorial (see :func:`~mpmath.rf`). 
+ +The parameters lists ``a_s`` and ``b_s`` may contain integers, +real numbers, complex numbers, as well as exact fractions given in +the form of tuples `(p, q)`. :func:`~mpmath.hyper` is optimized to handle +integers and fractions more efficiently than arbitrary +floating-point parameters (since rational parameters are by +far the most common). + +**Examples** + +Verifying that :func:`~mpmath.hyper` gives the sum in the definition, by +comparison with :func:`~mpmath.nsum`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> a,b,c,d = 2,3,4,5 + >>> x = 0.25 + >>> hyper([a,b],[c,d],x) + 1.078903941164934876086237 + >>> fn = lambda n: rf(a,n)*rf(b,n)/rf(c,n)/rf(d,n)*x**n/fac(n) + >>> nsum(fn, [0, inf]) + 1.078903941164934876086237 + +The parameters can be any combination of integers, fractions, +floats and complex numbers:: + + >>> a, b, c, d, e = 1, (-1,2), pi, 3+4j, (2,3) + >>> x = 0.2j + >>> hyper([a,b],[c,d,e],x) + (0.9923571616434024810831887 - 0.005753848733883879742993122j) + >>> b, e = -0.5, mpf(2)/3 + >>> fn = lambda n: rf(a,n)*rf(b,n)/rf(c,n)/rf(d,n)/rf(e,n)*x**n/fac(n) + >>> nsum(fn, [0, inf]) + (0.9923571616434024810831887 - 0.005753848733883879742993122j) + +The `\,_0F_0` and `\,_1F_0` series are just elementary functions:: + + >>> a, z = sqrt(2), +pi + >>> hyper([],[],z) + 23.14069263277926900572909 + >>> exp(z) + 23.14069263277926900572909 + >>> hyper([a],[],z) + (-0.09069132879922920160334114 + 0.3283224323946162083579656j) + >>> (1-z)**(-a) + (-0.09069132879922920160334114 + 0.3283224323946162083579656j) + +If any `a_k` coefficient is a nonpositive integer, the series terminates +into a finite polynomial:: + + >>> hyper([1,1,1,-3],[2,5],1) + 0.7904761904761904761904762 + >>> identify(_) + '(83/105)' + +If any `b_k` is a nonpositive integer, the function is undefined (unless the +series terminates before the division by zero occurs):: + + >>> hyper([1,1,1,-3],[-2,5],1) + Traceback (most recent call last): + ... 
+ ZeroDivisionError: pole in hypergeometric series + >>> hyper([1,1,1,-1],[-2,5],1) + 1.1 + +Except for polynomial cases, the radius of convergence `R` of the hypergeometric +series is either `R = \infty` (if `p \le q`), `R = 1` (if `p = q+1`), or +`R = 0` (if `p > q+1`). + +The analytic continuations of the functions with `p = q+1`, i.e. `\,_2F_1`, +`\,_3F_2`, `\,_4F_3`, etc, are all implemented and therefore these functions +can be evaluated for `|z| \ge 1`. The shortcuts :func:`~mpmath.hyp2f1`, :func:`~mpmath.hyp3f2` +are available to handle the most common cases (see their documentation), +but functions of higher degree are also supported via :func:`~mpmath.hyper`:: + + >>> hyper([1,2,3,4], [5,6,7], 1) # 4F3 at finite-valued branch point + 1.141783505526870731311423 + >>> hyper([4,5,6,7], [1,2,3], 1) # 4F3 at pole + +inf + >>> hyper([1,2,3,4,5], [6,7,8,9], 10) # 5F4 + (1.543998916527972259717257 - 0.5876309929580408028816365j) + >>> hyper([1,2,3,4,5,6], [7,8,9,10,11], 1j) # 6F5 + (0.9996565821853579063502466 + 0.0129721075905630604445669j) + +Near `z = 1` with noninteger parameters:: + + >>> hyper(['1/3',1,'3/2',2], ['1/5','11/6','41/8'], 1) + 2.219433352235586121250027 + >>> hyper(['1/3',1,'3/2',2], ['1/5','11/6','5/4'], 1) + +inf + >>> eps1 = extradps(6)(lambda: 1 - mpf('1e-6'))() + >>> hyper(['1/3',1,'3/2',2], ['1/5','11/6','5/4'], eps1) + 2923978034.412973409330956 + +Please note that, as currently implemented, evaluation of `\,_pF_{p-1}` +with `p \ge 3` may be slow or inaccurate when `|z-1|` is small, +for some parameter values. + +Evaluation may be aborted if convergence appears to be too slow. +The optional ``maxterms`` (limiting the number of series terms) and ``maxprec`` +(limiting the internal precision) keyword arguments can be used +to control evaluation:: + + >>> hyper([1,2,3], [4,5,6], 10000) # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + NoConvergence: Hypergeometric series converges too slowly. 
Try increasing maxterms.
+    >>> hyper([1,2,3], [4,5,6], 10000, maxterms=10**6)
+    7.622806053177969474396918e+4310
+
+Additional options include ``force_series`` (which forces direct use of
+a hypergeometric series even if another evaluation method might work better)
+and ``asymp_tol`` which controls the target tolerance for using
+asymptotic series.
+
+When `p > q+1`, ``hyper`` computes the (iterated) Borel sum of the divergent
+series. For `\,_2F_0` the Borel sum has an analytic solution and can be
+computed efficiently (see :func:`~mpmath.hyp2f0`). For higher degrees, the function
+is evaluated first by attempting to sum it directly as an asymptotic
+series (this only works for tiny `|z|`), and then by evaluating the Borel
+regularized sum using numerical integration. Except for
+special parameter combinations, this can be extremely slow.
+
+    >>> hyper([1,1], [], 0.5)          # regularization of 2F0
+    (1.340965419580146562086448 + 0.8503366631752726568782447j)
+    >>> hyper([1,1,1,1], [1], 0.5)     # regularization of 4F1
+    (1.108287213689475145830699 + 0.5327107430640678181200491j)
+
+With the following magnitude of argument, the asymptotic series for `\,_3F_1`
+gives only a few digits. Using Borel summation, ``hyper`` can produce
+a value with full accuracy::
+
+    >>> mp.dps = 15
+    >>> hyper([2,0.5,4], [5.25], '0.08', force_series=True) # doctest: +IGNORE_EXCEPTION_DETAIL
+    Traceback (most recent call last):
+      ...
+    NoConvergence: Hypergeometric series converges too slowly. Try increasing maxterms.
+ >>> hyper([2,0.5,4], [5.25], '0.08', asymp_tol=1e-4) + 1.0725535790737 + >>> hyper([2,0.5,4], [5.25], '0.08') + (1.07269542893559 + 5.54668863216891e-5j) + >>> hyper([2,0.5,4], [5.25], '-0.08', asymp_tol=1e-4) + 0.946344925484879 + >>> hyper([2,0.5,4], [5.25], '-0.08') + 0.946312503737771 + >>> mp.dps = 25 + >>> hyper([2,0.5,4], [5.25], '-0.08') + 0.9463125037377662296700858 + +Note that with the positive `z` value, there is a complex part in the +correct result, which falls below the tolerance of the asymptotic series. + +By default, a parameter that appears in both ``a_s`` and ``b_s`` will be removed +unless it is a nonpositive integer. This generally speeds up evaluation +by producing a hypergeometric function of lower order. +This optimization can be disabled by passing ``eliminate=False``. + + >>> hyper([1,2,3], [4,5,3], 10000) + 1.268943190440206905892212e+4321 + >>> hyper([1,2,3], [4,5,3], 10000, eliminate=False) # doctest: +IGNORE_EXCEPTION_DETAIL + Traceback (most recent call last): + ... + NoConvergence: Hypergeometric series converges too slowly. Try increasing maxterms. + >>> hyper([1,2,3], [4,5,3], 10000, eliminate=False, maxterms=10**6) + 1.268943190440206905892212e+4321 + +If a nonpositive integer `-n` appears in both ``a_s`` and ``b_s``, this parameter +cannot be unambiguously removed since it creates a term 0 / 0. +In this case the hypergeometric series is understood to terminate before +the division by zero occurs. This convention is consistent with Mathematica. +An alternative convention of eliminating the parameters can be toggled +with ``eliminate_all=True``: + + >>> hyper([2,-1], [-1], 3) + 7.0 + >>> hyper([2,-1], [-1], 3, eliminate_all=True) + 0.25 + >>> hyper([2], [], 3) + 0.25 + +""" + +hypercomb = r""" +Computes a weighted combination of hypergeometric functions + +.. 
math :: + + \sum_{r=1}^N \left[ \prod_{k=1}^{l_r} {w_{r,k}}^{c_{r,k}} + \frac{\prod_{k=1}^{m_r} \Gamma(\alpha_{r,k})}{\prod_{k=1}^{n_r} + \Gamma(\beta_{r,k})} + \,_{p_r}F_{q_r}(a_{r,1},\ldots,a_{r,p}; b_{r,1}, + \ldots, b_{r,q}; z_r)\right]. + +Typically the parameters are linear combinations of a small set of base +parameters; :func:`~mpmath.hypercomb` permits computing a correct value in +the case that some of the `\alpha`, `\beta`, `b` turn out to be +nonpositive integers, or if division by zero occurs for some `w^c`, +assuming that there are opposing singularities that cancel out. +The limit is computed by evaluating the function with the base +parameters perturbed, at a higher working precision. + +The first argument should be a function that takes the perturbable +base parameters ``params`` as input and returns `N` tuples +``(w, c, alpha, beta, a, b, z)``, where the coefficients ``w``, ``c``, +gamma factors ``alpha``, ``beta``, and hypergeometric coefficients +``a``, ``b`` each should be lists of numbers, and ``z`` should be a single +number. + +**Examples** + +The following evaluates + +.. math :: + + (a-1) \frac{\Gamma(a-3)}{\Gamma(a-4)} \,_1F_1(a,a-1,z) = e^z(a-4)(a+z-1) + +with `a=1, z=3`. There is a zero factor, two gamma function poles, and +the 1F1 function is singular; all singularities cancel out to give a finite +value:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> hypercomb(lambda a: [([a-1],[1],[a-3],[a-4],[a],[a-1],3)], [1]) + -180.769832308689 + >>> -9*exp(3) + -180.769832308689 + +""" + +hyp0f1 = r""" +Gives the hypergeometric function `\,_0F_1`, sometimes known as the +confluent limit function, defined as + +.. math :: + + \,_0F_1(a,z) = \sum_{k=0}^{\infty} \frac{1}{(a)_k} \frac{z^k}{k!}. + +This function satisfies the differential equation `z f''(z) + a f'(z) = f(z)`, +and is related to the Bessel function of the first kind (see :func:`~mpmath.besselj`). 
+ +``hyp0f1(a,z)`` is equivalent to ``hyper([],[a],z)``; see documentation for +:func:`~mpmath.hyper` for more information. + +**Examples** + +Evaluation for arbitrary arguments:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> hyp0f1(2, 0.25) + 1.130318207984970054415392 + >>> hyp0f1((1,2), 1234567) + 6.27287187546220705604627e+964 + >>> hyp0f1(3+4j, 1000000j) + (3.905169561300910030267132e+606 + 3.807708544441684513934213e+606j) + +Evaluation is supported for arbitrarily large values of `z`, +using asymptotic expansions:: + + >>> hyp0f1(1, 10**50) + 2.131705322874965310390701e+8685889638065036553022565 + >>> hyp0f1(1, -10**50) + 1.115945364792025420300208e-13 + +Verifying the differential equation:: + + >>> a = 2.5 + >>> f = lambda z: hyp0f1(a,z) + >>> for z in [0, 10, 3+4j]: + ... chop(z*diff(f,z,2) + a*diff(f,z) - f(z)) + ... + 0.0 + 0.0 + 0.0 + +""" + +hyp1f1 = r""" +Gives the confluent hypergeometric function of the first kind, + +.. math :: + + \,_1F_1(a,b,z) = \sum_{k=0}^{\infty} \frac{(a)_k}{(b)_k} \frac{z^k}{k!}, + +also known as Kummer's function and sometimes denoted by `M(a,b,z)`. This +function gives one solution to the confluent (Kummer's) differential equation + +.. math :: + + z f''(z) + (b-z) f'(z) - af(z) = 0. + +A second solution is given by the `U` function; see :func:`~mpmath.hyperu`. +Solutions are also given in an alternate form by the Whittaker +functions (:func:`~mpmath.whitm`, :func:`~mpmath.whitw`). + +``hyp1f1(a,b,z)`` is equivalent +to ``hyper([a],[b],z)``; see documentation for :func:`~mpmath.hyper` for more +information. 
+ +**Examples** + +Evaluation for real and complex values of the argument `z`, with +fixed parameters `a = 2, b = -1/3`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> hyp1f1(2, (-1,3), 3.25) + -2815.956856924817275640248 + >>> hyp1f1(2, (-1,3), -3.25) + -1.145036502407444445553107 + >>> hyp1f1(2, (-1,3), 1000) + -8.021799872770764149793693e+441 + >>> hyp1f1(2, (-1,3), -1000) + 0.000003131987633006813594535331 + >>> hyp1f1(2, (-1,3), 100+100j) + (-3.189190365227034385898282e+48 - 1.106169926814270418999315e+49j) + +Parameters may be complex:: + + >>> hyp1f1(2+3j, -1+j, 10j) + (261.8977905181045142673351 + 160.8930312845682213562172j) + +Arbitrarily large values of `z` are supported:: + + >>> hyp1f1(3, 4, 10**20) + 3.890569218254486878220752e+43429448190325182745 + >>> hyp1f1(3, 4, -10**20) + 6.0e-60 + >>> hyp1f1(3, 4, 10**20*j) + (-1.935753855797342532571597e-20 - 2.291911213325184901239155e-20j) + +Verifying the differential equation:: + + >>> a, b = 1.5, 2 + >>> f = lambda z: hyp1f1(a,b,z) + >>> for z in [0, -10, 3, 3+4j]: + ... chop(z*diff(f,z,2) + (b-z)*diff(f,z) - a*f(z)) + ... + 0.0 + 0.0 + 0.0 + 0.0 + +An integral representation:: + + >>> a, b = 1.5, 3 + >>> z = 1.5 + >>> hyp1f1(a,b,z) + 2.269381460919952778587441 + >>> g = lambda t: exp(z*t)*t**(a-1)*(1-t)**(b-a-1) + >>> gammaprod([b],[a,b-a])*quad(g, [0,1]) + 2.269381460919952778587441 + + +""" + +hyp1f2 = r""" +Gives the hypergeometric function `\,_1F_2(a_1,a_2;b_1,b_2; z)`. +The call ``hyp1f2(a1,b1,b2,z)`` is equivalent to +``hyper([a1],[b1,b2],z)``. 
+ +Evaluation works for complex and arbitrarily large arguments:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> a, b, c = 1.5, (-1,3), 2.25 + >>> hyp1f2(a, b, c, 10**20) + -1.159388148811981535941434e+8685889639 + >>> hyp1f2(a, b, c, -10**20) + -12.60262607892655945795907 + >>> hyp1f2(a, b, c, 10**20*j) + (4.237220401382240876065501e+6141851464 - 2.950930337531768015892987e+6141851464j) + >>> hyp1f2(2+3j, -2j, 0.5j, 10-20j) + (135881.9905586966432662004 - 86681.95885418079535738828j) + +""" + +hyp2f2 = r""" +Gives the hypergeometric function `\,_2F_2(a_1,a_2;b_1,b_2; z)`. +The call ``hyp2f2(a1,a2,b1,b2,z)`` is equivalent to +``hyper([a1,a2],[b1,b2],z)``. + +Evaluation works for complex and arbitrarily large arguments:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> a, b, c, d = 1.5, (-1,3), 2.25, 4 + >>> hyp2f2(a, b, c, d, 10**20) + -5.275758229007902299823821e+43429448190325182663 + >>> hyp2f2(a, b, c, d, -10**20) + 2561445.079983207701073448 + >>> hyp2f2(a, b, c, d, 10**20*j) + (2218276.509664121194836667 - 1280722.539991603850462856j) + >>> hyp2f2(2+3j, -2j, 0.5j, 4j, 10-20j) + (80500.68321405666957342788 - 20346.82752982813540993502j) + +""" + +hyp2f3 = r""" +Gives the hypergeometric function `\,_2F_3(a_1,a_2;b_1,b_2,b_3; z)`. +The call ``hyp2f3(a1,a2,b1,b2,b3,z)`` is equivalent to +``hyper([a1,a2],[b1,b2,b3],z)``. 
+ +Evaluation works for arbitrarily large arguments:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> a1,a2,b1,b2,b3 = 1.5, (-1,3), 2.25, 4, (1,5) + >>> hyp2f3(a1,a2,b1,b2,b3,10**20) + -4.169178177065714963568963e+8685889590 + >>> hyp2f3(a1,a2,b1,b2,b3,-10**20) + 7064472.587757755088178629 + >>> hyp2f3(a1,a2,b1,b2,b3,10**20*j) + (-5.163368465314934589818543e+6141851415 + 1.783578125755972803440364e+6141851416j) + >>> hyp2f3(2+3j, -2j, 0.5j, 4j, -1-j, 10-20j) + (-2280.938956687033150740228 + 13620.97336609573659199632j) + >>> hyp2f3(2+3j, -2j, 0.5j, 4j, -1-j, 10000000-20000000j) + (4.849835186175096516193e+3504 - 3.365981529122220091353633e+3504j) + +""" + +hyp2f1 = r""" +Gives the Gauss hypergeometric function `\,_2F_1` (often simply referred to as +*the* hypergeometric function), defined for `|z| < 1` as + +.. math :: + + \,_2F_1(a,b,c,z) = \sum_{k=0}^{\infty} + \frac{(a)_k (b)_k}{(c)_k} \frac{z^k}{k!}. + +and for `|z| \ge 1` by analytic continuation, with a branch cut on `(1, \infty)` +when necessary. + +Special cases of this function include many of the orthogonal polynomials as +well as the incomplete beta function and other functions. Properties of the +Gauss hypergeometric function are documented comprehensively in many references, +for example Abramowitz & Stegun, section 15. + +The implementation supports the analytic continuation as well as evaluation +close to the unit circle where `|z| \approx 1`. The syntax ``hyp2f1(a,b,c,z)`` +is equivalent to ``hyper([a,b],[c],z)``. 
+ +**Examples** + +Evaluation with `z` inside, outside and on the unit circle, for +fixed parameters:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> hyp2f1(2, (1,2), 4, 0.75) + 1.303703703703703703703704 + >>> hyp2f1(2, (1,2), 4, -1.75) + 0.7431290566046919177853916 + >>> hyp2f1(2, (1,2), 4, 1.75) + (1.418075801749271137026239 - 1.114976146679907015775102j) + >>> hyp2f1(2, (1,2), 4, 1) + 1.6 + >>> hyp2f1(2, (1,2), 4, -1) + 0.8235498012182875315037882 + >>> hyp2f1(2, (1,2), 4, j) + (0.9144026291433065674259078 + 0.2050415770437884900574923j) + >>> hyp2f1(2, (1,2), 4, 2+j) + (0.9274013540258103029011549 + 0.7455257875808100868984496j) + >>> hyp2f1(2, (1,2), 4, 0.25j) + (0.9931169055799728251931672 + 0.06154836525312066938147793j) + +Evaluation with complex parameter values:: + + >>> hyp2f1(1+j, 0.75, 10j, 1+5j) + (0.8834833319713479923389638 + 0.7053886880648105068343509j) + +Evaluation with `z = 1`:: + + >>> hyp2f1(-2.5, 3.5, 1.5, 1) + 0.0 + >>> hyp2f1(-2.5, 3, 4, 1) + 0.06926406926406926406926407 + >>> hyp2f1(2, 3, 4, 1) + +inf + +Evaluation for huge arguments:: + + >>> hyp2f1((-1,3), 1.75, 4, '1e100') + (7.883714220959876246415651e+32 + 1.365499358305579597618785e+33j) + >>> hyp2f1((-1,3), 1.75, 4, '1e1000000') + (7.883714220959876246415651e+333332 + 1.365499358305579597618785e+333333j) + >>> hyp2f1((-1,3), 1.75, 4, '1e1000000j') + (1.365499358305579597618785e+333333 - 7.883714220959876246415651e+333332j) + +An integral representation:: + + >>> a,b,c,z = -0.5, 1, 2.5, 0.25 + >>> g = lambda t: t**(b-1) * (1-t)**(c-b-1) * (1-t*z)**(-a) + >>> gammaprod([c],[b,c-b]) * quad(g, [0,1]) + 0.9480458814362824478852618 + >>> hyp2f1(a,b,c,z) + 0.9480458814362824478852618 + +Verifying the hypergeometric differential equation:: + + >>> f = lambda z: hyp2f1(a,b,c,z) + >>> chop(z*(1-z)*diff(f,z,2) + (c-(a+b+1)*z)*diff(f,z) - a*b*f(z)) + 0.0 + +""" + +hyp3f2 = r""" +Gives the generalized hypergeometric function `\,_3F_2`, defined for `|z| < 1` +as + +.. 
math :: + + \,_3F_2(a_1,a_2,a_3,b_1,b_2,z) = \sum_{k=0}^{\infty} + \frac{(a_1)_k (a_2)_k (a_3)_k}{(b_1)_k (b_2)_k} \frac{z^k}{k!}. + +and for `|z| \ge 1` by analytic continuation. The analytic structure of this +function is similar to that of `\,_2F_1`, generally with a singularity at +`z = 1` and a branch cut on `(1, \infty)`. + +Evaluation is supported inside, on, and outside +the circle of convergence `|z| = 1`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> hyp3f2(1,2,3,4,5,0.25) + 1.083533123380934241548707 + >>> hyp3f2(1,2+2j,3,4,5,-10+10j) + (0.1574651066006004632914361 - 0.03194209021885226400892963j) + >>> hyp3f2(1,2,3,4,5,-10) + 0.3071141169208772603266489 + >>> hyp3f2(1,2,3,4,5,10) + (-0.4857045320523947050581423 - 0.5988311440454888436888028j) + >>> hyp3f2(0.25,1,1,2,1.5,1) + 1.157370995096772047567631 + >>> (8-pi-2*ln2)/3 + 1.157370995096772047567631 + >>> hyp3f2(1+j,0.5j,2,1,-2j,-1) + (1.74518490615029486475959 + 0.1454701525056682297614029j) + >>> hyp3f2(1+j,0.5j,2,1,-2j,sqrt(j)) + (0.9829816481834277511138055 - 0.4059040020276937085081127j) + >>> hyp3f2(-3,2,1,-5,4,1) + 1.41 + >>> hyp3f2(-3,2,1,-5,4,2) + 2.12 + +Evaluation very close to the unit circle:: + + >>> hyp3f2(1,2,3,4,5,'1.0001') + (1.564877796743282766872279 - 3.76821518787438186031973e-11j) + >>> hyp3f2(1,2,3,4,5,'1+0.0001j') + (1.564747153061671573212831 + 0.0001305757570366084557648482j) + >>> hyp3f2(1,2,3,4,5,'0.9999') + 1.564616644881686134983664 + >>> hyp3f2(1,2,3,4,5,'-0.9999') + 0.7823896253461678060196207 + +.. note :: + + Evaluation for `|z-1|` small can currently be inaccurate or slow + for some parameter combinations. + +For various parameter combinations, `\,_3F_2` admits representation in terms +of hypergeometric functions of lower degree, or in terms of +simpler functions:: + + >>> for a, b, z in [(1,2,-1), (2,0.5,1)]: + ... hyp2f1(a,b,a+b+0.5,z)**2 + ... hyp3f2(2*a,a+b,2*b,a+b+0.5,2*a+2*b,z) + ... 
+ 0.4246104461966439006086308 + 0.4246104461966439006086308 + 7.111111111111111111111111 + 7.111111111111111111111111 + + >>> z = 2+3j + >>> hyp3f2(0.5,1,1.5,2,2,z) + (0.7621440939243342419729144 + 0.4249117735058037649915723j) + >>> 4*(pi-2*ellipe(z))/(pi*z) + (0.7621440939243342419729144 + 0.4249117735058037649915723j) + +""" + +hyperu = r""" +Gives the Tricomi confluent hypergeometric function `U`, also known as +the Kummer or confluent hypergeometric function of the second kind. This +function gives a second linearly independent solution to the confluent +hypergeometric differential equation (the first is provided by `\,_1F_1` -- +see :func:`~mpmath.hyp1f1`). + +**Examples** + +Evaluation for arbitrary complex arguments:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> hyperu(2,3,4) + 0.0625 + >>> hyperu(0.25, 5, 1000) + 0.1779949416140579573763523 + >>> hyperu(0.25, 5, -1000) + (0.1256256609322773150118907 - 0.1256256609322773150118907j) + +The `U` function may be singular at `z = 0`:: + + >>> hyperu(1.5, 2, 0) + +inf + >>> hyperu(1.5, -2, 0) + 0.1719434921288400112603671 + +Verifying the differential equation:: + + >>> a, b = 1.5, 2 + >>> f = lambda z: hyperu(a,b,z) + >>> for z in [-10, 3, 3+4j]: + ... chop(z*diff(f,z,2) + (b-z)*diff(f,z) - a*f(z)) + ... + 0.0 + 0.0 + 0.0 + +An integral representation:: + + >>> a,b,z = 2, 3.5, 4.25 + >>> hyperu(a,b,z) + 0.06674960718150520648014567 + >>> quad(lambda t: exp(-z*t)*t**(a-1)*(1+t)**(b-a-1),[0,inf]) / gamma(a) + 0.06674960718150520648014567 + + +[1] http://people.math.sfu.ca/~cbm/aands/page_504.htm +""" + +hyp2f0 = r""" +Gives the hypergeometric function `\,_2F_0`, defined formally by the +series + +.. math :: + + \,_2F_0(a,b;;z) = \sum_{n=0}^{\infty} (a)_n (b)_n \frac{z^n}{n!}. + +This series usually does not converge. For small enough `z`, it can be viewed +as an asymptotic series that may be summed directly with an appropriate +truncation. 
When this is not the case, :func:`~mpmath.hyp2f0` gives a regularized sum, +or equivalently, it uses a representation in terms of the +hypergeometric U function [1]. The series also converges when either `a` or `b` +is a nonpositive integer, as it then terminates into a polynomial +after `-a` or `-b` terms. + +**Examples** + +Evaluation is supported for arbitrary complex arguments:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> hyp2f0((2,3), 1.25, -100) + 0.07095851870980052763312791 + >>> hyp2f0((2,3), 1.25, 100) + (-0.03254379032170590665041131 + 0.07269254613282301012735797j) + >>> hyp2f0(-0.75, 1-j, 4j) + (-0.3579987031082732264862155 - 3.052951783922142735255881j) + +Even with real arguments, the regularized value of 2F0 is often complex-valued, +but the imaginary part decreases exponentially as `z \to 0`. In the following +example, the first call uses complex evaluation while the second has a small +enough `z` to evaluate using the direct series and thus the returned value +is strictly real (this should be taken to indicate that the imaginary +part is less than ``eps``):: + + >>> mp.dps = 15 + >>> hyp2f0(1.5, 0.5, 0.05) + (1.04166637647907 + 8.34584913683906e-8j) + >>> hyp2f0(1.5, 0.5, 0.0005) + 1.00037535207621 + +The imaginary part can be retrieved by increasing the working precision:: + + >>> mp.dps = 80 + >>> nprint(hyp2f0(1.5, 0.5, 0.009).imag) + 1.23828e-46 + +In the polynomial case (the series terminating), 2F0 can evaluate exactly:: + + >>> mp.dps = 15 + >>> hyp2f0(-6,-6,2) + 291793.0 + >>> identify(hyp2f0(-2,1,0.25)) + '(5/8)' + +The coefficients of the polynomials can be recovered using Taylor expansion:: + + >>> nprint(taylor(lambda x: hyp2f0(-3,0.5,x), 0, 10)) + [1.0, -1.5, 2.25, -1.875, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] + >>> nprint(taylor(lambda x: hyp2f0(-4,0.5,x), 0, 10)) + [1.0, -2.0, 4.5, -7.5, 6.5625, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] + + +[1] http://people.math.sfu.ca/~cbm/aands/page_504.htm +""" + + +gammainc = r""" 
+``gammainc(z, a=0, b=inf)`` computes the (generalized) incomplete +gamma function with integration limits `[a, b]`: + +.. math :: + + \Gamma(z,a,b) = \int_a^b t^{z-1} e^{-t} \, dt + +The generalized incomplete gamma function reduces to the +following special cases when one or both endpoints are fixed: + +* `\Gamma(z,0,\infty)` is the standard ("complete") + gamma function, `\Gamma(z)` (available directly + as the mpmath function :func:`~mpmath.gamma`) +* `\Gamma(z,a,\infty)` is the "upper" incomplete gamma + function, `\Gamma(z,a)` +* `\Gamma(z,0,b)` is the "lower" incomplete gamma + function, `\gamma(z,b)`. + +Of course, we have +`\Gamma(z,0,x) + \Gamma(z,x,\infty) = \Gamma(z)` +for all `z` and `x`. + +Note however that some authors reverse the order of the +arguments when defining the lower and upper incomplete +gamma function, so one should be careful to get the correct +definition. + +If also given the keyword argument ``regularized=True``, +:func:`~mpmath.gammainc` computes the "regularized" incomplete gamma +function + +.. math :: + + P(z,a,b) = \frac{\Gamma(z,a,b)}{\Gamma(z)}. 
+ +**Examples** + +We can compare with numerical quadrature to verify that +:func:`~mpmath.gammainc` computes the integral in the definition:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> gammainc(2+3j, 4, 10) + (0.00977212668627705160602312 - 0.0770637306312989892451977j) + >>> quad(lambda t: t**(2+3j-1) * exp(-t), [4, 10]) + (0.00977212668627705160602312 - 0.0770637306312989892451977j) + +Argument symmetries follow directly from the integral definition:: + + >>> gammainc(3, 4, 5) + gammainc(3, 5, 4) + 0.0 + >>> gammainc(3,0,2) + gammainc(3,2,4); gammainc(3,0,4) + 1.523793388892911312363331 + 1.523793388892911312363331 + >>> findroot(lambda z: gammainc(2,z,3), 1) + 3.0 + +Evaluation for arbitrarily large arguments:: + + >>> gammainc(10, 100) + 4.083660630910611272288592e-26 + >>> gammainc(10, 10000000000000000) + 5.290402449901174752972486e-4342944819032375 + >>> gammainc(3+4j, 1000000+1000000j) + (-1.257913707524362408877881e-434284 + 2.556691003883483531962095e-434284j) + +Evaluation of a generalized incomplete gamma function automatically chooses +the representation that gives a more accurate result, depending on which +parameter is larger:: + + >>> gammainc(10000000, 3) - gammainc(10000000, 2) # Bad + 0.0 + >>> gammainc(10000000, 2, 3) # Good + 1.755146243738946045873491e+4771204 + >>> gammainc(2, 0, 100000001) - gammainc(2, 0, 100000000) # Bad + 0.0 + >>> gammainc(2, 100000000, 100000001) # Good + 4.078258353474186729184421e-43429441 + +The incomplete gamma functions satisfy simple recurrence +relations:: + + >>> mp.dps = 25 + >>> z, a = mpf(3.5), mpf(2) + >>> gammainc(z+1, a); z*gammainc(z,a) + a**z*exp(-a) + 10.60130296933533459267329 + 10.60130296933533459267329 + >>> gammainc(z+1,0,a); z*gammainc(z,0,a) - a**z*exp(-a) + 1.030425427232114336470932 + 1.030425427232114336470932 + +Evaluation at integers and poles:: + + >>> gammainc(-3, -4, -5) + (-0.2214577048967798566234192 + 0.0j) + >>> gammainc(-3, 0, 5) + +inf + +If `z` is an 
integer, the recurrence reduces the incomplete gamma +function to `P(a) \exp(-a) + Q(b) \exp(-b)` where `P` and +`Q` are polynomials:: + + >>> gammainc(1, 2); exp(-2) + 0.1353352832366126918939995 + 0.1353352832366126918939995 + >>> mp.dps = 50 + >>> identify(gammainc(6, 1, 2), ['exp(-1)', 'exp(-2)']) + '(326*exp(-1) + (-872)*exp(-2))' + +The incomplete gamma functions reduce to functions such as +the exponential integral Ei and the error function for special +arguments:: + + >>> mp.dps = 25 + >>> gammainc(0, 4); -ei(-4) + 0.00377935240984890647887486 + 0.00377935240984890647887486 + >>> gammainc(0.5, 0, 2); sqrt(pi)*erf(sqrt(2)) + 1.691806732945198336509541 + 1.691806732945198336509541 + +""" + +erf = r""" +Computes the error function, `\mathrm{erf}(x)`. The error +function is the normalized antiderivative of the Gaussian function +`\exp(-t^2)`. More precisely, + +.. math:: + + \mathrm{erf}(x) = \frac{2}{\sqrt \pi} \int_0^x \exp(-t^2) \,dt + +**Basic examples** + +Simple values and limits include:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> erf(0) + 0.0 + >>> erf(1) + 0.842700792949715 + >>> erf(-1) + -0.842700792949715 + >>> erf(inf) + 1.0 + >>> erf(-inf) + -1.0 + +For large real `x`, `\mathrm{erf}(x)` approaches 1 very +rapidly:: + + >>> erf(3) + 0.999977909503001 + >>> erf(5) + 0.999999999998463 + +The error function is an odd function:: + + >>> nprint(chop(taylor(erf, 0, 5))) + [0.0, 1.12838, 0.0, -0.376126, 0.0, 0.112838] + +:func:`~mpmath.erf` implements arbitrary-precision evaluation and +supports complex numbers:: + + >>> mp.dps = 50 + >>> erf(0.5) + 0.52049987781304653768274665389196452873645157575796 + >>> mp.dps = 25 + >>> erf(1+j) + (1.316151281697947644880271 + 0.1904534692378346862841089j) + +Evaluation is supported for large arguments:: + + >>> mp.dps = 25 + >>> erf('1e1000') + 1.0 + >>> erf('-1e1000') + -1.0 + >>> erf('1e-1000') + 1.128379167095512573896159e-1000 + >>> erf('1e7j') + (0.0 + 
8.593897639029319267398803e+43429448190317j) + >>> erf('1e7+1e7j') + (0.9999999858172446172631323 + 3.728805278735270407053139e-8j) + +**Related functions** + +See also :func:`~mpmath.erfc`, which is more accurate for large `x`, +and :func:`~mpmath.erfi` which gives the antiderivative of +`\exp(t^2)`. + +The Fresnel integrals :func:`~mpmath.fresnels` and :func:`~mpmath.fresnelc` +are also related to the error function. +""" + +erfc = r""" +Computes the complementary error function, +`\mathrm{erfc}(x) = 1-\mathrm{erf}(x)`. +This function avoids cancellation that occurs when naively +computing the complementary error function as ``1-erf(x)``:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> 1 - erf(10) + 0.0 + >>> erfc(10) + 2.08848758376254e-45 + +:func:`~mpmath.erfc` works accurately even for ludicrously large +arguments:: + + >>> erfc(10**10) + 4.3504398860243e-43429448190325182776 + +Complex arguments are supported:: + + >>> erfc(500+50j) + (1.19739830969552e-107492 + 1.46072418957528e-107491j) + +""" + + +erfi = r""" +Computes the imaginary error function, `\mathrm{erfi}(x)`. +The imaginary error function is defined in analogy with the +error function, but with a positive sign in the integrand: + +.. math :: + + \mathrm{erfi}(x) = \frac{2}{\sqrt \pi} \int_0^x \exp(t^2) \,dt + +Whereas the error function rapidly converges to 1 as `x` grows, +the imaginary error function rapidly diverges to infinity. +The functions are related as +`\mathrm{erfi}(x) = -i\,\mathrm{erf}(ix)` for all complex +numbers `x`. 
+ +**Examples** + +Basic values and limits:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> erfi(0) + 0.0 + >>> erfi(1) + 1.65042575879754 + >>> erfi(-1) + -1.65042575879754 + >>> erfi(inf) + +inf + >>> erfi(-inf) + -inf + +Note the symmetry between erf and erfi:: + + >>> erfi(3j) + (0.0 + 0.999977909503001j) + >>> erf(3) + 0.999977909503001 + >>> erf(1+2j) + (-0.536643565778565 - 5.04914370344703j) + >>> erfi(2+1j) + (-5.04914370344703 - 0.536643565778565j) + +Large arguments are supported:: + + >>> erfi(1000) + 1.71130938718796e+434291 + >>> erfi(10**10) + 7.3167287567024e+43429448190325182754 + >>> erfi(-10**10) + -7.3167287567024e+43429448190325182754 + >>> erfi(1000-500j) + (2.49895233563961e+325717 + 2.6846779342253e+325717j) + >>> erfi(100000j) + (0.0 + 1.0j) + >>> erfi(-100000j) + (0.0 - 1.0j) + + +""" + +erfinv = r""" +Computes the inverse error function, satisfying + +.. math :: + + \mathrm{erf}(\mathrm{erfinv}(x)) = + \mathrm{erfinv}(\mathrm{erf}(x)) = x. + +This function is defined only for `-1 \le x \le 1`. + +**Examples** + +Special values include:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> erfinv(0) + 0.0 + >>> erfinv(1) + +inf + >>> erfinv(-1) + -inf + +The domain is limited to the standard interval:: + + >>> erfinv(2) + Traceback (most recent call last): + ... 
+ ValueError: erfinv(x) is defined only for -1 <= x <= 1 + +It is simple to check that :func:`~mpmath.erfinv` computes inverse values of +:func:`~mpmath.erf` as promised:: + + >>> erf(erfinv(0.75)) + 0.75 + >>> erf(erfinv(-0.995)) + -0.995 + +:func:`~mpmath.erfinv` supports arbitrary-precision evaluation:: + + >>> mp.dps = 50 + >>> x = erf(2) + >>> x + 0.99532226501895273416206925636725292861089179704006 + >>> erfinv(x) + 2.0 + +A definite integral involving the inverse error function:: + + >>> mp.dps = 15 + >>> quad(erfinv, [0, 1]) + 0.564189583547756 + >>> 1/sqrt(pi) + 0.564189583547756 + +The inverse error function can be used to generate random numbers +with a Gaussian distribution (although this is a relatively +inefficient algorithm):: + + >>> nprint([erfinv(2*rand()-1) for n in range(6)]) # doctest: +SKIP + [-0.586747, 1.10233, -0.376796, 0.926037, -0.708142, -0.732012] + +""" + +npdf = r""" +``npdf(x, mu=0, sigma=1)`` evaluates the probability density +function of a normal distribution with mean value `\mu` +and variance `\sigma^2`. + +Elementary properties of the probability distribution can +be verified using numerical integration:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> quad(npdf, [-inf, inf]) + 1.0 + >>> quad(lambda x: npdf(x, 3), [3, inf]) + 0.5 + >>> quad(lambda x: npdf(x, 3, 2), [3, inf]) + 0.5 + +See also :func:`~mpmath.ncdf`, which gives the cumulative +distribution. +""" + +ncdf = r""" +``ncdf(x, mu=0, sigma=1)`` evaluates the cumulative distribution +function of a normal distribution with mean value `\mu` +and variance `\sigma^2`. + +See also :func:`~mpmath.npdf`, which gives the probability density. 
+ +Elementary properties include:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> ncdf(pi, mu=pi) + 0.5 + >>> ncdf(-inf) + 0.0 + >>> ncdf(+inf) + 1.0 + +The cumulative distribution is the integral of the density +function having identical mu and sigma:: + + >>> mp.dps = 15 + >>> diff(ncdf, 2) + 0.053990966513188 + >>> npdf(2) + 0.053990966513188 + >>> diff(lambda x: ncdf(x, 1, 0.5), 0) + 0.107981933026376 + >>> npdf(0, 1, 0.5) + 0.107981933026376 +""" + +expint = r""" +:func:`~mpmath.expint(n,z)` gives the generalized exponential integral +or En-function, + +.. math :: + + \mathrm{E}_n(z) = \int_1^{\infty} \frac{e^{-zt}}{t^n} dt, + +where `n` and `z` may both be complex numbers. The case with `n = 1` is +also given by :func:`~mpmath.e1`. + +**Examples** + +Evaluation at real and complex arguments:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> expint(1, 6.25) + 0.0002704758872637179088496194 + >>> expint(-3, 2+3j) + (0.00299658467335472929656159 + 0.06100816202125885450319632j) + >>> expint(2+3j, 4-5j) + (0.001803529474663565056945248 - 0.002235061547756185403349091j) + +At negative integer values of `n`, `E_n(z)` reduces to a +rational-exponential function:: + + >>> f = lambda n, z: fac(n)*sum(z**k/fac(k-1) for k in range(1,n+2))/\ + ... exp(z)/z**(n+2) + >>> n = 3 + >>> z = 1/pi + >>> expint(-n,z) + 584.2604820613019908668219 + >>> f(n,z) + 584.2604820613019908668219 + >>> n = 5 + >>> expint(-n,z) + 115366.5762594725451811138 + >>> f(n,z) + 115366.5762594725451811138 +""" + +e1 = r""" +Computes the exponential integral `\mathrm{E}_1(z)`, given by + +.. math :: + + \mathrm{E}_1(z) = \int_z^{\infty} \frac{e^{-t}}{t} dt. + +This is equivalent to :func:`~mpmath.expint` with `n = 1`. 
+ +**Examples** + +Two ways to evaluate this function:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> e1(6.25) + 0.0002704758872637179088496194 + >>> expint(1,6.25) + 0.0002704758872637179088496194 + +The E1-function is essentially the same as the Ei-function (:func:`~mpmath.ei`) +with negated argument, except for an imaginary branch cut term:: + + >>> e1(2.5) + 0.02491491787026973549562801 + >>> -ei(-2.5) + 0.02491491787026973549562801 + >>> e1(-2.5) + (-7.073765894578600711923552 - 3.141592653589793238462643j) + >>> -ei(2.5) + -7.073765894578600711923552 + +""" + +ei = r""" +Computes the exponential integral or Ei-function, `\mathrm{Ei}(x)`. +The exponential integral is defined as + +.. math :: + + \mathrm{Ei}(x) = \int_{-\infty\,}^x \frac{e^t}{t} \, dt. + +When the integration range includes `t = 0`, the exponential +integral is interpreted as providing the Cauchy principal value. + +For real `x`, the Ei-function behaves roughly like +`\mathrm{Ei}(x) \approx \exp(x) + \log(|x|)`. + +The Ei-function is related to the more general family of exponential +integral functions denoted by `E_n`, which are available as :func:`~mpmath.expint`. + +**Basic examples** + +Some basic values and limits are:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> ei(0) + -inf + >>> ei(1) + 1.89511781635594 + >>> ei(inf) + +inf + >>> ei(-inf) + 0.0 + +For `x < 0`, the defining integral can be evaluated +numerically as a reference:: + + >>> ei(-4) + -0.00377935240984891 + >>> quad(lambda t: exp(t)/t, [-inf, -4]) + -0.00377935240984891 + +:func:`~mpmath.ei` supports complex arguments and arbitrary +precision evaluation:: + + >>> mp.dps = 50 + >>> ei(pi) + 10.928374389331410348638445906907535171566338835056 + >>> mp.dps = 25 + >>> ei(3+4j) + (-4.154091651642689822535359 + 4.294418620024357476985535j) + +**Related functions** + +The exponential integral is closely related to the logarithmic +integral. 
See :func:`~mpmath.li` for additional information. + +The exponential integral is related to the hyperbolic +and trigonometric integrals (see :func:`~mpmath.chi`, :func:`~mpmath.shi`, +:func:`~mpmath.ci`, :func:`~mpmath.si`) similarly to how the ordinary +exponential function is related to the hyperbolic and +trigonometric functions:: + + >>> mp.dps = 15 + >>> ei(3) + 9.93383257062542 + >>> chi(3) + shi(3) + 9.93383257062542 + >>> chop(ci(3j) - j*si(3j) - pi*j/2) + 9.93383257062542 + +Beware that logarithmic corrections, as in the last example +above, are required to obtain the correct branch in general. +For details, see [1]. + +The exponential integral is also a special case of the +hypergeometric function `\,_2F_2`:: + + >>> z = 0.6 + >>> z*hyper([1,1],[2,2],z) + (ln(z)-ln(1/z))/2 + euler + 0.769881289937359 + >>> ei(z) + 0.769881289937359 + +**References** + +1. Relations between Ei and other functions: + http://functions.wolfram.com/GammaBetaErf/ExpIntegralEi/27/01/ + +2. Abramowitz & Stegun, section 5: + http://people.math.sfu.ca/~cbm/aands/page_228.htm + +3. Asymptotic expansion for Ei: + http://mathworld.wolfram.com/En-Function.html +""" + +li = r""" +Computes the logarithmic integral or li-function +`\mathrm{li}(x)`, defined by + +.. math :: + + \mathrm{li}(x) = \int_0^x \frac{1}{\log t} \, dt + +The logarithmic integral has a singularity at `x = 1`. + +Alternatively, ``li(x, offset=True)`` computes the offset +logarithmic integral (used in number theory) + +.. math :: + + \mathrm{Li}(x) = \int_2^x \frac{1}{\log t} \, dt. + +These two functions are related via the simple identity +`\mathrm{Li}(x) = \mathrm{li}(x) - \mathrm{li}(2)`. + +The logarithmic integral should also not be confused with +the polylogarithm (also denoted by Li), which is implemented +as :func:`~mpmath.polylog`. 
+ +**Examples** + +Some basic values and limits:: + + >>> from mpmath import * + >>> mp.dps = 30; mp.pretty = True + >>> li(0) + 0.0 + >>> li(1) + -inf + >>> li(1) + -inf + >>> li(2) + 1.04516378011749278484458888919 + >>> findroot(li, 2) + 1.45136923488338105028396848589 + >>> li(inf) + +inf + >>> li(2, offset=True) + 0.0 + >>> li(1, offset=True) + -inf + >>> li(0, offset=True) + -1.04516378011749278484458888919 + >>> li(10, offset=True) + 5.12043572466980515267839286347 + +The logarithmic integral can be evaluated for arbitrary +complex arguments:: + + >>> mp.dps = 20 + >>> li(3+4j) + (3.1343755504645775265 + 2.6769247817778742392j) + +The logarithmic integral is related to the exponential integral:: + + >>> ei(log(3)) + 2.1635885946671919729 + >>> li(3) + 2.1635885946671919729 + +The logarithmic integral grows like `O(x/\log(x))`:: + + >>> mp.dps = 15 + >>> x = 10**100 + >>> x/log(x) + 4.34294481903252e+97 + >>> li(x) + 4.3619719871407e+97 + +The prime number theorem states that the number of primes less +than `x` is asymptotic to `\mathrm{Li}(x)` (equivalently +`\mathrm{li}(x)`). For example, it is known that there are +exactly 1,925,320,391,606,803,968,923 prime numbers less than +`10^{23}` [1]. The logarithmic integral provides a very +accurate estimate:: + + >>> li(10**23, offset=True) + 1.92532039161405e+21 + +A definite integral is:: + + >>> quad(li, [0, 1]) + -0.693147180559945 + >>> -ln(2) + -0.693147180559945 + +**References** + +1. http://mathworld.wolfram.com/PrimeCountingFunction.html + +2. http://mathworld.wolfram.com/LogarithmicIntegral.html + +""" + +ci = r""" +Computes the cosine integral, + +.. 
math :: + + \mathrm{Ci}(x) = -\int_x^{\infty} \frac{\cos t}{t}\,dt + = \gamma + \log x + \int_0^x \frac{\cos t - 1}{t}\,dt + +**Examples** + +Some values and limits:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> ci(0) + -inf + >>> ci(1) + 0.3374039229009681346626462 + >>> ci(pi) + 0.07366791204642548599010096 + >>> ci(inf) + 0.0 + >>> ci(-inf) + (0.0 + 3.141592653589793238462643j) + >>> ci(2+3j) + (1.408292501520849518759125 - 2.983617742029605093121118j) + +The cosine integral behaves roughly like the sinc function +(see :func:`~mpmath.sinc`) for large real `x`:: + + >>> ci(10**10) + -4.875060251748226537857298e-11 + >>> sinc(10**10) + -4.875060250875106915277943e-11 + >>> chop(limit(ci, inf)) + 0.0 + +It has infinitely many roots on the positive real axis:: + + >>> findroot(ci, 1) + 0.6165054856207162337971104 + >>> findroot(ci, 2) + 3.384180422551186426397851 + +Evaluation is supported for `z` anywhere in the complex plane:: + + >>> ci(10**6*(1+j)) + (4.449410587611035724984376e+434287 + 9.75744874290013526417059e+434287j) + +We can evaluate the defining integral as a reference:: + + >>> mp.dps = 15 + >>> -quadosc(lambda t: cos(t)/t, [5, inf], omega=1) + -0.190029749656644 + >>> ci(5) + -0.190029749656644 + +Some infinite series can be evaluated using the +cosine integral:: + + >>> nsum(lambda k: (-1)**k/(fac(2*k)*(2*k)), [1,inf]) + -0.239811742000565 + >>> ci(1) - euler + -0.239811742000565 + +""" + +si = r""" +Computes the sine integral, + +.. math :: + + \mathrm{Si}(x) = \int_0^x \frac{\sin t}{t}\,dt. + +The sine integral is thus the antiderivative of the sinc +function (see :func:`~mpmath.sinc`). 
+ +**Examples** + +Some values and limits:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> si(0) + 0.0 + >>> si(1) + 0.9460830703671830149413533 + >>> si(-1) + -0.9460830703671830149413533 + >>> si(pi) + 1.851937051982466170361053 + >>> si(inf) + 1.570796326794896619231322 + >>> si(-inf) + -1.570796326794896619231322 + >>> si(2+3j) + (4.547513889562289219853204 + 1.399196580646054789459839j) + +The sine integral approaches `\pi/2` for large real `x`:: + + >>> si(10**10) + 1.570796326707584656968511 + >>> pi/2 + 1.570796326794896619231322 + +Evaluation is supported for `z` anywhere in the complex plane:: + + >>> si(10**6*(1+j)) + (-9.75744874290013526417059e+434287 + 4.449410587611035724984376e+434287j) + +We can evaluate the defining integral as a reference:: + + >>> mp.dps = 15 + >>> quad(sinc, [0, 5]) + 1.54993124494467 + >>> si(5) + 1.54993124494467 + +Some infinite series can be evaluated using the +sine integral:: + + >>> nsum(lambda k: (-1)**k/(fac(2*k+1)*(2*k+1)), [0,inf]) + 0.946083070367183 + >>> si(1) + 0.946083070367183 + +""" + +chi = r""" +Computes the hyperbolic cosine integral, defined +in analogy with the cosine integral (see :func:`~mpmath.ci`) as + +.. math :: + + \mathrm{Chi}(x) = -\int_x^{\infty} \frac{\cosh t}{t}\,dt + = \gamma + \log x + \int_0^x \frac{\cosh t - 1}{t}\,dt + +Some values and limits:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> chi(0) + -inf + >>> chi(1) + 0.8378669409802082408946786 + >>> chi(inf) + +inf + >>> findroot(chi, 0.5) + 0.5238225713898644064509583 + >>> chi(2+3j) + (-0.1683628683277204662429321 + 2.625115880451325002151688j) + +Evaluation is supported for `z` anywhere in the complex plane:: + + >>> chi(10**6*(1+j)) + (4.449410587611035724984376e+434287 - 9.75744874290013526417059e+434287j) + +""" + +shi = r""" +Computes the hyperbolic sine integral, defined +in analogy with the sine integral (see :func:`~mpmath.si`) as + +.. 
math :: + + \mathrm{Shi}(x) = \int_0^x \frac{\sinh t}{t}\,dt. + +Some values and limits:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> shi(0) + 0.0 + >>> shi(1) + 1.057250875375728514571842 + >>> shi(-1) + -1.057250875375728514571842 + >>> shi(inf) + +inf + >>> shi(2+3j) + (-0.1931890762719198291678095 + 2.645432555362369624818525j) + +Evaluation is supported for `z` anywhere in the complex plane:: + + >>> shi(10**6*(1+j)) + (4.449410587611035724984376e+434287 - 9.75744874290013526417059e+434287j) + +""" + +fresnels = r""" +Computes the Fresnel sine integral + +.. math :: + + S(x) = \int_0^x \sin\left(\frac{\pi t^2}{2}\right) \,dt + +Note that some sources define this function +without the normalization factor `\pi/2`. + +**Examples** + +Some basic values and limits:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> fresnels(0) + 0.0 + >>> fresnels(inf) + 0.5 + >>> fresnels(-inf) + -0.5 + >>> fresnels(1) + 0.4382591473903547660767567 + >>> fresnels(1+2j) + (36.72546488399143842838788 + 15.58775110440458732748279j) + +Comparing with the definition:: + + >>> fresnels(3) + 0.4963129989673750360976123 + >>> quad(lambda t: sin(pi*t**2/2), [0,3]) + 0.4963129989673750360976123 +""" + +fresnelc = r""" +Computes the Fresnel cosine integral + +.. math :: + + C(x) = \int_0^x \cos\left(\frac{\pi t^2}{2}\right) \,dt + +Note that some sources define this function +without the normalization factor `\pi/2`. 
+ +**Examples** + +Some basic values and limits:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> fresnelc(0) + 0.0 + >>> fresnelc(inf) + 0.5 + >>> fresnelc(-inf) + -0.5 + >>> fresnelc(1) + 0.7798934003768228294742064 + >>> fresnelc(1+2j) + (16.08787137412548041729489 - 36.22568799288165021578758j) + +Comparing with the definition:: + + >>> fresnelc(3) + 0.6057207892976856295561611 + >>> quad(lambda t: cos(pi*t**2/2), [0,3]) + 0.6057207892976856295561611 +""" + +airyai = r""" +Computes the Airy function `\operatorname{Ai}(z)`, which is +the solution of the Airy differential equation `f''(z) - z f(z) = 0` +with initial conditions + +.. math :: + + \operatorname{Ai}(0) = + \frac{1}{3^{2/3}\Gamma\left(\frac{2}{3}\right)} + + \operatorname{Ai}'(0) = + -\frac{1}{3^{1/3}\Gamma\left(\frac{1}{3}\right)}. + +Other common ways of defining the Ai-function include +integrals such as + +.. math :: + + \operatorname{Ai}(x) = \frac{1}{\pi} + \int_0^{\infty} \cos\left(\frac{1}{3}t^3+xt\right) dt + \qquad x \in \mathbb{R} + + \operatorname{Ai}(z) = \frac{\sqrt{3}}{2\pi} + \int_0^{\infty} + \exp\left(-\frac{t^3}{3}-\frac{z^3}{3t^3}\right) dt. + +The Ai-function is an entire function with a turning point, +behaving roughly like a slowly decaying sine wave for `z < 0` and +like a rapidly decreasing exponential for `z > 0`. +A second solution of the Airy differential equation +is given by `\operatorname{Bi}(z)` (see :func:`~mpmath.airybi`). + +Optionally, with *derivative=alpha*, :func:`airyai` can compute the +`\alpha`-th order fractional derivative with respect to `z`. +For `\alpha = n = 1,2,3,\ldots` this gives the derivative +`\operatorname{Ai}^{(n)}(z)`, and for `\alpha = -n = -1,-2,-3,\ldots` +this gives the `n`-fold iterated integral + +.. math :: + + f_0(z) = \operatorname{Ai}(z) + + f_n(z) = \int_0^z f_{n-1}(t) dt. + +The Ai-function has infinitely many zeros, all located along the +negative half of the real axis. 
They can be computed with +:func:`~mpmath.airyaizero`. + +**Plots** + +.. literalinclude :: /plots/ai.py +.. image :: /plots/ai.png +.. literalinclude :: /plots/ai_c.py +.. image :: /plots/ai_c.png + +**Basic examples** + +Limits and values include:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> airyai(0); 1/(power(3,'2/3')*gamma('2/3')) + 0.3550280538878172392600632 + 0.3550280538878172392600632 + >>> airyai(1) + 0.1352924163128814155241474 + >>> airyai(-1) + 0.5355608832923521187995166 + >>> airyai(inf); airyai(-inf) + 0.0 + 0.0 + +Evaluation is supported for large magnitudes of the argument:: + + >>> airyai(-100) + 0.1767533932395528780908311 + >>> airyai(100) + 2.634482152088184489550553e-291 + >>> airyai(50+50j) + (-5.31790195707456404099817e-68 - 1.163588003770709748720107e-67j) + >>> airyai(-50+50j) + (1.041242537363167632587245e+158 + 3.347525544923600321838281e+157j) + +Huge arguments are also fine:: + + >>> airyai(10**10) + 1.162235978298741779953693e-289529654602171 + >>> airyai(-10**10) + 0.0001736206448152818510510181 + >>> w = airyai(10**10*(1+j)) + >>> w.real + 5.711508683721355528322567e-186339621747698 + >>> w.imag + 1.867245506962312577848166e-186339621747697 + +The first root of the Ai-function is:: + + >>> findroot(airyai, -2) + -2.338107410459767038489197 + >>> airyaizero(1) + -2.338107410459767038489197 + +**Properties and relations** + +Verifying the Airy differential equation:: + + >>> for z in [-3.4, 0, 2.5, 1+2j]: + ... chop(airyai(z,2) - z*airyai(z)) + ... 
+ 0.0 + 0.0 + 0.0 + 0.0 + +The first few terms of the Taylor series expansion around `z = 0` +(every third term is zero):: + + >>> nprint(taylor(airyai, 0, 5)) + [0.355028, -0.258819, 0.0, 0.0591713, -0.0215683, 0.0] + +The Airy functions satisfy the Wronskian relation +`\operatorname{Ai}(z) \operatorname{Bi}'(z) - +\operatorname{Ai}'(z) \operatorname{Bi}(z) = 1/\pi`:: + + >>> z = -0.5 + >>> airyai(z)*airybi(z,1) - airyai(z,1)*airybi(z) + 0.3183098861837906715377675 + >>> 1/pi + 0.3183098861837906715377675 + +The Airy functions can be expressed in terms of Bessel +functions of order `\pm 1/3`. For `\Re[z] \le 0`, we have:: + + >>> z = -3 + >>> airyai(z) + -0.3788142936776580743472439 + >>> y = 2*power(-z,'3/2')/3 + >>> (sqrt(-z) * (besselj('1/3',y) + besselj('-1/3',y)))/3 + -0.3788142936776580743472439 + +**Derivatives and integrals** + +Derivatives of the Ai-function (directly and using :func:`~mpmath.diff`):: + + >>> airyai(-3,1); diff(airyai,-3) + 0.3145837692165988136507873 + 0.3145837692165988136507873 + >>> airyai(-3,2); diff(airyai,-3,2) + 1.136442881032974223041732 + 1.136442881032974223041732 + >>> airyai(1000,1); diff(airyai,1000) + -2.943133917910336090459748e-9156 + -2.943133917910336090459748e-9156 + +Several derivatives at `z = 0`:: + + >>> airyai(0,0); airyai(0,1); airyai(0,2) + 0.3550280538878172392600632 + -0.2588194037928067984051836 + 0.0 + >>> airyai(0,3); airyai(0,4); airyai(0,5) + 0.3550280538878172392600632 + -0.5176388075856135968103671 + 0.0 + >>> airyai(0,15); airyai(0,16); airyai(0,17) + 1292.30211615165475090663 + -3188.655054727379756351861 + 0.0 + +The integral of the Ai-function:: + + >>> airyai(3,-1); quad(airyai, [0,3]) + 0.3299203760070217725002701 + 0.3299203760070217725002701 + >>> airyai(-10,-1); quad(airyai, [0,-10]) + -0.765698403134212917425148 + -0.765698403134212917425148 + +Integrals of high or fractional order:: + + >>> airyai(-2,0.5); differint(airyai,-2,0.5,0) + (0.0 + 0.2453596101351438273844725j) + (0.0 + 
0.2453596101351438273844725j) + >>> airyai(-2,-4); differint(airyai,-2,-4,0) + 0.2939176441636809580339365 + 0.2939176441636809580339365 + >>> airyai(0,-1); airyai(0,-2); airyai(0,-3) + 0.0 + 0.0 + 0.0 + +Integrals of the Ai-function can be evaluated at limit points:: + + >>> airyai(-1000000,-1); airyai(-inf,-1) + -0.6666843728311539978751512 + -0.6666666666666666666666667 + >>> airyai(10,-1); airyai(+inf,-1) + 0.3333333332991690159427932 + 0.3333333333333333333333333 + >>> airyai(+inf,-2); airyai(+inf,-3) + +inf + +inf + >>> airyai(-1000000,-2); airyai(-inf,-2) + 666666.4078472650651209742 + +inf + >>> airyai(-1000000,-3); airyai(-inf,-3) + -333333074513.7520264995733 + -inf + +**References** + +1. [DLMF]_ Chapter 9: Airy and Related Functions +2. [WolframFunctions]_ section: Bessel-Type Functions + +""" + +airybi = r""" +Computes the Airy function `\operatorname{Bi}(z)`, which is +the solution of the Airy differential equation `f''(z) - z f(z) = 0` +with initial conditions + +.. math :: + + \operatorname{Bi}(0) = + \frac{1}{3^{1/6}\Gamma\left(\frac{2}{3}\right)} + + \operatorname{Bi}'(0) = + \frac{3^{1/6}}{\Gamma\left(\frac{1}{3}\right)}. + +Like the Ai-function (see :func:`~mpmath.airyai`), the Bi-function +is oscillatory for `z < 0`, but it grows rather than decreases +for `z > 0`. + +Optionally, as for :func:`~mpmath.airyai`, derivatives, integrals +and fractional derivatives can be computed with the *derivative* +parameter. + +The Bi-function has infinitely many zeros along the negative +half-axis, as well as complex zeros, which can all be computed +with :func:`~mpmath.airybizero`. + +**Plots** + +.. literalinclude :: /plots/bi.py +.. image :: /plots/bi.png +.. literalinclude :: /plots/bi_c.py +.. 
image :: /plots/bi_c.png + +**Basic examples** + +Limits and values include:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> airybi(0); 1/(power(3,'1/6')*gamma('2/3')) + 0.6149266274460007351509224 + 0.6149266274460007351509224 + >>> airybi(1) + 1.207423594952871259436379 + >>> airybi(-1) + 0.10399738949694461188869 + >>> airybi(inf); airybi(-inf) + +inf + 0.0 + +Evaluation is supported for large magnitudes of the argument:: + + >>> airybi(-100) + 0.02427388768016013160566747 + >>> airybi(100) + 6.041223996670201399005265e+288 + >>> airybi(50+50j) + (-5.322076267321435669290334e+63 + 1.478450291165243789749427e+65j) + >>> airybi(-50+50j) + (-3.347525544923600321838281e+157 + 1.041242537363167632587245e+158j) + +Huge arguments:: + + >>> airybi(10**10) + 1.369385787943539818688433e+289529654602165 + >>> airybi(-10**10) + 0.001775656141692932747610973 + >>> w = airybi(10**10*(1+j)) + >>> w.real + -6.559955931096196875845858e+186339621747689 + >>> w.imag + -6.822462726981357180929024e+186339621747690 + +The first real root of the Bi-function is:: + + >>> findroot(airybi, -1); airybizero(1) + -1.17371322270912792491998 + -1.17371322270912792491998 + +**Properties and relations** + +Verifying the Airy differential equation:: + + >>> for z in [-3.4, 0, 2.5, 1+2j]: + ... chop(airybi(z,2) - z*airybi(z)) + ... + 0.0 + 0.0 + 0.0 + 0.0 + +The first few terms of the Taylor series expansion around `z = 0` +(every third term is zero):: + + >>> nprint(taylor(airybi, 0, 5)) + [0.614927, 0.448288, 0.0, 0.102488, 0.0373574, 0.0] + +The Airy functions can be expressed in terms of Bessel +functions of order `\pm 1/3`. 
For `\Re[z] \le 0`, we have:: + + >>> z = -3 + >>> airybi(z) + -0.1982896263749265432206449 + >>> p = 2*power(-z,'3/2')/3 + >>> sqrt(-mpf(z)/3)*(besselj('-1/3',p) - besselj('1/3',p)) + -0.1982896263749265432206449 + +**Derivatives and integrals** + +Derivatives of the Bi-function (directly and using :func:`~mpmath.diff`):: + + >>> airybi(-3,1); diff(airybi,-3) + -0.675611222685258537668032 + -0.675611222685258537668032 + >>> airybi(-3,2); diff(airybi,-3,2) + 0.5948688791247796296619346 + 0.5948688791247796296619346 + >>> airybi(1000,1); diff(airybi,1000) + 1.710055114624614989262335e+9156 + 1.710055114624614989262335e+9156 + +Several derivatives at `z = 0`:: + + >>> airybi(0,0); airybi(0,1); airybi(0,2) + 0.6149266274460007351509224 + 0.4482883573538263579148237 + 0.0 + >>> airybi(0,3); airybi(0,4); airybi(0,5) + 0.6149266274460007351509224 + 0.8965767147076527158296474 + 0.0 + >>> airybi(0,15); airybi(0,16); airybi(0,17) + 2238.332923903442675949357 + 5522.912562599140729510628 + 0.0 + +The integral of the Bi-function:: + + >>> airybi(3,-1); quad(airybi, [0,3]) + 10.06200303130620056316655 + 10.06200303130620056316655 + >>> airybi(-10,-1); quad(airybi, [0,-10]) + -0.01504042480614002045135483 + -0.01504042480614002045135483 + +Integrals of high or fractional order:: + + >>> airybi(-2,0.5); differint(airybi, -2, 0.5, 0) + (0.0 + 0.5019859055341699223453257j) + (0.0 + 0.5019859055341699223453257j) + >>> airybi(-2,-4); differint(airybi,-2,-4,0) + 0.2809314599922447252139092 + 0.2809314599922447252139092 + >>> airybi(0,-1); airybi(0,-2); airybi(0,-3) + 0.0 + 0.0 + 0.0 + +Integrals of the Bi-function can be evaluated at limit points:: + + >>> airybi(-1000000,-1); airybi(-inf,-1) + 0.000002191261128063434047966873 + 0.0 + >>> airybi(10,-1); airybi(+inf,-1) + 147809803.1074067161675853 + +inf + >>> airybi(+inf,-2); airybi(+inf,-3) + +inf + +inf + >>> airybi(-1000000,-2); airybi(-inf,-2) + 0.4482883750599908479851085 + 0.4482883573538263579148237 + >>> 
gamma('2/3')*power(3,'2/3')/(2*pi) + 0.4482883573538263579148237 + >>> airybi(-100000,-3); airybi(-inf,-3) + -44828.52827206932872493133 + -inf + >>> airybi(-100000,-4); airybi(-inf,-4) + 2241411040.437759489540248 + +inf + +""" + +airyaizero = r""" +Gives the `k`-th zero of the Airy Ai-function, +i.e. the `k`-th number `a_k` ordered by magnitude for which +`\operatorname{Ai}(a_k) = 0`. + +Optionally, with *derivative=1*, the corresponding +zero `a'_k` of the derivative function, i.e. +`\operatorname{Ai}'(a'_k) = 0`, is computed. + +**Examples** + +Some values of `a_k`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> airyaizero(1) + -2.338107410459767038489197 + >>> airyaizero(2) + -4.087949444130970616636989 + >>> airyaizero(3) + -5.520559828095551059129856 + >>> airyaizero(1000) + -281.0315196125215528353364 + +Some values of `a'_k`:: + + >>> airyaizero(1,1) + -1.018792971647471089017325 + >>> airyaizero(2,1) + -3.248197582179836537875424 + >>> airyaizero(3,1) + -4.820099211178735639400616 + >>> airyaizero(1000,1) + -280.9378080358935070607097 + +Verification:: + + >>> chop(airyai(airyaizero(1))) + 0.0 + >>> chop(airyai(airyaizero(1,1),1)) + 0.0 + +""" + +airybizero = r""" +With *complex=False*, gives the `k`-th real zero of the Airy Bi-function, +i.e. the `k`-th number `b_k` ordered by magnitude for which +`\operatorname{Bi}(b_k) = 0`. + +With *complex=True*, gives the `k`-th complex zero in the upper +half plane `\beta_k`. Also the conjugate `\overline{\beta_k}` +is a zero. + +Optionally, with *derivative=1*, the corresponding +zero `b'_k` or `\beta'_k` of the derivative function, i.e. +`\operatorname{Bi}'(b'_k) = 0` or `\operatorname{Bi}'(\beta'_k) = 0`, +is computed. 
+ +**Examples** + +Some values of `b_k`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> airybizero(1) + -1.17371322270912792491998 + >>> airybizero(2) + -3.271093302836352715680228 + >>> airybizero(3) + -4.830737841662015932667709 + >>> airybizero(1000) + -280.9378112034152401578834 + +Some values of `b_k`:: + + >>> airybizero(1,1) + -2.294439682614123246622459 + >>> airybizero(2,1) + -4.073155089071828215552369 + >>> airybizero(3,1) + -5.512395729663599496259593 + >>> airybizero(1000,1) + -281.0315164471118527161362 + +Some values of `\beta_k`:: + + >>> airybizero(1,complex=True) + (0.9775448867316206859469927 + 2.141290706038744575749139j) + >>> airybizero(2,complex=True) + (1.896775013895336346627217 + 3.627291764358919410440499j) + >>> airybizero(3,complex=True) + (2.633157739354946595708019 + 4.855468179979844983174628j) + >>> airybizero(1000,complex=True) + (140.4978560578493018899793 + 243.3907724215792121244867j) + +Some values of `\beta'_k`:: + + >>> airybizero(1,1,complex=True) + (0.2149470745374305676088329 + 1.100600143302797880647194j) + >>> airybizero(2,1,complex=True) + (1.458168309223507392028211 + 2.912249367458445419235083j) + >>> airybizero(3,1,complex=True) + (2.273760763013482299792362 + 4.254528549217097862167015j) + >>> airybizero(1000,1,complex=True) + (140.4509972835270559730423 + 243.3096175398562811896208j) + +Verification:: + + >>> chop(airybi(airybizero(1))) + 0.0 + >>> chop(airybi(airybizero(1,1),1)) + 0.0 + >>> u = airybizero(1,complex=True) + >>> chop(airybi(u)) + 0.0 + >>> chop(airybi(conj(u))) + 0.0 + +The complex zeros (in the upper and lower half-planes respectively) +asymptotically approach the rays `z = R \exp(\pm i \pi /3)`:: + + >>> arg(airybizero(1,complex=True)) + 1.142532510286334022305364 + >>> arg(airybizero(1000,complex=True)) + 1.047271114786212061583917 + >>> arg(airybizero(1000000,complex=True)) + 1.047197624741816183341355 + >>> pi/3 + 1.047197551196597746154214 + +""" + + +ellipk = r""" 
+Evaluates the complete elliptic integral of the first kind, +`K(m)`, defined by + +.. math :: + + K(m) = \int_0^{\pi/2} \frac{dt}{\sqrt{1-m \sin^2 t}} \, = \, + \frac{\pi}{2} \,_2F_1\left(\frac{1}{2}, \frac{1}{2}, 1, m\right). + +Note that the argument is the parameter `m = k^2`, +not the modulus `k` which is sometimes used. + +**Plots** + +.. literalinclude :: /plots/ellipk.py +.. image :: /plots/ellipk.png + +**Examples** + +Values and limits include:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> ellipk(0) + 1.570796326794896619231322 + >>> ellipk(inf) + (0.0 + 0.0j) + >>> ellipk(-inf) + 0.0 + >>> ellipk(1) + +inf + >>> ellipk(-1) + 1.31102877714605990523242 + >>> ellipk(2) + (1.31102877714605990523242 - 1.31102877714605990523242j) + +Verifying the defining integral and hypergeometric +representation:: + + >>> ellipk(0.5) + 1.85407467730137191843385 + >>> quad(lambda t: (1-0.5*sin(t)**2)**-0.5, [0, pi/2]) + 1.85407467730137191843385 + >>> pi/2*hyp2f1(0.5,0.5,1,0.5) + 1.85407467730137191843385 + +Evaluation is supported for arbitrary complex `m`:: + + >>> ellipk(3+4j) + (0.9111955638049650086562171 + 0.6313342832413452438845091j) + +A definite integral:: + + >>> quad(ellipk, [0, 1]) + 2.0 +""" + +agm = r""" +``agm(a, b)`` computes the arithmetic-geometric mean of `a` and +`b`, defined as the limit of the following iteration: + +.. math :: + + a_0 = a + + b_0 = b + + a_{n+1} = \frac{a_n+b_n}{2} + + b_{n+1} = \sqrt{a_n b_n} + +This function can be called with a single argument, computing +`\mathrm{agm}(a,1) = \mathrm{agm}(1,a)`. + +**Examples** + +It is a well-known theorem that the geometric mean of +two distinct positive numbers is less than the arithmetic +mean. 
It follows that the arithmetic-geometric mean lies +between the two means:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> a = mpf(3) + >>> b = mpf(4) + >>> sqrt(a*b) + 3.46410161513775 + >>> agm(a,b) + 3.48202767635957 + >>> (a+b)/2 + 3.5 + +The arithmetic-geometric mean is scale-invariant:: + + >>> agm(10*e, 10*pi) + 29.261085515723 + >>> 10*agm(e, pi) + 29.261085515723 + +As an order-of-magnitude estimate, `\mathrm{agm}(1,x) \approx x` +for large `x`:: + + >>> agm(10**10) + 643448704.760133 + >>> agm(10**50) + 1.34814309345871e+48 + +For tiny `x`, `\mathrm{agm}(1,x) \approx -\pi/(2 \log(x/4))`:: + + >>> agm('0.01') + 0.262166887202249 + >>> -pi/2/log('0.0025') + 0.262172347753122 + +The arithmetic-geometric mean can also be computed for complex +numbers:: + + >>> agm(3, 2+j) + (2.51055133276184 + 0.547394054060638j) + +The AGM iteration converges very quickly (each step doubles +the number of correct digits), so :func:`~mpmath.agm` supports efficient +high-precision evaluation:: + + >>> mp.dps = 10000 + >>> a = agm(1,2) + >>> str(a)[-10:] + '1679581912' + +**Mathematical relations** + +The arithmetic-geometric mean may be used to evaluate the +following two parametric definite integrals: + +.. math :: + + I_1 = \int_0^{\infty} + \frac{1}{\sqrt{(x^2+a^2)(x^2+b^2)}} \,dx + + I_2 = \int_0^{\pi/2} + \frac{1}{\sqrt{a^2 \cos^2(x) + b^2 \sin^2(x)}} \,dx + +We have:: + + >>> mp.dps = 15 + >>> a = 3 + >>> b = 4 + >>> f1 = lambda x: ((x**2+a**2)*(x**2+b**2))**-0.5 + >>> f2 = lambda x: ((a*cos(x))**2 + (b*sin(x))**2)**-0.5 + >>> quad(f1, [0, inf]) + 0.451115405388492 + >>> quad(f2, [0, pi/2]) + 0.451115405388492 + >>> pi/(2*agm(a,b)) + 0.451115405388492 + +A formula for `\Gamma(1/4)`:: + + >>> gamma(0.25) + 3.62560990822191 + >>> sqrt(2*sqrt(2*pi**3)/agm(1,sqrt(2))) + 3.62560990822191 + +**Possible issues** + +The branch cut chosen for complex `a` and `b` is somewhat +arbitrary. 
+ +""" + +gegenbauer = r""" +Evaluates the Gegenbauer polynomial, or ultraspherical polynomial, + +.. math :: + + C_n^{(a)}(z) = {n+2a-1 \choose n} \,_2F_1\left(-n, n+2a; + a+\frac{1}{2}; \frac{1}{2}(1-z)\right). + +When `n` is a nonnegative integer, this formula gives a polynomial +in `z` of degree `n`, but all parameters are permitted to be +complex numbers. With `a = 1/2`, the Gegenbauer polynomial +reduces to a Legendre polynomial. + +**Examples** + +Evaluation for arbitrary arguments:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> gegenbauer(3, 0.5, -10) + -2485.0 + >>> gegenbauer(1000, 10, 100) + 3.012757178975667428359374e+2322 + >>> gegenbauer(2+3j, -0.75, -1000j) + (-5038991.358609026523401901 + 9414549.285447104177860806j) + +Evaluation at negative integer orders:: + + >>> gegenbauer(-4, 2, 1.75) + -1.0 + >>> gegenbauer(-4, 3, 1.75) + 0.0 + >>> gegenbauer(-4, 2j, 1.75) + 0.0 + >>> gegenbauer(-7, 0.5, 3) + 8989.0 + +The Gegenbauer polynomials solve the differential equation:: + + >>> n, a = 4.5, 1+2j + >>> f = lambda z: gegenbauer(n, a, z) + >>> for z in [0, 0.75, -0.5j]: + ... chop((1-z**2)*diff(f,z,2) - (2*a+1)*z*diff(f,z) + n*(n+2*a)*f(z)) + ... + 0.0 + 0.0 + 0.0 + +The Gegenbauer polynomials have generating function +`(1-2zt+t^2)^{-a}`:: + + >>> a, z = 2.5, 1 + >>> taylor(lambda t: (1-2*z*t+t**2)**(-a), 0, 3) + [1.0, 5.0, 15.0, 35.0] + >>> [gegenbauer(n,a,z) for n in range(4)] + [1.0, 5.0, 15.0, 35.0] + +The Gegenbauer polynomials are orthogonal on `[-1, 1]` with respect +to the weight `(1-z^2)^{a-\frac{1}{2}}`:: + + >>> a, n, m = 2.5, 4, 5 + >>> Cn = lambda z: gegenbauer(n, a, z, zeroprec=1000) + >>> Cm = lambda z: gegenbauer(m, a, z, zeroprec=1000) + >>> chop(quad(lambda z: Cn(z)*Cm(z)*(1-z**2)*(a-0.5), [-1, 1])) + 0.0 +""" + +laguerre = r""" +Gives the generalized (associated) Laguerre polynomial, defined by + +.. math :: + + L_n^a(z) = \frac{\Gamma(n+b+1)}{\Gamma(b+1) \Gamma(n+1)} + \,_1F_1(-n, a+1, z). 
+ +With `a = 0` and `n` a nonnegative integer, this reduces to an ordinary +Laguerre polynomial, the sequence of which begins +`L_0(z) = 1, L_1(z) = 1-z, L_2(z) = z^2-2z+1, \ldots`. + +The Laguerre polynomials are orthogonal with respect to the weight +`z^a e^{-z}` on `[0, \infty)`. + +**Plots** + +.. literalinclude :: /plots/laguerre.py +.. image :: /plots/laguerre.png + +**Examples** + +Evaluation for arbitrary arguments:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> laguerre(5, 0, 0.25) + 0.03726399739583333333333333 + >>> laguerre(1+j, 0.5, 2+3j) + (4.474921610704496808379097 - 11.02058050372068958069241j) + >>> laguerre(2, 0, 10000) + 49980001.0 + >>> laguerre(2.5, 0, 10000) + -9.327764910194842158583189e+4328 + +The first few Laguerre polynomials, normalized to have integer +coefficients:: + + >>> for n in range(7): + ... chop(taylor(lambda z: fac(n)*laguerre(n, 0, z), 0, n)) + ... + [1.0] + [1.0, -1.0] + [2.0, -4.0, 1.0] + [6.0, -18.0, 9.0, -1.0] + [24.0, -96.0, 72.0, -16.0, 1.0] + [120.0, -600.0, 600.0, -200.0, 25.0, -1.0] + [720.0, -4320.0, 5400.0, -2400.0, 450.0, -36.0, 1.0] + +Verifying orthogonality:: + + >>> Lm = lambda t: laguerre(m,a,t) + >>> Ln = lambda t: laguerre(n,a,t) + >>> a, n, m = 2.5, 2, 3 + >>> chop(quad(lambda t: exp(-t)*t**a*Lm(t)*Ln(t), [0,inf])) + 0.0 + + +""" + +hermite = r""" +Evaluates the Hermite polynomial `H_n(z)`, which may be defined using +the recurrence + +.. math :: + + H_0(z) = 1 + + H_1(z) = 2z + + H_{n+1} = 2z H_n(z) - 2n H_{n-1}(z). + +The Hermite polynomials are orthogonal on `(-\infty, \infty)` with +respect to the weight `e^{-z^2}`. More generally, allowing arbitrary complex +values of `n`, the Hermite function `H_n(z)` is defined as + +.. math :: + + H_n(z) = (2z)^n \,_2F_0\left(-\frac{n}{2}, \frac{1-n}{2}, + -\frac{1}{z^2}\right) + +for `\Re{z} > 0`, or generally + +.. 
math :: + + H_n(z) = 2^n \sqrt{\pi} \left( + \frac{1}{\Gamma\left(\frac{1-n}{2}\right)} + \,_1F_1\left(-\frac{n}{2}, \frac{1}{2}, z^2\right) - + \frac{2z}{\Gamma\left(-\frac{n}{2}\right)} + \,_1F_1\left(\frac{1-n}{2}, \frac{3}{2}, z^2\right) + \right). + +**Plots** + +.. literalinclude :: /plots/hermite.py +.. image :: /plots/hermite.png + +**Examples** + +Evaluation for arbitrary arguments:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> hermite(0, 10) + 1.0 + >>> hermite(1, 10); hermite(2, 10) + 20.0 + 398.0 + >>> hermite(10000, 2) + 4.950440066552087387515653e+19334 + >>> hermite(3, -10**8) + -7999999999999998800000000.0 + >>> hermite(-3, -10**8) + 1.675159751729877682920301e+4342944819032534 + >>> hermite(2+3j, -1+2j) + (-0.07652130602993513389421901 - 0.1084662449961914580276007j) + +Coefficients of the first few Hermite polynomials are:: + + >>> for n in range(7): + ... chop(taylor(lambda z: hermite(n, z), 0, n)) + ... + [1.0] + [0.0, 2.0] + [-2.0, 0.0, 4.0] + [0.0, -12.0, 0.0, 8.0] + [12.0, 0.0, -48.0, 0.0, 16.0] + [0.0, 120.0, 0.0, -160.0, 0.0, 32.0] + [-120.0, 0.0, 720.0, 0.0, -480.0, 0.0, 64.0] + +Values at `z = 0`:: + + >>> for n in range(-5, 9): + ... hermite(n, 0) + ... + 0.02769459142039868792653387 + 0.08333333333333333333333333 + 0.2215567313631895034122709 + 0.5 + 0.8862269254527580136490837 + 1.0 + 0.0 + -2.0 + 0.0 + 12.0 + 0.0 + -120.0 + 0.0 + 1680.0 + +Hermite functions satisfy the differential equation:: + + >>> n = 4 + >>> f = lambda z: hermite(n, z) + >>> z = 1.5 + >>> chop(diff(f,z,2) - 2*z*diff(f,z) + 2*n*f(z)) + 0.0 + +Verifying orthogonality:: + + >>> chop(quad(lambda t: hermite(2,t)*hermite(4,t)*exp(-t**2), [-inf,inf])) + 0.0 + +""" + +jacobi = r""" +``jacobi(n, a, b, x)`` evaluates the Jacobi polynomial +`P_n^{(a,b)}(x)`. The Jacobi polynomials are a special +case of the hypergeometric function `\,_2F_1` given by: + +.. 
math :: + + P_n^{(a,b)}(x) = {n+a \choose n} + \,_2F_1\left(-n,1+a+b+n,a+1,\frac{1-x}{2}\right). + +Note that this definition generalizes to nonintegral values +of `n`. When `n` is an integer, the hypergeometric series +terminates after a finite number of terms, giving +a polynomial in `x`. + +**Evaluation of Jacobi polynomials** + +A special evaluation is `P_n^{(a,b)}(1) = {n+a \choose n}`:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> jacobi(4, 0.5, 0.25, 1) + 2.4609375 + >>> binomial(4+0.5, 4) + 2.4609375 + +A Jacobi polynomial of degree `n` is equal to its +Taylor polynomial of degree `n`. The explicit +coefficients of Jacobi polynomials can therefore +be recovered easily using :func:`~mpmath.taylor`:: + + >>> for n in range(5): + ... nprint(taylor(lambda x: jacobi(n,1,2,x), 0, n)) + ... + [1.0] + [-0.5, 2.5] + [-0.75, -1.5, 5.25] + [0.5, -3.5, -3.5, 10.5] + [0.625, 2.5, -11.25, -7.5, 20.625] + +For nonintegral `n`, the Jacobi "polynomial" is no longer +a polynomial:: + + >>> nprint(taylor(lambda x: jacobi(0.5,1,2,x), 0, 4)) + [0.309983, 1.84119, -1.26933, 1.26699, -1.34808] + +**Orthogonality** + +The Jacobi polynomials are orthogonal on the interval +`[-1, 1]` with respect to the weight function +`w(x) = (1-x)^a (1+x)^b`. That is, +`w(x) P_n^{(a,b)}(x) P_m^{(a,b)}(x)` integrates to +zero if `m \ne n` and to a nonzero number if `m = n`. + +The orthogonality is easy to verify using numerical +quadrature:: + + >>> P = jacobi + >>> f = lambda x: (1-x)**a * (1+x)**b * P(m,a,b,x) * P(n,a,b,x) + >>> a = 2 + >>> b = 3 + >>> m, n = 3, 4 + >>> chop(quad(f, [-1, 1]), 1) + 0.0 + >>> m, n = 4, 4 + >>> quad(f, [-1, 1]) + 1.9047619047619 + +**Differential equation** + +The Jacobi polynomials are solutions of the differential +equation + +.. math :: + + (1-x^2) y'' + (b-a-(a+b+2)x) y' + n (n+a+b+1) y = 0. 
+ +We can verify that :func:`~mpmath.jacobi` approximately satisfies +this equation:: + + >>> from mpmath import * + >>> mp.dps = 15 + >>> a = 2.5 + >>> b = 4 + >>> n = 3 + >>> y = lambda x: jacobi(n,a,b,x) + >>> x = pi + >>> A0 = n*(n+a+b+1)*y(x) + >>> A1 = (b-a-(a+b+2)*x)*diff(y,x) + >>> A2 = (1-x**2)*diff(y,x,2) + >>> nprint(A2 + A1 + A0, 1) + 4.0e-12 + +The difference of order `10^{-12}` is as close to zero as +it could be at 15-digit working precision, since the terms +are large:: + + >>> A0, A1, A2 + (26560.2328981879, -21503.7641037294, -5056.46879445852) + +""" + +legendre = r""" +``legendre(n, x)`` evaluates the Legendre polynomial `P_n(x)`. +The Legendre polynomials are given by the formula + +.. math :: + + P_n(x) = \frac{1}{2^n n!} \frac{d^n}{dx^n} (x^2 -1)^n. + +Alternatively, they can be computed recursively using + +.. math :: + + P_0(x) = 1 + + P_1(x) = x + + (n+1) P_{n+1}(x) = (2n+1) x P_n(x) - n P_{n-1}(x). + +A third definition is in terms of the hypergeometric function +`\,_2F_1`, whereby they can be generalized to arbitrary `n`: + +.. math :: + + P_n(x) = \,_2F_1\left(-n, n+1, 1, \frac{1-x}{2}\right) + +**Plots** + +.. literalinclude :: /plots/legendre.py +.. image :: /plots/legendre.png + +**Basic evaluation** + +The Legendre polynomials assume fixed values at the points +`x = -1` and `x = 1`:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> nprint([legendre(n, 1) for n in range(6)]) + [1.0, 1.0, 1.0, 1.0, 1.0, 1.0] + >>> nprint([legendre(n, -1) for n in range(6)]) + [1.0, -1.0, 1.0, -1.0, 1.0, -1.0] + +The coefficients of Legendre polynomials can be recovered +using degree-`n` Taylor expansion:: + + >>> for n in range(5): + ... nprint(chop(taylor(lambda x: legendre(n, x), 0, n))) + ... + [1.0] + [0.0, 1.0] + [-0.5, 0.0, 1.5] + [0.0, -1.5, 0.0, 2.5] + [0.375, 0.0, -3.75, 0.0, 4.375] + +The roots of Legendre polynomials are located symmetrically +on the interval `[-1, 1]`:: + + >>> for n in range(5): + ... 
nprint(polyroots(taylor(lambda x: legendre(n, x), 0, n)[::-1])) + ... + [] + [0.0] + [-0.57735, 0.57735] + [-0.774597, 0.0, 0.774597] + [-0.861136, -0.339981, 0.339981, 0.861136] + +An example of an evaluation for arbitrary `n`:: + + >>> legendre(0.75, 2+4j) + (1.94952805264875 + 2.1071073099422j) + +**Orthogonality** + +The Legendre polynomials are orthogonal on `[-1, 1]` with respect +to the trivial weight `w(x) = 1`. That is, `P_m(x) P_n(x)` +integrates to zero if `m \ne n` and to `2/(2n+1)` if `m = n`:: + + >>> m, n = 3, 4 + >>> quad(lambda x: legendre(m,x)*legendre(n,x), [-1, 1]) + 0.0 + >>> m, n = 4, 4 + >>> quad(lambda x: legendre(m,x)*legendre(n,x), [-1, 1]) + 0.222222222222222 + +**Differential equation** + +The Legendre polynomials satisfy the differential equation + +.. math :: + + ((1-x^2) y')' + n(n+1) y' = 0. + +We can verify this numerically:: + + >>> n = 3.6 + >>> x = 0.73 + >>> P = legendre + >>> A = diff(lambda t: (1-t**2)*diff(lambda u: P(n,u), t), x) + >>> B = n*(n+1)*P(n,x) + >>> nprint(A+B,1) + 9.0e-16 + +""" + + +legenp = r""" +Calculates the (associated) Legendre function of the first kind of +degree *n* and order *m*, `P_n^m(z)`. Taking `m = 0` gives the ordinary +Legendre function of the first kind, `P_n(z)`. The parameters may be +complex numbers. + +In terms of the Gauss hypergeometric function, the (associated) Legendre +function is defined as + +.. math :: + + P_n^m(z) = \frac{1}{\Gamma(1-m)} \frac{(1+z)^{m/2}}{(1-z)^{m/2}} + \,_2F_1\left(-n, n+1, 1-m, \frac{1-z}{2}\right). + +With *type=3* instead of *type=2*, the alternative +definition + +.. math :: + + \hat{P}_n^m(z) = \frac{1}{\Gamma(1-m)} \frac{(z+1)^{m/2}}{(z-1)^{m/2}} + \,_2F_1\left(-n, n+1, 1-m, \frac{1-z}{2}\right). + +is used. These functions correspond respectively to ``LegendreP[n,m,2,z]`` +and ``LegendreP[n,m,3,z]`` in Mathematica. + +The general solution of the (associated) Legendre differential equation + +.. 
math :: + + (1-z^2) f''(z) - 2zf'(z) + \left(n(n+1)-\frac{m^2}{1-z^2}\right)f(z) = 0 + +is given by `C_1 P_n^m(z) + C_2 Q_n^m(z)` for arbitrary constants +`C_1`, `C_2`, where `Q_n^m(z)` is a Legendre function of the +second kind as implemented by :func:`~mpmath.legenq`. + +**Examples** + +Evaluation for arbitrary parameters and arguments:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> legenp(2, 0, 10); legendre(2, 10) + 149.5 + 149.5 + >>> legenp(-2, 0.5, 2.5) + (1.972260393822275434196053 - 1.972260393822275434196053j) + >>> legenp(2+3j, 1-j, -0.5+4j) + (-3.335677248386698208736542 - 5.663270217461022307645625j) + >>> chop(legenp(3, 2, -1.5, type=2)) + 28.125 + >>> chop(legenp(3, 2, -1.5, type=3)) + -28.125 + +Verifying the associated Legendre differential equation:: + + >>> n, m = 2, -0.5 + >>> C1, C2 = 1, -3 + >>> f = lambda z: C1*legenp(n,m,z) + C2*legenq(n,m,z) + >>> deq = lambda z: (1-z**2)*diff(f,z,2) - 2*z*diff(f,z) + \ + ... (n*(n+1)-m**2/(1-z**2))*f(z) + >>> for z in [0, 2, -1.5, 0.5+2j]: + ... chop(deq(mpmathify(z))) + ... + 0.0 + 0.0 + 0.0 + 0.0 +""" + +legenq = r""" +Calculates the (associated) Legendre function of the second kind of +degree *n* and order *m*, `Q_n^m(z)`. Taking `m = 0` gives the ordinary +Legendre function of the second kind, `Q_n(z)`. The parameters may be +complex numbers. + +The Legendre functions of the second kind give a second set of +solutions to the (associated) Legendre differential equation. +(See :func:`~mpmath.legenp`.) +Unlike the Legendre functions of the first kind, they are not +polynomials of `z` for integer `n`, `m` but rational or logarithmic +functions with poles at `z = \pm 1`. + +There are various ways to define Legendre functions of +the second kind, giving rise to different complex structure. +A version can be selected using the *type* keyword argument. +The *type=2* and *type=3* functions are given respectively by + +.. 
math :: + + Q_n^m(z) = \frac{\pi}{2 \sin(\pi m)} + \left( \cos(\pi m) P_n^m(z) - + \frac{\Gamma(1+m+n)}{\Gamma(1-m+n)} P_n^{-m}(z)\right) + + \hat{Q}_n^m(z) = \frac{\pi}{2 \sin(\pi m)} e^{\pi i m} + \left( \hat{P}_n^m(z) - + \frac{\Gamma(1+m+n)}{\Gamma(1-m+n)} \hat{P}_n^{-m}(z)\right) + +where `P` and `\hat{P}` are the *type=2* and *type=3* Legendre functions +of the first kind. The formulas above should be understood as limits +when `m` is an integer. + +These functions correspond to ``LegendreQ[n,m,2,z]`` (or ``LegendreQ[n,m,z]``) +and ``LegendreQ[n,m,3,z]`` in Mathematica. The *type=3* function +is essentially the same as the function defined in +Abramowitz & Stegun (eq. 8.1.3) but with `(z+1)^{m/2}(z-1)^{m/2}` instead +of `(z^2-1)^{m/2}`, giving slightly different branches. + +**Examples** + +Evaluation for arbitrary parameters and arguments:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> legenq(2, 0, 0.5) + -0.8186632680417568557122028 + >>> legenq(-1.5, -2, 2.5) + (0.6655964618250228714288277 + 0.3937692045497259717762649j) + >>> legenq(2-j, 3+4j, -6+5j) + (-10001.95256487468541686564 - 6011.691337610097577791134j) + +Different versions of the function:: + + >>> legenq(2, 1, 0.5) + 0.7298060598018049369381857 + >>> legenq(2, 1, 1.5) + (-7.902916572420817192300921 + 0.1998650072605976600724502j) + >>> legenq(2, 1, 0.5, type=3) + (2.040524284763495081918338 - 0.7298060598018049369381857j) + >>> chop(legenq(2, 1, 1.5, type=3)) + -0.1998650072605976600724502 + +""" + +chebyt = r""" +``chebyt(n, x)`` evaluates the Chebyshev polynomial of the first +kind `T_n(x)`, defined by the identity + +.. math :: + + T_n(\cos x) = \cos(n x). + +The Chebyshev polynomials of the first kind are a special +case of the Jacobi polynomials, and by extension of the +hypergeometric function `\,_2F_1`. They can thus also be +evaluated for nonintegral `n`. + +**Plots** + +.. literalinclude :: /plots/chebyt.py +.. 
image :: /plots/chebyt.png + +**Basic evaluation** + +The coefficients of the `n`-th polynomial can be recovered +using using degree-`n` Taylor expansion:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> for n in range(5): + ... nprint(chop(taylor(lambda x: chebyt(n, x), 0, n))) + ... + [1.0] + [0.0, 1.0] + [-1.0, 0.0, 2.0] + [0.0, -3.0, 0.0, 4.0] + [1.0, 0.0, -8.0, 0.0, 8.0] + +**Orthogonality** + +The Chebyshev polynomials of the first kind are orthogonal +on the interval `[-1, 1]` with respect to the weight +function `w(x) = 1/\sqrt{1-x^2}`:: + + >>> f = lambda x: chebyt(m,x)*chebyt(n,x)/sqrt(1-x**2) + >>> m, n = 3, 4 + >>> nprint(quad(f, [-1, 1]),1) + 0.0 + >>> m, n = 4, 4 + >>> quad(f, [-1, 1]) + 1.57079632596448 + +""" + +chebyu = r""" +``chebyu(n, x)`` evaluates the Chebyshev polynomial of the second +kind `U_n(x)`, defined by the identity + +.. math :: + + U_n(\cos x) = \frac{\sin((n+1)x)}{\sin(x)}. + +The Chebyshev polynomials of the second kind are a special +case of the Jacobi polynomials, and by extension of the +hypergeometric function `\,_2F_1`. They can thus also be +evaluated for nonintegral `n`. + +**Plots** + +.. literalinclude :: /plots/chebyu.py +.. image :: /plots/chebyu.png + +**Basic evaluation** + +The coefficients of the `n`-th polynomial can be recovered +using using degree-`n` Taylor expansion:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> for n in range(5): + ... nprint(chop(taylor(lambda x: chebyu(n, x), 0, n))) + ... 
+ [1.0] + [0.0, 2.0] + [-1.0, 0.0, 4.0] + [0.0, -4.0, 0.0, 8.0] + [1.0, 0.0, -12.0, 0.0, 16.0] + +**Orthogonality** + +The Chebyshev polynomials of the second kind are orthogonal +on the interval `[-1, 1]` with respect to the weight +function `w(x) = \sqrt{1-x^2}`:: + + >>> f = lambda x: chebyu(m,x)*chebyu(n,x)*sqrt(1-x**2) + >>> m, n = 3, 4 + >>> quad(f, [-1, 1]) + 0.0 + >>> m, n = 4, 4 + >>> quad(f, [-1, 1]) + 1.5707963267949 +""" + +besselj = r""" +``besselj(n, x, derivative=0)`` gives the Bessel function of the first kind +`J_n(x)`. Bessel functions of the first kind are defined as +solutions of the differential equation + +.. math :: + + x^2 y'' + x y' + (x^2 - n^2) y = 0 + +which appears, among other things, when solving the radial +part of Laplace's equation in cylindrical coordinates. This +equation has two solutions for given `n`, where the +`J_n`-function is the solution that is nonsingular at `x = 0`. +For positive integer `n`, `J_n(x)` behaves roughly like a sine +(odd `n`) or cosine (even `n`) multiplied by a magnitude factor +that decays slowly as `x \to \pm\infty`. + +Generally, `J_n` is a special case of the hypergeometric +function `\,_0F_1`: + +.. math :: + + J_n(x) = \frac{x^n}{2^n \Gamma(n+1)} + \,_0F_1\left(n+1,-\frac{x^2}{4}\right) + +With *derivative* = `m \ne 0`, the `m`-th derivative + +.. math :: + + \frac{d^m}{dx^m} J_n(x) + +is computed. + +**Plots** + +.. literalinclude :: /plots/besselj.py +.. image :: /plots/besselj.png +.. literalinclude :: /plots/besselj_c.py +.. 
image :: /plots/besselj_c.png + +**Examples** + +Evaluation is supported for arbitrary arguments, and at +arbitrary precision:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> besselj(2, 1000) + -0.024777229528606 + >>> besselj(4, 0.75) + 0.000801070086542314 + >>> besselj(2, 1000j) + (-2.48071721019185e+432 + 6.41567059811949e-437j) + >>> mp.dps = 25 + >>> besselj(0.75j, 3+4j) + (-2.778118364828153309919653 - 1.5863603889018621585533j) + >>> mp.dps = 50 + >>> besselj(1, pi) + 0.28461534317975275734531059968613140570981118184947 + +Arguments may be large:: + + >>> mp.dps = 25 + >>> besselj(0, 10000) + -0.007096160353388801477265164 + >>> besselj(0, 10**10) + 0.000002175591750246891726859055 + >>> besselj(2, 10**100) + 7.337048736538615712436929e-51 + >>> besselj(2, 10**5*j) + (-3.540725411970948860173735e+43426 + 4.4949812409615803110051e-43433j) + +The Bessel functions of the first kind satisfy simple +symmetries around `x = 0`:: + + >>> mp.dps = 15 + >>> nprint([besselj(n,0) for n in range(5)]) + [1.0, 0.0, 0.0, 0.0, 0.0] + >>> nprint([besselj(n,pi) for n in range(5)]) + [-0.304242, 0.284615, 0.485434, 0.333458, 0.151425] + >>> nprint([besselj(n,-pi) for n in range(5)]) + [-0.304242, -0.284615, 0.485434, -0.333458, 0.151425] + +Roots of Bessel functions are often used:: + + >>> nprint([findroot(j0, k) for k in [2, 5, 8, 11, 14]]) + [2.40483, 5.52008, 8.65373, 11.7915, 14.9309] + >>> nprint([findroot(j1, k) for k in [3, 7, 10, 13, 16]]) + [3.83171, 7.01559, 10.1735, 13.3237, 16.4706] + +The roots are not periodic, but the distance between successive +roots asymptotically approaches `2 \pi`. 
Bessel functions of +the first kind have the following normalization:: + + >>> quadosc(j0, [0, inf], period=2*pi) + 1.0 + >>> quadosc(j1, [0, inf], period=2*pi) + 1.0 + +For `n = 1/2` or `n = -1/2`, the Bessel function reduces to a +trigonometric function:: + + >>> x = 10 + >>> besselj(0.5, x), sqrt(2/(pi*x))*sin(x) + (-0.13726373575505, -0.13726373575505) + >>> besselj(-0.5, x), sqrt(2/(pi*x))*cos(x) + (-0.211708866331398, -0.211708866331398) + +Derivatives of any order can be computed (negative orders +correspond to integration):: + + >>> mp.dps = 25 + >>> besselj(0, 7.5, 1) + -0.1352484275797055051822405 + >>> diff(lambda x: besselj(0,x), 7.5) + -0.1352484275797055051822405 + >>> besselj(0, 7.5, 10) + -0.1377811164763244890135677 + >>> diff(lambda x: besselj(0,x), 7.5, 10) + -0.1377811164763244890135677 + >>> besselj(0,7.5,-1) - besselj(0,3.5,-1) + -0.1241343240399987693521378 + >>> quad(j0, [3.5, 7.5]) + -0.1241343240399987693521378 + +Differentiation with a noninteger order gives the fractional derivative +in the sense of the Riemann-Liouville differintegral, as computed by +:func:`~mpmath.differint`:: + + >>> mp.dps = 15 + >>> besselj(1, 3.5, 0.75) + -0.385977722939384 + >>> differint(lambda x: besselj(1, x), 3.5, 0.75) + -0.385977722939384 + +""" + +besseli = r""" +``besseli(n, x, derivative=0)`` gives the modified Bessel function of the +first kind, + +.. math :: + + I_n(x) = i^{-n} J_n(ix). + +With *derivative* = `m \ne 0`, the `m`-th derivative + +.. math :: + + \frac{d^m}{dx^m} I_n(x) + +is computed. + +**Plots** + +.. literalinclude :: /plots/besseli.py +.. image :: /plots/besseli.png +.. literalinclude :: /plots/besseli_c.py +.. 
image :: /plots/besseli_c.png + +**Examples** + +Some values of `I_n(x)`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> besseli(0,0) + 1.0 + >>> besseli(1,0) + 0.0 + >>> besseli(0,1) + 1.266065877752008335598245 + >>> besseli(3.5, 2+3j) + (-0.2904369752642538144289025 - 0.4469098397654815837307006j) + +Arguments may be large:: + + >>> besseli(2, 1000) + 2.480717210191852440616782e+432 + >>> besseli(2, 10**10) + 4.299602851624027900335391e+4342944813 + >>> besseli(2, 6000+10000j) + (-2.114650753239580827144204e+2603 + 4.385040221241629041351886e+2602j) + +For integers `n`, the following integral representation holds:: + + >>> mp.dps = 15 + >>> n = 3 + >>> x = 2.3 + >>> quad(lambda t: exp(x*cos(t))*cos(n*t), [0,pi])/pi + 0.349223221159309 + >>> besseli(n,x) + 0.349223221159309 + +Derivatives and antiderivatives of any order can be computed:: + + >>> mp.dps = 25 + >>> besseli(2, 7.5, 1) + 195.8229038931399062565883 + >>> diff(lambda x: besseli(2,x), 7.5) + 195.8229038931399062565883 + >>> besseli(2, 7.5, 10) + 153.3296508971734525525176 + >>> diff(lambda x: besseli(2,x), 7.5, 10) + 153.3296508971734525525176 + >>> besseli(2,7.5,-1) - besseli(2,3.5,-1) + 202.5043900051930141956876 + >>> quad(lambda x: besseli(2,x), [3.5, 7.5]) + 202.5043900051930141956876 + +""" + +bessely = r""" +``bessely(n, x, derivative=0)`` gives the Bessel function of the second kind, + +.. math :: + + Y_n(x) = \frac{J_n(x) \cos(\pi n) - J_{-n}(x)}{\sin(\pi n)}. + +For `n` an integer, this formula should be understood as a +limit. With *derivative* = `m \ne 0`, the `m`-th derivative + +.. math :: + + \frac{d^m}{dx^m} Y_n(x) + +is computed. + +**Plots** + +.. literalinclude :: /plots/bessely.py +.. image :: /plots/bessely.png +.. literalinclude :: /plots/bessely_c.py +.. 
image :: /plots/bessely_c.png + +**Examples** + +Some values of `Y_n(x)`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> bessely(0,0), bessely(1,0), bessely(2,0) + (-inf, -inf, -inf) + >>> bessely(1, pi) + 0.3588729167767189594679827 + >>> bessely(0.5, 3+4j) + (9.242861436961450520325216 - 3.085042824915332562522402j) + +Arguments may be large:: + + >>> bessely(0, 10000) + 0.00364780555898660588668872 + >>> bessely(2.5, 10**50) + -4.8952500412050989295774e-26 + >>> bessely(2.5, -10**50) + (0.0 + 4.8952500412050989295774e-26j) + +Derivatives and antiderivatives of any order can be computed:: + + >>> bessely(2, 3.5, 1) + 0.3842618820422660066089231 + >>> diff(lambda x: bessely(2, x), 3.5) + 0.3842618820422660066089231 + >>> bessely(0.5, 3.5, 1) + -0.2066598304156764337900417 + >>> diff(lambda x: bessely(0.5, x), 3.5) + -0.2066598304156764337900417 + >>> diff(lambda x: bessely(2, x), 0.5, 10) + -208173867409.5547350101511 + >>> bessely(2, 0.5, 10) + -208173867409.5547350101511 + >>> bessely(2, 100.5, 100) + 0.02668487547301372334849043 + >>> quad(lambda x: bessely(2,x), [1,3]) + -1.377046859093181969213262 + >>> bessely(2,3,-1) - bessely(2,1,-1) + -1.377046859093181969213262 + +""" + +besselk = r""" +``besselk(n, x)`` gives the modified Bessel function of the +second kind, + +.. math :: + + K_n(x) = \frac{\pi}{2} \frac{I_{-n}(x)-I_{n}(x)}{\sin(\pi n)} + +For `n` an integer, this formula should be understood as a +limit. + +**Plots** + +.. literalinclude :: /plots/besselk.py +.. image :: /plots/besselk.png +.. literalinclude :: /plots/besselk_c.py +.. 
image :: /plots/besselk_c.png + +**Examples** + +Evaluation is supported for arbitrary complex arguments:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> besselk(0,1) + 0.4210244382407083333356274 + >>> besselk(0, -1) + (0.4210244382407083333356274 - 3.97746326050642263725661j) + >>> besselk(3.5, 2+3j) + (-0.02090732889633760668464128 + 0.2464022641351420167819697j) + >>> besselk(2+3j, 0.5) + (0.9615816021726349402626083 + 0.1918250181801757416908224j) + +Arguments may be large:: + + >>> besselk(0, 100) + 4.656628229175902018939005e-45 + >>> besselk(1, 10**6) + 4.131967049321725588398296e-434298 + >>> besselk(1, 10**6*j) + (0.001140348428252385844876706 - 0.0005200017201681152909000961j) + >>> besselk(4.5, fmul(10**50, j, exact=True)) + (1.561034538142413947789221e-26 + 1.243554598118700063281496e-25j) + +The point `x = 0` is a singularity (logarithmic if `n = 0`):: + + >>> besselk(0,0) + +inf + >>> besselk(1,0) + +inf + >>> for n in range(-4, 5): + ... print(besselk(n, '1e-1000')) + ... + 4.8e+4001 + 8.0e+3000 + 2.0e+2000 + 1.0e+1000 + 2302.701024509704096466802 + 1.0e+1000 + 2.0e+2000 + 8.0e+3000 + 4.8e+4001 + +""" + +hankel1 = r""" +``hankel1(n,x)`` computes the Hankel function of the first kind, +which is the complex combination of Bessel functions given by + +.. math :: + + H_n^{(1)}(x) = J_n(x) + i Y_n(x). + +**Plots** + +.. literalinclude :: /plots/hankel1.py +.. image :: /plots/hankel1.png +.. literalinclude :: /plots/hankel1_c.py +.. image :: /plots/hankel1_c.png + +**Examples** + +The Hankel function is generally complex-valued:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> hankel1(2, pi) + (0.4854339326315091097054957 - 0.0999007139290278787734903j) + >>> hankel1(3.5, pi) + (0.2340002029630507922628888 - 0.6419643823412927142424049j) +""" + +hankel2 = r""" +``hankel2(n,x)`` computes the Hankel function of the second kind, +which is the complex combination of Bessel functions given by + +.. 
math :: + + H_n^{(2)}(x) = J_n(x) - i Y_n(x). + +**Plots** + +.. literalinclude :: /plots/hankel2.py +.. image :: /plots/hankel2.png +.. literalinclude :: /plots/hankel2_c.py +.. image :: /plots/hankel2_c.png + +**Examples** + +The Hankel function is generally complex-valued:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> hankel2(2, pi) + (0.4854339326315091097054957 + 0.0999007139290278787734903j) + >>> hankel2(3.5, pi) + (0.2340002029630507922628888 + 0.6419643823412927142424049j) +""" + +lambertw = r""" +The Lambert W function `W(z)` is defined as the inverse function +of `w \exp(w)`. In other words, the value of `W(z)` is such that +`z = W(z) \exp(W(z))` for any complex number `z`. + +The Lambert W function is a multivalued function with infinitely +many branches `W_k(z)`, indexed by `k \in \mathbb{Z}`. Each branch +gives a different solution `w` of the equation `z = w \exp(w)`. +All branches are supported by :func:`~mpmath.lambertw`: + +* ``lambertw(z)`` gives the principal solution (branch 0) + +* ``lambertw(z, k)`` gives the solution on branch `k` + +The Lambert W function has two partially real branches: the +principal branch (`k = 0`) is real for real `z > -1/e`, and the +`k = -1` branch is real for `-1/e < z < 0`. All branches except +`k = 0` have a logarithmic singularity at `z = 0`. + +The definition, implementation and choice of branches +is based on [Corless]_. + +**Plots** + +.. literalinclude :: /plots/lambertw.py +.. image :: /plots/lambertw.png +.. literalinclude :: /plots/lambertw_c.py +.. 
image :: /plots/lambertw_c.png + +**Basic examples** + +The Lambert W function is the inverse of `w \exp(w)`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> w = lambertw(1) + >>> w + 0.5671432904097838729999687 + >>> w*exp(w) + 1.0 + +Any branch gives a valid inverse:: + + >>> w = lambertw(1, k=3) + >>> w + (-2.853581755409037807206819 + 17.11353553941214591260783j) + >>> w = lambertw(1, k=25) + >>> w + (-5.047020464221569709378686 + 155.4763860949415867162066j) + >>> chop(w*exp(w)) + 1.0 + +**Applications to equation-solving** + +The Lambert W function may be used to solve various kinds of +equations, such as finding the value of the infinite power +tower `z^{z^{z^{\ldots}}}`:: + + >>> def tower(z, n): + ... if n == 0: + ... return z + ... return z ** tower(z, n-1) + ... + >>> tower(mpf(0.5), 100) + 0.6411857445049859844862005 + >>> -lambertw(-log(0.5))/log(0.5) + 0.6411857445049859844862005 + +**Properties** + +The Lambert W function grows roughly like the natural logarithm +for large arguments:: + + >>> lambertw(1000); log(1000) + 5.249602852401596227126056 + 6.907755278982137052053974 + >>> lambertw(10**100); log(10**100) + 224.8431064451185015393731 + 230.2585092994045684017991 + +The principal branch of the Lambert W function has a rational +Taylor series expansion around `z = 0`:: + + >>> nprint(taylor(lambertw, 0, 6), 10) + [0.0, 1.0, -1.0, 1.5, -2.666666667, 5.208333333, -10.8] + +Some special values and limits are:: + + >>> lambertw(0) + 0.0 + >>> lambertw(1) + 0.5671432904097838729999687 + >>> lambertw(e) + 1.0 + >>> lambertw(inf) + +inf + >>> lambertw(0, k=-1) + -inf + >>> lambertw(0, k=3) + -inf + >>> lambertw(inf, k=2) + (+inf + 12.56637061435917295385057j) + >>> lambertw(inf, k=3) + (+inf + 18.84955592153875943077586j) + >>> lambertw(-inf, k=3) + (+inf + 21.9911485751285526692385j) + +The `k = 0` and `k = -1` branches join at `z = -1/e` where +`W(z) = -1` for both branches. 
Since `-1/e` can only be represented +approximately with binary floating-point numbers, evaluating the +Lambert W function at this point only gives `-1` approximately:: + + >>> lambertw(-1/e, 0) + -0.9999999999998371330228251 + >>> lambertw(-1/e, -1) + -1.000000000000162866977175 + +If `-1/e` happens to round in the negative direction, there might be +a small imaginary part:: + + >>> mp.dps = 15 + >>> lambertw(-1/e) + (-1.0 + 8.22007971483662e-9j) + >>> lambertw(-1/e+eps) + -0.999999966242188 + +**References** + +1. [Corless]_ +""" + +barnesg = r""" +Evaluates the Barnes G-function, which generalizes the +superfactorial (:func:`~mpmath.superfac`) and by extension also the +hyperfactorial (:func:`~mpmath.hyperfac`) to the complex numbers +in an analogous way to how the gamma function generalizes +the ordinary factorial. + +The Barnes G-function may be defined in terms of a Weierstrass +product: + +.. math :: + + G(z+1) = (2\pi)^{z/2} e^{-[z(z+1)+\gamma z^2]/2} + \prod_{n=1}^\infty + \left[\left(1+\frac{z}{n}\right)^ne^{-z+z^2/(2n)}\right] + +For positive integers `n`, we have have relation to superfactorials +`G(n) = \mathrm{sf}(n-2) = 0! \cdot 1! \cdots (n-2)!`. + +**Examples** + +Some elementary values and limits of the Barnes G-function:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> barnesg(1), barnesg(2), barnesg(3) + (1.0, 1.0, 1.0) + >>> barnesg(4) + 2.0 + >>> barnesg(5) + 12.0 + >>> barnesg(6) + 288.0 + >>> barnesg(7) + 34560.0 + >>> barnesg(8) + 24883200.0 + >>> barnesg(inf) + +inf + >>> barnesg(0), barnesg(-1), barnesg(-2) + (0.0, 0.0, 0.0) + +Closed-form values are known for some rational arguments:: + + >>> barnesg('1/2') + 0.603244281209446 + >>> sqrt(exp(0.25+log(2)/12)/sqrt(pi)/glaisher**3) + 0.603244281209446 + >>> barnesg('1/4') + 0.29375596533861 + >>> nthroot(exp('3/8')/exp(catalan/pi)/ + ... 
gamma(0.25)**3/sqrt(glaisher)**9, 4) + 0.29375596533861 + +The Barnes G-function satisfies the functional equation +`G(z+1) = \Gamma(z) G(z)`:: + + >>> z = pi + >>> barnesg(z+1) + 2.39292119327948 + >>> gamma(z)*barnesg(z) + 2.39292119327948 + +The asymptotic growth rate of the Barnes G-function is related to +the Glaisher-Kinkelin constant:: + + >>> limit(lambda n: barnesg(n+1)/(n**(n**2/2-mpf(1)/12)* + ... (2*pi)**(n/2)*exp(-3*n**2/4)), inf) + 0.847536694177301 + >>> exp('1/12')/glaisher + 0.847536694177301 + +The Barnes G-function can be differentiated in closed form:: + + >>> z = 3 + >>> diff(barnesg, z) + 0.264507203401607 + >>> barnesg(z)*((z-1)*psi(0,z)-z+(log(2*pi)+1)/2) + 0.264507203401607 + +Evaluation is supported for arbitrary arguments and at arbitrary +precision:: + + >>> barnesg(6.5) + 2548.7457695685 + >>> barnesg(-pi) + 0.00535976768353037 + >>> barnesg(3+4j) + (-0.000676375932234244 - 4.42236140124728e-5j) + >>> mp.dps = 50 + >>> barnesg(1/sqrt(2)) + 0.81305501090451340843586085064413533788206204124732 + >>> q = barnesg(10j) + >>> q.real + 0.000000000021852360840356557241543036724799812371995850552234 + >>> q.imag + -0.00000000000070035335320062304849020654215545839053210041457588 + >>> mp.dps = 15 + >>> barnesg(100) + 3.10361006263698e+6626 + >>> barnesg(-101) + 0.0 + >>> barnesg(-10.5) + 5.94463017605008e+25 + >>> barnesg(-10000.5) + -6.14322868174828e+167480422 + >>> barnesg(1000j) + (5.21133054865546e-1173597 + 4.27461836811016e-1173597j) + >>> barnesg(-1000+1000j) + (2.43114569750291e+1026623 + 2.24851410674842e+1026623j) + + +**References** + +1. Whittaker & Watson, *A Course of Modern Analysis*, + Cambridge University Press, 4th edition (1927), p.264 +2. http://en.wikipedia.org/wiki/Barnes_G-function +3. http://mathworld.wolfram.com/BarnesG-Function.html + +""" + +superfac = r""" +Computes the superfactorial, defined as the product of +consecutive factorials + +.. math :: + + \mathrm{sf}(n) = \prod_{k=1}^n k! 
+ +For general complex `z`, `\mathrm{sf}(z)` is defined +in terms of the Barnes G-function (see :func:`~mpmath.barnesg`). + +**Examples** + +The first few superfactorials are (OEIS A000178):: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> for n in range(10): + ... print("%s %s" % (n, superfac(n))) + ... + 0 1.0 + 1 1.0 + 2 2.0 + 3 12.0 + 4 288.0 + 5 34560.0 + 6 24883200.0 + 7 125411328000.0 + 8 5.05658474496e+15 + 9 1.83493347225108e+21 + +Superfactorials grow very rapidly:: + + >>> superfac(1000) + 3.24570818422368e+1177245 + >>> superfac(10**10) + 2.61398543581249e+467427913956904067453 + +Evaluation is supported for arbitrary arguments:: + + >>> mp.dps = 25 + >>> superfac(pi) + 17.20051550121297985285333 + >>> superfac(2+3j) + (-0.005915485633199789627466468 + 0.008156449464604044948738263j) + >>> diff(superfac, 1) + 0.2645072034016070205673056 + +**References** + +1. http://oeis.org/A000178 + +""" + + +hyperfac = r""" +Computes the hyperfactorial, defined for integers as the product + +.. math :: + + H(n) = \prod_{k=1}^n k^k. + + +The hyperfactorial satisfies the recurrence formula `H(z) = z^z H(z-1)`. +It can be defined more generally in terms of the Barnes G-function (see +:func:`~mpmath.barnesg`) and the gamma function by the formula + +.. math :: + + H(z) = \frac{\Gamma(z+1)^z}{G(z)}. + +The extension to complex numbers can also be done via +the integral representation + +.. math :: + + H(z) = (2\pi)^{-z/2} \exp \left[ + {z+1 \choose 2} + \int_0^z \log(t!)\,dt + \right]. + +**Examples** + +The rapidly-growing sequence of hyperfactorials begins +(OEIS A002109):: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> for n in range(10): + ... print("%s %s" % (n, hyperfac(n))) + ... 
+ 0 1.0 + 1 1.0 + 2 4.0 + 3 108.0 + 4 27648.0 + 5 86400000.0 + 6 4031078400000.0 + 7 3.3197663987712e+18 + 8 5.56964379417266e+25 + 9 2.15779412229419e+34 + +Some even larger hyperfactorials are:: + + >>> hyperfac(1000) + 5.46458120882585e+1392926 + >>> hyperfac(10**10) + 4.60408207642219e+489142638002418704309 + +The hyperfactorial can be evaluated for arbitrary arguments:: + + >>> hyperfac(0.5) + 0.880449235173423 + >>> diff(hyperfac, 1) + 0.581061466795327 + >>> hyperfac(pi) + 205.211134637462 + >>> hyperfac(-10+1j) + (3.01144471378225e+46 - 2.45285242480185e+46j) + +The recurrence property of the hyperfactorial holds +generally:: + + >>> z = 3-4*j + >>> hyperfac(z) + (-4.49795891462086e-7 - 6.33262283196162e-7j) + >>> z**z * hyperfac(z-1) + (-4.49795891462086e-7 - 6.33262283196162e-7j) + >>> z = mpf(-0.6) + >>> chop(z**z * hyperfac(z-1)) + 1.28170142849352 + >>> hyperfac(z) + 1.28170142849352 + +The hyperfactorial may also be computed using the integral +definition:: + + >>> z = 2.5 + >>> hyperfac(z) + 15.9842119922237 + >>> (2*pi)**(-z/2)*exp(binomial(z+1,2) + + ... quad(lambda t: loggamma(t+1), [0, z])) + 15.9842119922237 + +:func:`~mpmath.hyperfac` supports arbitrary-precision evaluation:: + + >>> mp.dps = 50 + >>> hyperfac(10) + 215779412229418562091680268288000000000000000.0 + >>> hyperfac(1/sqrt(2)) + 0.89404818005227001975423476035729076375705084390942 + +**References** + +1. http://oeis.org/A002109 +2. http://mathworld.wolfram.com/Hyperfactorial.html + +""" + +rgamma = r""" +Computes the reciprocal of the gamma function, `1/\Gamma(z)`. This +function evaluates to zero at the poles +of the gamma function, `z = 0, -1, -2, \ldots`. 
+ +**Examples** + +Basic examples:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> rgamma(1) + 1.0 + >>> rgamma(4) + 0.1666666666666666666666667 + >>> rgamma(0); rgamma(-1) + 0.0 + 0.0 + >>> rgamma(1000) + 2.485168143266784862783596e-2565 + >>> rgamma(inf) + 0.0 + +A definite integral that can be evaluated in terms of elementary +integrals:: + + >>> quad(rgamma, [0,inf]) + 2.807770242028519365221501 + >>> e + quad(lambda t: exp(-t)/(pi**2+log(t)**2), [0,inf]) + 2.807770242028519365221501 +""" + +loggamma = r""" +Computes the principal branch of the log-gamma function, +`\ln \Gamma(z)`. Unlike `\ln(\Gamma(z))`, which has infinitely many +complex branch cuts, the principal log-gamma function only has a single +branch cut along the negative half-axis. The principal branch +continuously matches the asymptotic Stirling expansion + +.. math :: + + \ln \Gamma(z) \sim \frac{\ln(2 \pi)}{2} + + \left(z-\frac{1}{2}\right) \ln(z) - z + O(z^{-1}). + +The real parts of both functions agree, but their imaginary +parts generally differ by `2 n \pi` for some `n \in \mathbb{Z}`. +They coincide for `z \in \mathbb{R}, z > 0`. + +Computationally, it is advantageous to use :func:`~mpmath.loggamma` +instead of :func:`~mpmath.gamma` for extremely large arguments. 
+ +**Examples** + +Comparing with `\ln(\Gamma(z))`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> loggamma('13.2'); log(gamma('13.2')) + 20.49400419456603678498394 + 20.49400419456603678498394 + >>> loggamma(3+4j) + (-1.756626784603784110530604 + 4.742664438034657928194889j) + >>> log(gamma(3+4j)) + (-1.756626784603784110530604 - 1.540520869144928548730397j) + >>> log(gamma(3+4j)) + 2*pi*j + (-1.756626784603784110530604 + 4.742664438034657928194889j) + +Note the imaginary parts for negative arguments:: + + >>> loggamma(-0.5); loggamma(-1.5); loggamma(-2.5) + (1.265512123484645396488946 - 3.141592653589793238462643j) + (0.8600470153764810145109327 - 6.283185307179586476925287j) + (-0.05624371649767405067259453 - 9.42477796076937971538793j) + +Some special values:: + + >>> loggamma(1); loggamma(2) + 0.0 + 0.0 + >>> loggamma(3); +ln2 + 0.6931471805599453094172321 + 0.6931471805599453094172321 + >>> loggamma(3.5); log(15*sqrt(pi)/8) + 1.200973602347074224816022 + 1.200973602347074224816022 + >>> loggamma(inf) + +inf + +Huge arguments are permitted:: + + >>> loggamma('1e30') + 6.807755278982137052053974e+31 + >>> loggamma('1e300') + 6.897755278982137052053974e+302 + >>> loggamma('1e3000') + 6.906755278982137052053974e+3003 + >>> loggamma('1e100000000000000000000') + 2.302585092994045684007991e+100000000000000000020 + >>> loggamma('1e30j') + (-1.570796326794896619231322e+30 + 6.807755278982137052053974e+31j) + >>> loggamma('1e300j') + (-1.570796326794896619231322e+300 + 6.897755278982137052053974e+302j) + >>> loggamma('1e3000j') + (-1.570796326794896619231322e+3000 + 6.906755278982137052053974e+3003j) + +The log-gamma function can be integrated analytically +on any interval of unit length:: + + >>> z = 0 + >>> quad(loggamma, [z,z+1]); log(2*pi)/2 + 0.9189385332046727417803297 + 0.9189385332046727417803297 + >>> z = 3+4j + >>> quad(loggamma, [z,z+1]); (log(z)-1)*z + log(2*pi)/2 + (-0.9619286014994750641314421 + 5.219637303741238195688575j) + 
(-0.9619286014994750641314421 + 5.219637303741238195688575j) + +The derivatives of the log-gamma function are given by the +polygamma function (:func:`~mpmath.psi`):: + + >>> diff(loggamma, -4+3j); psi(0, -4+3j) + (1.688493531222971393607153 + 2.554898911356806978892748j) + (1.688493531222971393607153 + 2.554898911356806978892748j) + >>> diff(loggamma, -4+3j, 2); psi(1, -4+3j) + (-0.1539414829219882371561038 - 0.1020485197430267719746479j) + (-0.1539414829219882371561038 - 0.1020485197430267719746479j) + +The log-gamma function satisfies an additive form of the +recurrence relation for the ordinary gamma function:: + + >>> z = 2+3j + >>> loggamma(z); loggamma(z+1) - log(z) + (-2.092851753092733349564189 + 2.302396543466867626153708j) + (-2.092851753092733349564189 + 2.302396543466867626153708j) + +""" + +siegeltheta = r""" +Computes the Riemann-Siegel theta function, + +.. math :: + + \theta(t) = \frac{ + \log\Gamma\left(\frac{1+2it}{4}\right) - + \log\Gamma\left(\frac{1-2it}{4}\right) + }{2i} - \frac{\log \pi}{2} t. + +The Riemann-Siegel theta function is important in +providing the phase factor for the Z-function +(see :func:`~mpmath.siegelz`). 
Evaluation is supported for real and +complex arguments:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> siegeltheta(0) + 0.0 + >>> siegeltheta(inf) + +inf + >>> siegeltheta(-inf) + -inf + >>> siegeltheta(1) + -1.767547952812290388302216 + >>> siegeltheta(10+0.25j) + (-3.068638039426838572528867 + 0.05804937947429712998395177j) + +Arbitrary derivatives may be computed with derivative = k + + >>> siegeltheta(1234, derivative=2) + 0.0004051864079114053109473741 + >>> diff(siegeltheta, 1234, n=2) + 0.0004051864079114053109473741 + + +The Riemann-Siegel theta function has odd symmetry around `t = 0`, +two local extreme points and three real roots including 0 (located +symmetrically):: + + >>> nprint(chop(taylor(siegeltheta, 0, 5))) + [0.0, -2.68609, 0.0, 2.69433, 0.0, -6.40218] + >>> findroot(diffun(siegeltheta), 7) + 6.28983598883690277966509 + >>> findroot(siegeltheta, 20) + 17.84559954041086081682634 + +For large `t`, there is a famous asymptotic formula +for `\theta(t)`, to first order given by:: + + >>> t = mpf(10**6) + >>> siegeltheta(t) + 5488816.353078403444882823 + >>> -t*log(2*pi/t)/2-t/2 + 5488816.745777464310273645 +""" + +grampoint = r""" +Gives the `n`-th Gram point `g_n`, defined as the solution +to the equation `\theta(g_n) = \pi n` where `\theta(t)` +is the Riemann-Siegel theta function (:func:`~mpmath.siegeltheta`). + +The first few Gram points are:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> grampoint(0) + 17.84559954041086081682634 + >>> grampoint(1) + 23.17028270124630927899664 + >>> grampoint(2) + 27.67018221781633796093849 + >>> grampoint(3) + 31.71797995476405317955149 + +Checking the definition:: + + >>> siegeltheta(grampoint(3)) + 9.42477796076937971538793 + >>> 3*pi + 9.42477796076937971538793 + +A large Gram point:: + + >>> grampoint(10**10) + 3293531632.728335454561153 + +Gram points are useful when studying the Z-function +(:func:`~mpmath.siegelz`). 
See the documentation of that function +for additional examples. + +:func:`~mpmath.grampoint` can solve the defining equation for +nonintegral `n`. There is a fixed point where `g(x) = x`:: + + >>> findroot(lambda x: grampoint(x) - x, 10000) + 9146.698193171459265866198 + +**References** + +1. http://mathworld.wolfram.com/GramPoint.html + +""" + +siegelz = r""" +Computes the Z-function, also known as the Riemann-Siegel Z function, + +.. math :: + + Z(t) = e^{i \theta(t)} \zeta(1/2+it) + +where `\zeta(s)` is the Riemann zeta function (:func:`~mpmath.zeta`) +and where `\theta(t)` denotes the Riemann-Siegel theta function +(see :func:`~mpmath.siegeltheta`). + +Evaluation is supported for real and complex arguments:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> siegelz(1) + -0.7363054628673177346778998 + >>> siegelz(3+4j) + (-0.1852895764366314976003936 - 0.2773099198055652246992479j) + +The first four derivatives are supported, using the +optional *derivative* keyword argument:: + + >>> siegelz(1234567, derivative=3) + 56.89689348495089294249178 + >>> diff(siegelz, 1234567, n=3) + 56.89689348495089294249178 + + +The Z-function has a Maclaurin expansion:: + + >>> nprint(chop(taylor(siegelz, 0, 4))) + [-1.46035, 0.0, 2.73588, 0.0, -8.39357] + +The Z-function `Z(t)` is equal to `\pm |\zeta(s)|` on the +critical line `s = 1/2+it` (i.e. for real arguments `t` +to `Z`). Its zeros coincide with those of the Riemann zeta +function:: + + >>> findroot(siegelz, 14) + 14.13472514173469379045725 + >>> findroot(siegelz, 20) + 21.02203963877155499262848 + >>> findroot(zeta, 0.5+14j) + (0.5 + 14.13472514173469379045725j) + >>> findroot(zeta, 0.5+20j) + (0.5 + 21.02203963877155499262848j) + +Since the Z-function is real-valued on the critical line +(and unlike `|\zeta(s)|` analytic), it is useful for +investigating the zeros of the Riemann zeta function. 
+For example, one can use a root-finding algorithm based +on sign changes:: + + >>> findroot(siegelz, [100, 200], solver='bisect') + 176.4414342977104188888926 + +To locate roots, Gram points `g_n` which can be computed +by :func:`~mpmath.grampoint` are useful. If `(-1)^n Z(g_n)` is +positive for two consecutive `n`, then `Z(t)` must have +a zero between those points:: + + >>> g10 = grampoint(10) + >>> g11 = grampoint(11) + >>> (-1)**10 * siegelz(g10) > 0 + True + >>> (-1)**11 * siegelz(g11) > 0 + True + >>> findroot(siegelz, [g10, g11], solver='bisect') + 56.44624769706339480436776 + >>> g10, g11 + (54.67523744685325626632663, 57.54516517954725443703014) + +""" + +riemannr = r""" +Evaluates the Riemann R function, a smooth approximation of the +prime counting function `\pi(x)` (see :func:`~mpmath.primepi`). The Riemann +R function gives a fast numerical approximation useful e.g. to +roughly estimate the number of primes in a given interval. + +The Riemann R function is computed using the rapidly convergent Gram +series, + +.. math :: + + R(x) = 1 + \sum_{k=1}^{\infty} + \frac{\log^k x}{k k! \zeta(k+1)}. + +From the Gram series, one sees that the Riemann R function is a +well-defined analytic function (except for a branch cut along +the negative real half-axis); it can be evaluated for arbitrary +real or complex arguments. + +The Riemann R function gives a very accurate approximation +of the prime counting function. For example, it is wrong by at +most 2 for `x < 1000`, and for `x = 10^9` differs from the exact +value of `\pi(x)` by 79, or less than two parts in a million. +It is about 10 times more accurate than the logarithmic integral +estimate (see :func:`~mpmath.li`), which however is even faster to evaluate. +It is orders of magnitude more accurate than the extremely +fast `x/\log x` estimate. 
+ +**Examples** + +For small arguments, the Riemann R function almost exactly +gives the prime counting function if rounded to the nearest +integer:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> primepi(50), riemannr(50) + (15, 14.9757023241462) + >>> max(abs(primepi(n)-int(round(riemannr(n)))) for n in range(100)) + 1 + >>> max(abs(primepi(n)-int(round(riemannr(n)))) for n in range(300)) + 2 + +The Riemann R function can be evaluated for arguments far too large +for exact determination of `\pi(x)` to be computationally +feasible with any presently known algorithm:: + + >>> riemannr(10**30) + 1.46923988977204e+28 + >>> riemannr(10**100) + 4.3619719871407e+97 + >>> riemannr(10**1000) + 4.3448325764012e+996 + +A comparison of the Riemann R function and logarithmic integral estimates +for `\pi(x)` using exact values of `\pi(10^n)` up to `n = 9`. +The fractional error is shown in parentheses:: + + >>> exact = [4,25,168,1229,9592,78498,664579,5761455,50847534] + >>> for n, p in enumerate(exact): + ... n += 1 + ... r, l = riemannr(10**n), li(10**n) + ... rerr, lerr = nstr((r-p)/p,3), nstr((l-p)/p,3) + ... print("%i %i %s(%s) %s(%s)" % (n, p, r, rerr, l, lerr)) + ... 
+ 1 4 4.56458314100509(0.141) 6.1655995047873(0.541) + 2 25 25.6616332669242(0.0265) 30.1261415840796(0.205) + 3 168 168.359446281167(0.00214) 177.609657990152(0.0572) + 4 1229 1226.93121834343(-0.00168) 1246.13721589939(0.0139) + 5 9592 9587.43173884197(-0.000476) 9629.8090010508(0.00394) + 6 78498 78527.3994291277(0.000375) 78627.5491594622(0.00165) + 7 664579 664667.447564748(0.000133) 664918.405048569(0.000511) + 8 5761455 5761551.86732017(1.68e-5) 5762209.37544803(0.000131) + 9 50847534 50847455.4277214(-1.55e-6) 50849234.9570018(3.35e-5) + +The derivative of the Riemann R function gives the approximate +probability for a number of magnitude `x` to be prime:: + + >>> diff(riemannr, 1000) + 0.141903028110784 + >>> mpf(primepi(1050) - primepi(950)) / 100 + 0.15 + +Evaluation is supported for arbitrary arguments and at arbitrary +precision:: + + >>> mp.dps = 30 + >>> riemannr(7.5) + 3.72934743264966261918857135136 + >>> riemannr(-4+2j) + (-0.551002208155486427591793957644 + 2.16966398138119450043195899746j) + +""" + +primepi = r""" +Evaluates the prime counting function, `\pi(x)`, which gives +the number of primes less than or equal to `x`. The argument +`x` may be fractional. + +The prime counting function is very expensive to evaluate +precisely for large `x`, and the present implementation is +not optimized in any way. For numerical approximation of the +prime counting function, it is better to use :func:`~mpmath.primepi2` +or :func:`~mpmath.riemannr`. + +Some values of the prime counting function:: + + >>> from mpmath import * + >>> [primepi(k) for k in range(20)] + [0, 0, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 6, 6, 6, 6, 7, 7, 8] + >>> primepi(3.5) + 2 + >>> primepi(100000) + 9592 + +""" + +primepi2 = r""" +Returns an interval (as an ``mpi`` instance) providing bounds +for the value of the prime counting function `\pi(x)`. For small +`x`, :func:`~mpmath.primepi2` returns an exact interval based on +the output of :func:`~mpmath.primepi`. 
For `x > 2656`, a loose interval +based on Schoenfeld's inequality + +.. math :: + + |\pi(x) - \mathrm{li}(x)| < \frac{\sqrt x \log x}{8 \pi} + +is returned. This estimate is rigorous assuming the truth of +the Riemann hypothesis, and can be computed very quickly. + +**Examples** + +Exact values of the prime counting function for small `x`:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> iv.dps = 15; iv.pretty = True + >>> primepi2(10) + [4.0, 4.0] + >>> primepi2(100) + [25.0, 25.0] + >>> primepi2(1000) + [168.0, 168.0] + +Loose intervals are generated for moderately large `x`: + + >>> primepi2(10000), primepi(10000) + ([1209.0, 1283.0], 1229) + >>> primepi2(50000), primepi(50000) + ([5070.0, 5263.0], 5133) + +As `x` increases, the absolute error gets worse while the relative +error improves. The exact value of `\pi(10^{23})` is +1925320391606803968923, and :func:`~mpmath.primepi2` gives 9 significant +digits:: + + >>> p = primepi2(10**23) + >>> p + [1.9253203909477020467e+21, 1.925320392280406229e+21] + >>> mpf(p.delta) / mpf(p.a) + 6.9219865355293e-10 + +A more precise, nonrigorous estimate for `\pi(x)` can be +obtained using the Riemann R function (:func:`~mpmath.riemannr`). +For large enough `x`, the value returned by :func:`~mpmath.primepi2` +essentially amounts to a small perturbation of the value returned by +:func:`~mpmath.riemannr`:: + + >>> primepi2(10**100) + [4.3619719871407024816e+97, 4.3619719871407032404e+97] + >>> riemannr(10**100) + 4.3619719871407e+97 +""" + +primezeta = r""" +Computes the prime zeta function, which is defined +in analogy with the Riemann zeta function (:func:`~mpmath.zeta`) +as + +.. math :: + + P(s) = \sum_p \frac{1}{p^s} + +where the sum is taken over all prime numbers `p`. Although +this sum only converges for `\mathrm{Re}(s) > 1`, the +function is defined by analytic continuation in the +half-plane `\mathrm{Re}(s) > 0`. 
+ +**Examples** + +Arbitrary-precision evaluation for real and complex arguments is +supported:: + + >>> from mpmath import * + >>> mp.dps = 30; mp.pretty = True + >>> primezeta(2) + 0.452247420041065498506543364832 + >>> primezeta(pi) + 0.15483752698840284272036497397 + >>> mp.dps = 50 + >>> primezeta(3) + 0.17476263929944353642311331466570670097541212192615 + >>> mp.dps = 20 + >>> primezeta(3+4j) + (-0.12085382601645763295 - 0.013370403397787023602j) + +The prime zeta function has a logarithmic pole at `s = 1`, +with residue equal to the difference of the Mertens and +Euler constants:: + + >>> primezeta(1) + +inf + >>> extradps(25)(lambda x: primezeta(1+x)+log(x))(+eps) + -0.31571845205389007685 + >>> mertens-euler + -0.31571845205389007685 + +The analytic continuation to `0 < \mathrm{Re}(s) \le 1` +is implemented. In this strip the function exhibits +very complex behavior; on the unit interval, it has poles at +`1/n` for every squarefree integer `n`:: + + >>> primezeta(0.5) # Pole at s = 1/2 + (-inf + 3.1415926535897932385j) + >>> primezeta(0.25) + (-1.0416106801757269036 + 0.52359877559829887308j) + >>> primezeta(0.5+10j) + (0.54892423556409790529 + 0.45626803423487934264j) + +Although evaluation works in principle for any `\mathrm{Re}(s) > 0`, +it should be noted that the evaluation time increases exponentially +as `s` approaches the imaginary axis. + +For large `\mathrm{Re}(s)`, `P(s)` is asymptotic to `2^{-s}`:: + + >>> primezeta(inf) + 0.0 + >>> primezeta(10), mpf(2)**-10 + (0.00099360357443698021786, 0.0009765625) + >>> primezeta(1000) + 9.3326361850321887899e-302 + >>> primezeta(1000+1000j) + (-3.8565440833654995949e-302 - 8.4985390447553234305e-302j) + +**References** + +Carl-Erik Froberg, "On the prime zeta function", +BIT 8 (1968), pp. 187-202. + +""" + +bernpoly = r""" +Evaluates the Bernoulli polynomial `B_n(z)`. 
+ +The first few Bernoulli polynomials are:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> for n in range(6): + ... nprint(chop(taylor(lambda x: bernpoly(n,x), 0, n))) + ... + [1.0] + [-0.5, 1.0] + [0.166667, -1.0, 1.0] + [0.0, 0.5, -1.5, 1.0] + [-0.0333333, 0.0, 1.0, -2.0, 1.0] + [0.0, -0.166667, 0.0, 1.66667, -2.5, 1.0] + +At `z = 0`, the Bernoulli polynomial evaluates to a +Bernoulli number (see :func:`~mpmath.bernoulli`):: + + >>> bernpoly(12, 0), bernoulli(12) + (-0.253113553113553, -0.253113553113553) + >>> bernpoly(13, 0), bernoulli(13) + (0.0, 0.0) + +Evaluation is accurate for large `n` and small `z`:: + + >>> mp.dps = 25 + >>> bernpoly(100, 0.5) + 2.838224957069370695926416e+78 + >>> bernpoly(1000, 10.5) + 5.318704469415522036482914e+1769 + +""" + +polylog = r""" +Computes the polylogarithm, defined by the sum + +.. math :: + + \mathrm{Li}_s(z) = \sum_{k=1}^{\infty} \frac{z^k}{k^s}. + +This series is convergent only for `|z| < 1`, so elsewhere +the analytic continuation is implied. + +The polylogarithm should not be confused with the logarithmic +integral (also denoted by Li or li), which is implemented +as :func:`~mpmath.li`. + +**Examples** + +The polylogarithm satisfies a huge number of functional identities. 
+A sample of polylogarithm evaluations is shown below:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> polylog(1,0.5), log(2) + (0.693147180559945, 0.693147180559945) + >>> polylog(2,0.5), (pi**2-6*log(2)**2)/12 + (0.582240526465012, 0.582240526465012) + >>> polylog(2,-phi), -log(phi)**2-pi**2/10 + (-1.21852526068613, -1.21852526068613) + >>> polylog(3,0.5), 7*zeta(3)/8-pi**2*log(2)/12+log(2)**3/6 + (0.53721319360804, 0.53721319360804) + +:func:`~mpmath.polylog` can evaluate the analytic continuation of the +polylogarithm when `s` is an integer:: + + >>> polylog(2, 10) + (0.536301287357863 - 7.23378441241546j) + >>> polylog(2, -10) + -4.1982778868581 + >>> polylog(2, 10j) + (-3.05968879432873 + 3.71678149306807j) + >>> polylog(-2, 10) + -0.150891632373114 + >>> polylog(-2, -10) + 0.067618332081142 + >>> polylog(-2, 10j) + (0.0384353698579347 + 0.0912451798066779j) + +Some more examples, with arguments on the unit circle (note that +the series definition cannot be used for computation here):: + + >>> polylog(2,j) + (-0.205616758356028 + 0.915965594177219j) + >>> j*catalan-pi**2/48 + (-0.205616758356028 + 0.915965594177219j) + >>> polylog(3,exp(2*pi*j/3)) + (-0.534247512515375 + 0.765587078525922j) + >>> -4*zeta(3)/9 + 2*j*pi**3/81 + (-0.534247512515375 + 0.765587078525921j) + +Polylogarithms of different order are related by integration +and differentiation:: + + >>> s, z = 3, 0.5 + >>> polylog(s+1, z) + 0.517479061673899 + >>> quad(lambda t: polylog(s,t)/t, [0, z]) + 0.517479061673899 + >>> z*diff(lambda t: polylog(s+2,t), z) + 0.517479061673899 + +Taylor series expansions around `z = 0` are:: + + >>> for n in range(-3, 4): + ... nprint(taylor(lambda x: polylog(n,x), 0, 5)) + ... 
+ [0.0, 1.0, 8.0, 27.0, 64.0, 125.0] + [0.0, 1.0, 4.0, 9.0, 16.0, 25.0] + [0.0, 1.0, 2.0, 3.0, 4.0, 5.0] + [0.0, 1.0, 1.0, 1.0, 1.0, 1.0] + [0.0, 1.0, 0.5, 0.333333, 0.25, 0.2] + [0.0, 1.0, 0.25, 0.111111, 0.0625, 0.04] + [0.0, 1.0, 0.125, 0.037037, 0.015625, 0.008] + +The series defining the polylogarithm is simultaneously +a Taylor series and an L-series. For certain values of `z`, the +polylogarithm reduces to a pure zeta function:: + + >>> polylog(pi, 1), zeta(pi) + (1.17624173838258, 1.17624173838258) + >>> polylog(pi, -1), -altzeta(pi) + (-0.909670702980385, -0.909670702980385) + +Evaluation for arbitrary, nonintegral `s` is supported +for `z` within the unit circle: + + >>> polylog(3+4j, 0.25) + (0.24258605789446 - 0.00222938275488344j) + >>> nsum(lambda k: 0.25**k / k**(3+4j), [1,inf]) + (0.24258605789446 - 0.00222938275488344j) + +It is also supported outside of the unit circle:: + + >>> polylog(1+j, 20+40j) + (-7.1421172179728 - 3.92726697721369j) + >>> polylog(1+j, 200+400j) + (-5.41934747194626 - 9.94037752563927j) + +**References** + +1. Richard Crandall, "Note on fast polylogarithm computation" + http://www.reed.edu/physics/faculty/crandall/papers/Polylog.pdf +2. http://en.wikipedia.org/wiki/Polylogarithm +3. http://mathworld.wolfram.com/Polylogarithm.html + +""" + +bell = r""" +For `n` a nonnegative integer, ``bell(n,x)`` evaluates the Bell +polynomial `B_n(x)`, the first few of which are + +.. math :: + + B_0(x) = 1 + + B_1(x) = x + + B_2(x) = x^2+x + + B_3(x) = x^3+3x^2+x + +If `x = 1` or :func:`~mpmath.bell` is called with only one argument, it +gives the `n`-th Bell number `B_n`, which is the number of +partitions of a set with `n` elements. By setting the precision to +at least `\log_{10} B_n` digits, :func:`~mpmath.bell` provides fast +calculation of exact Bell numbers. + +In general, :func:`~mpmath.bell` computes + +.. 
math :: + + B_n(x) = e^{-x} \left(\mathrm{sinc}(\pi n) + E_n(x)\right) + +where `E_n(x)` is the generalized exponential function implemented +by :func:`~mpmath.polyexp`. This is an extension of Dobinski's formula [1], +where the modification is the sinc term ensuring that `B_n(x)` is +continuous in `n`; :func:`~mpmath.bell` can thus be evaluated, +differentiated, etc for arbitrary complex arguments. + +**Examples** + +Simple evaluations:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> bell(0, 2.5) + 1.0 + >>> bell(1, 2.5) + 2.5 + >>> bell(2, 2.5) + 8.75 + +Evaluation for arbitrary complex arguments:: + + >>> bell(5.75+1j, 2-3j) + (-10767.71345136587098445143 - 15449.55065599872579097221j) + +The first few Bell polynomials:: + + >>> for k in range(7): + ... nprint(taylor(lambda x: bell(k,x), 0, k)) + ... + [1.0] + [0.0, 1.0] + [0.0, 1.0, 1.0] + [0.0, 1.0, 3.0, 1.0] + [0.0, 1.0, 7.0, 6.0, 1.0] + [0.0, 1.0, 15.0, 25.0, 10.0, 1.0] + [0.0, 1.0, 31.0, 90.0, 65.0, 15.0, 1.0] + +The first few Bell numbers and complementary Bell numbers:: + + >>> [int(bell(k)) for k in range(10)] + [1, 1, 2, 5, 15, 52, 203, 877, 4140, 21147] + >>> [int(bell(k,-1)) for k in range(10)] + [1, -1, 0, 1, 1, -2, -9, -9, 50, 267] + +Large Bell numbers:: + + >>> mp.dps = 50 + >>> bell(50) + 185724268771078270438257767181908917499221852770.0 + >>> bell(50,-1) + -29113173035759403920216141265491160286912.0 + +Some even larger values:: + + >>> mp.dps = 25 + >>> bell(1000,-1) + -1.237132026969293954162816e+1869 + >>> bell(1000) + 2.989901335682408421480422e+1927 + >>> bell(1000,2) + 6.591553486811969380442171e+1987 + >>> bell(1000,100.5) + 9.101014101401543575679639e+2529 + +A determinant identity satisfied by Bell numbers:: + + >>> mp.dps = 15 + >>> N = 8 + >>> det([[bell(k+j) for j in range(N)] for k in range(N)]) + 125411328000.0 + >>> superfac(N-1) + 125411328000.0 + +**References** + +1. 
http://mathworld.wolfram.com/DobinskisFormula.html
+
+"""
+
+polyexp = r"""
+Evaluates the polyexponential function, defined for arbitrary
+complex `s`, `z` by the series
+
+.. math ::
+
+    E_s(z) = \sum_{k=1}^{\infty} \frac{k^s}{k!} z^k.
+
+`E_s(z)` is constructed from the exponential function analogously
+to how the polylogarithm is constructed from the ordinary
+logarithm; as a function of `s` (with `z` fixed), `E_s` is an L-series.
+It is an entire function of both `s` and `z`.
+
+The polyexponential function provides a generalization of the
+Bell polynomials `B_n(x)` (see :func:`~mpmath.bell`) to noninteger orders `n`.
+In terms of the Bell polynomials,
+
+.. math ::
+
+    E_s(z) = e^z B_s(z) - \mathrm{sinc}(\pi s).
+
+Note that `B_n(x)` and `e^{-x} E_n(x)` are identical if `n`
+is a nonzero integer, but not otherwise. In particular, they differ
+at `n = 0`.
+
+**Examples**
+
+Evaluating a series::
+
+    >>> from mpmath import *
+    >>> mp.dps = 25; mp.pretty = True
+    >>> nsum(lambda k: sqrt(k)/fac(k), [1,inf])
+    2.101755547733791780315904
+    >>> polyexp(0.5,1)
+    2.101755547733791780315904
+
+Evaluation for arbitrary arguments::
+
+    >>> polyexp(-3-4j, 2.5+2j)
+    (2.351660261190434618268706 + 1.202966666673054671364215j)
+
+Evaluation is accurate for tiny function values::
+
+    >>> polyexp(4, -100)
+    3.499471750566824369520223e-36
+
+If `n` is a nonpositive integer, `E_n` reduces to a special
+instance of the hypergeometric function `\,_pF_q`::
+
+    >>> n = 3
+    >>> x = pi
+    >>> polyexp(-n,x)
+    4.042192318847986561771779
+    >>> x*hyper([1]*(n+1), [2]*(n+1), x)
+    4.042192318847986561771779
+
+"""
+
+cyclotomic = r"""
+Evaluates the cyclotomic polynomial `\Phi_n(x)`, defined by
+
+.. math ::
+
+    \Phi_n(x) = \prod_{\zeta} (x - \zeta)
+
+where `\zeta` ranges over all primitive `n`-th roots of unity
+(see :func:`~mpmath.unitroots`). An equivalent representation, used
+for computation, is
+
+.. 
math ::
+
+    \Phi_n(x) = \prod_{d\mid n}(x^d-1)^{\mu(n/d)}
+
+where `\mu(m)` denotes the Moebius function. The cyclotomic
+polynomials are integer polynomials, the first of which can be
+written explicitly as
+
+.. math ::
+
+    \Phi_0(x) = 1
+
+    \Phi_1(x) = x - 1
+
+    \Phi_2(x) = x + 1
+
+    \Phi_3(x) = x^2 + x + 1
+
+    \Phi_4(x) = x^2 + 1
+
+    \Phi_5(x) = x^4 + x^3 + x^2 + x + 1
+
+    \Phi_6(x) = x^2 - x + 1
+
+**Examples**
+
+The coefficients of low-order cyclotomic polynomials can be recovered
+using Taylor expansion::
+
+    >>> from mpmath import *
+    >>> mp.dps = 15; mp.pretty = True
+    >>> for n in range(9):
+    ...     p = chop(taylor(lambda x: cyclotomic(n,x), 0, 10))
+    ...     print("%s %s" % (n, nstr(p[:10+1-p[::-1].index(1)])))
+    ...
+    0 [1.0]
+    1 [-1.0, 1.0]
+    2 [1.0, 1.0]
+    3 [1.0, 1.0, 1.0]
+    4 [1.0, 0.0, 1.0]
+    5 [1.0, 1.0, 1.0, 1.0, 1.0]
+    6 [1.0, -1.0, 1.0]
+    7 [1.0, 1.0, 1.0, 1.0, 1.0, 1.0, 1.0]
+    8 [1.0, 0.0, 0.0, 0.0, 1.0]
+
+The definition as a product over primitive roots may be checked
+by computing the product explicitly (for a real argument, this
+method will generally introduce numerical noise in the imaginary
+part)::
+
+    >>> mp.dps = 25
+    >>> z = 3+4j
+    >>> cyclotomic(10, z)
+    (-419.0 - 360.0j)
+    >>> fprod(z-r for r in unitroots(10, primitive=True))
+    (-419.0 - 360.0j)
+    >>> z = 3
+    >>> cyclotomic(10, z)
+    61.0
+    >>> fprod(z-r for r in unitroots(10, primitive=True))
+    (61.0 - 3.146045605088568607055454e-25j)
+
+Up to permutation, the roots of a given cyclotomic polynomial
+can be checked to agree with the list of primitive roots::
+
+    >>> p = taylor(lambda x: cyclotomic(6,x), 0, 6)[:3]
+    >>> for r in polyroots(p[::-1]):
+    ...     print(r)
+    ...
+    (0.5 - 0.8660254037844386467637232j)
+    (0.5 + 0.8660254037844386467637232j)
+    >>>
+    >>> for r in unitroots(6, primitive=True):
+    ...     print(r)
+    ...
+    (0.5 + 0.8660254037844386467637232j)
+    (0.5 - 0.8660254037844386467637232j)
+
+"""
+
+meijerg = r"""
+Evaluates the Meijer G-function, defined as
+
+.. 
math :: + + G^{m,n}_{p,q} \left( \left. \begin{matrix} + a_1, \dots, a_n ; a_{n+1} \dots a_p \\ + b_1, \dots, b_m ; b_{m+1} \dots b_q + \end{matrix}\; \right| \; z ; r \right) = + \frac{1}{2 \pi i} \int_L + \frac{\prod_{j=1}^m \Gamma(b_j+s) \prod_{j=1}^n\Gamma(1-a_j-s)} + {\prod_{j=n+1}^{p}\Gamma(a_j+s) \prod_{j=m+1}^q \Gamma(1-b_j-s)} + z^{-s/r} ds + +for an appropriate choice of the contour `L` (see references). + +There are `p` elements `a_j`. +The argument *a_s* should be a pair of lists, the first containing the +`n` elements `a_1, \ldots, a_n` and the second containing +the `p-n` elements `a_{n+1}, \ldots a_p`. + +There are `q` elements `b_j`. +The argument *b_s* should be a pair of lists, the first containing the +`m` elements `b_1, \ldots, b_m` and the second containing +the `q-m` elements `b_{m+1}, \ldots b_q`. + +The implicit tuple `(m, n, p, q)` constitutes the order or degree of the +Meijer G-function, and is determined by the lengths of the coefficient +vectors. Confusingly, the indices in this tuple appear in a different order +from the coefficients, but this notation is standard. The many examples +given below should hopefully clear up any potential confusion. + +**Algorithm** + +The Meijer G-function is evaluated as a combination of hypergeometric series. +There are two versions of the function, which can be selected with +the optional *series* argument. + +*series=1* uses a sum of `m` `\,_pF_{q-1}` functions of `z` + +*series=2* uses a sum of `n` `\,_qF_{p-1}` functions of `1/z` + +The default series is chosen based on the degree and `|z|` in order +to be consistent with Mathematica's. This definition of the Meijer G-function +has a discontinuity at `|z| = 1` for some orders, which can +be avoided by explicitly specifying a series. + +Keyword arguments are forwarded to :func:`~mpmath.hypercomb`. + +**Examples** + +Many standard functions are special cases of the Meijer G-function +(possibly rescaled and/or with branch cut corrections). 
We define +some test parameters:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> a = mpf(0.75) + >>> b = mpf(1.5) + >>> z = mpf(2.25) + +The exponential function: +`e^z = G^{1,0}_{0,1} \left( \left. \begin{matrix} - \\ 0 \end{matrix} \; +\right| \; -z \right)` + + >>> meijerg([[],[]], [[0],[]], -z) + 9.487735836358525720550369 + >>> exp(z) + 9.487735836358525720550369 + +The natural logarithm: +`\log(1+z) = G^{1,2}_{2,2} \left( \left. \begin{matrix} 1, 1 \\ 1, 0 +\end{matrix} \; \right| \; -z \right)` + + >>> meijerg([[1,1],[]], [[1],[0]], z) + 1.178654996341646117219023 + >>> log(1+z) + 1.178654996341646117219023 + +A rational function: +`\frac{z}{z+1} = G^{1,2}_{2,2} \left( \left. \begin{matrix} 1, 1 \\ 1, 1 +\end{matrix} \; \right| \; z \right)` + + >>> meijerg([[1,1],[]], [[1],[1]], z) + 0.6923076923076923076923077 + >>> z/(z+1) + 0.6923076923076923076923077 + +The sine and cosine functions: + +`\frac{1}{\sqrt \pi} \sin(2 \sqrt z) = G^{1,0}_{0,2} \left( \left. \begin{matrix} +- \\ \frac{1}{2}, 0 \end{matrix} \; \right| \; z \right)` + +`\frac{1}{\sqrt \pi} \cos(2 \sqrt z) = G^{1,0}_{0,2} \left( \left. \begin{matrix} +- \\ 0, \frac{1}{2} \end{matrix} \; \right| \; z \right)` + + >>> meijerg([[],[]], [[0.5],[0]], (z/2)**2) + 0.4389807929218676682296453 + >>> sin(z)/sqrt(pi) + 0.4389807929218676682296453 + >>> meijerg([[],[]], [[0],[0.5]], (z/2)**2) + -0.3544090145996275423331762 + >>> cos(z)/sqrt(pi) + -0.3544090145996275423331762 + +Bessel functions: + +`J_a(2 \sqrt z) = G^{1,0}_{0,2} \left( \left. +\begin{matrix} - \\ \frac{a}{2}, -\frac{a}{2} +\end{matrix} \; \right| \; z \right)` + +`Y_a(2 \sqrt z) = G^{2,0}_{1,3} \left( \left. +\begin{matrix} \frac{-a-1}{2} \\ \frac{a}{2}, -\frac{a}{2}, \frac{-a-1}{2} +\end{matrix} \; \right| \; z \right)` + +`(-z)^{a/2} z^{-a/2} I_a(2 \sqrt z) = G^{1,0}_{0,2} \left( \left. 
+\begin{matrix} - \\ \frac{a}{2}, -\frac{a}{2} +\end{matrix} \; \right| \; -z \right)` + +`2 K_a(2 \sqrt z) = G^{2,0}_{0,2} \left( \left. +\begin{matrix} - \\ \frac{a}{2}, -\frac{a}{2} +\end{matrix} \; \right| \; z \right)` + +As the example with the Bessel *I* function shows, a branch +factor is required for some arguments when inverting the square root. + + >>> meijerg([[],[]], [[a/2],[-a/2]], (z/2)**2) + 0.5059425789597154858527264 + >>> besselj(a,z) + 0.5059425789597154858527264 + >>> meijerg([[],[(-a-1)/2]], [[a/2,-a/2],[(-a-1)/2]], (z/2)**2) + 0.1853868950066556941442559 + >>> bessely(a, z) + 0.1853868950066556941442559 + >>> meijerg([[],[]], [[a/2],[-a/2]], -(z/2)**2) + (0.8685913322427653875717476 + 2.096964974460199200551738j) + >>> (-z)**(a/2) / z**(a/2) * besseli(a, z) + (0.8685913322427653875717476 + 2.096964974460199200551738j) + >>> 0.5*meijerg([[],[]], [[a/2,-a/2],[]], (z/2)**2) + 0.09334163695597828403796071 + >>> besselk(a,z) + 0.09334163695597828403796071 + +Error functions: + +`\sqrt{\pi} z^{2(a-1)} \mathrm{erfc}(z) = G^{2,0}_{1,2} \left( \left. +\begin{matrix} a \\ a-1, a-\frac{1}{2} +\end{matrix} \; \right| \; z, \frac{1}{2} \right)` + + >>> meijerg([[],[a]], [[a-1,a-0.5],[]], z, 0.5) + 0.00172839843123091957468712 + >>> sqrt(pi) * z**(2*a-2) * erfc(z) + 0.00172839843123091957468712 + +A Meijer G-function of higher degree, (1,1,2,3): + + >>> meijerg([[a],[b]], [[a],[b,a-1]], z) + 1.55984467443050210115617 + >>> sin((b-a)*pi)/pi*(exp(z)-1)*z**(a-1) + 1.55984467443050210115617 + +A Meijer G-function of still higher degree, (4,1,2,4), that can +be expanded as a messy combination of exponential integrals: + + >>> meijerg([[a],[2*b-a]], [[b,a,b-0.5,-1-a+2*b],[]], z) + 0.3323667133658557271898061 + >>> chop(4**(a-b+1)*sqrt(pi)*gamma(2*b-2*a)*z**a*\ + ... 
expint(2*b-2*a, -2*sqrt(-z))*expint(2*b-2*a, 2*sqrt(-z))) + 0.3323667133658557271898061 + +In the following case, different series give different values:: + + >>> chop(meijerg([[1],[0.25]],[[3],[0.5]],-2)) + -0.06417628097442437076207337 + >>> meijerg([[1],[0.25]],[[3],[0.5]],-2,series=1) + 0.1428699426155117511873047 + >>> chop(meijerg([[1],[0.25]],[[3],[0.5]],-2,series=2)) + -0.06417628097442437076207337 + +**References** + +1. http://en.wikipedia.org/wiki/Meijer_G-function + +2. http://mathworld.wolfram.com/MeijerG-Function.html + +3. http://functions.wolfram.com/HypergeometricFunctions/MeijerG/ + +4. http://functions.wolfram.com/HypergeometricFunctions/MeijerG1/ + +""" + +clsin = r""" +Computes the Clausen sine function, defined formally by the series + +.. math :: + + \mathrm{Cl}_s(z) = \sum_{k=1}^{\infty} \frac{\sin(kz)}{k^s}. + +The special case `\mathrm{Cl}_2(z)` (i.e. ``clsin(2,z)``) is the classical +"Clausen function". More generally, the Clausen function is defined for +complex `s` and `z`, even when the series does not converge. The +Clausen function is related to the polylogarithm (:func:`~mpmath.polylog`) as + +.. math :: + + \mathrm{Cl}_s(z) = \frac{1}{2i}\left(\mathrm{Li}_s\left(e^{iz}\right) - + \mathrm{Li}_s\left(e^{-iz}\right)\right) + + = \mathrm{Im}\left[\mathrm{Li}_s(e^{iz})\right] \quad (s, z \in \mathbb{R}), + +and this representation can be taken to provide the analytic continuation of the +series. The complementary function :func:`~mpmath.clcos` gives the corresponding +cosine sum. 
+ +**Examples** + +Evaluation for arbitrarily chosen `s` and `z`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> s, z = 3, 4 + >>> clsin(s, z); nsum(lambda k: sin(z*k)/k**s, [1,inf]) + -0.6533010136329338746275795 + -0.6533010136329338746275795 + +Using `z + \pi` instead of `z` gives an alternating series:: + + >>> clsin(s, z+pi) + 0.8860032351260589402871624 + >>> nsum(lambda k: (-1)**k*sin(z*k)/k**s, [1,inf]) + 0.8860032351260589402871624 + +With `s = 1`, the sum can be expressed in closed form +using elementary functions:: + + >>> z = 1 + sqrt(3) + >>> clsin(1, z) + 0.2047709230104579724675985 + >>> chop((log(1-exp(-j*z)) - log(1-exp(j*z)))/(2*j)) + 0.2047709230104579724675985 + >>> nsum(lambda k: sin(k*z)/k, [1,inf]) + 0.2047709230104579724675985 + +The classical Clausen function `\mathrm{Cl}_2(\theta)` gives the +value of the integral `\int_0^{\theta} -\ln(2\sin(x/2)) dx` for +`0 < \theta < 2 \pi`:: + + >>> cl2 = lambda t: clsin(2, t) + >>> cl2(3.5) + -0.2465045302347694216534255 + >>> -quad(lambda x: ln(2*sin(0.5*x)), [0, 3.5]) + -0.2465045302347694216534255 + +This function is symmetric about `\theta = \pi` with zeros and extreme +points:: + + >>> cl2(0); cl2(pi/3); chop(cl2(pi)); cl2(5*pi/3); chop(cl2(2*pi)) + 0.0 + 1.014941606409653625021203 + 0.0 + -1.014941606409653625021203 + 0.0 + +Catalan's constant is a special value:: + + >>> cl2(pi/2) + 0.9159655941772190150546035 + >>> +catalan + 0.9159655941772190150546035 + +The Clausen sine function can be expressed in closed form when +`s` is an odd integer (becoming zero when `s` < 0):: + + >>> z = 1 + sqrt(2) + >>> clsin(1, z); (pi-z)/2 + 0.3636895456083490948304773 + 0.3636895456083490948304773 + >>> clsin(3, z); pi**2/6*z - pi*z**2/4 + z**3/12 + 0.5661751584451144991707161 + 0.5661751584451144991707161 + >>> clsin(-1, z) + 0.0 + >>> clsin(-3, z) + 0.0 + +It can also be expressed in closed form for even integer `s \le 0`, +providing a finite sum for series such as +`\sin(z) + 
\sin(2z) + \sin(3z) + \ldots`:: + + >>> z = 1 + sqrt(2) + >>> clsin(0, z) + 0.1903105029507513881275865 + >>> cot(z/2)/2 + 0.1903105029507513881275865 + >>> clsin(-2, z) + -0.1089406163841548817581392 + >>> -cot(z/2)*csc(z/2)**2/4 + -0.1089406163841548817581392 + +Call with ``pi=True`` to multiply `z` by `\pi` exactly:: + + >>> clsin(3, 3*pi) + -8.892316224968072424732898e-26 + >>> clsin(3, 3, pi=True) + 0.0 + +Evaluation for complex `s`, `z` in a nonconvergent case:: + + >>> s, z = -1-j, 1+2j + >>> clsin(s, z) + (-0.593079480117379002516034 + 0.9038644233367868273362446j) + >>> extraprec(20)(nsum)(lambda k: sin(k*z)/k**s, [1,inf]) + (-0.593079480117379002516034 + 0.9038644233367868273362446j) + +""" + +clcos = r""" +Computes the Clausen cosine function, defined formally by the series + +.. math :: + + \mathrm{\widetilde{Cl}}_s(z) = \sum_{k=1}^{\infty} \frac{\cos(kz)}{k^s}. + +This function is complementary to the Clausen sine function +:func:`~mpmath.clsin`. In terms of the polylogarithm, + +.. math :: + + \mathrm{\widetilde{Cl}}_s(z) = + \frac{1}{2}\left(\mathrm{Li}_s\left(e^{iz}\right) + + \mathrm{Li}_s\left(e^{-iz}\right)\right) + + = \mathrm{Re}\left[\mathrm{Li}_s(e^{iz})\right] \quad (s, z \in \mathbb{R}). 
+ +**Examples** + +Evaluation for arbitrarily chosen `s` and `z`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> s, z = 3, 4 + >>> clcos(s, z); nsum(lambda k: cos(z*k)/k**s, [1,inf]) + -0.6518926267198991308332759 + -0.6518926267198991308332759 + +Using `z + \pi` instead of `z` gives an alternating series:: + + >>> s, z = 3, 0.5 + >>> clcos(s, z+pi) + -0.8155530586502260817855618 + >>> nsum(lambda k: (-1)**k*cos(z*k)/k**s, [1,inf]) + -0.8155530586502260817855618 + +With `s = 1`, the sum can be expressed in closed form +using elementary functions:: + + >>> z = 1 + sqrt(3) + >>> clcos(1, z) + -0.6720334373369714849797918 + >>> chop(-0.5*(log(1-exp(j*z))+log(1-exp(-j*z)))) + -0.6720334373369714849797918 + >>> -log(abs(2*sin(0.5*z))) # Equivalent to above when z is real + -0.6720334373369714849797918 + >>> nsum(lambda k: cos(k*z)/k, [1,inf]) + -0.6720334373369714849797918 + +It can also be expressed in closed form when `s` is an even integer. +For example, + + >>> clcos(2,z) + -0.7805359025135583118863007 + >>> pi**2/6 - pi*z/2 + z**2/4 + -0.7805359025135583118863007 + +The case `s = 0` gives the renormalized sum of +`\cos(z) + \cos(2z) + \cos(3z) + \ldots` (which happens to be the same for +any value of `z`):: + + >>> clcos(0, z) + -0.5 + >>> nsum(lambda k: cos(k*z), [1,inf]) + -0.5 + +Also the sums + +.. math :: + + \cos(z) + 2\cos(2z) + 3\cos(3z) + \ldots + +and + +.. math :: + + \cos(z) + 2^n \cos(2z) + 3^n \cos(3z) + \ldots + +for higher integer powers `n = -s` can be done in closed form. 
They are zero +when `n` is positive and even (`s` negative and even):: + + >>> clcos(-1, z); 1/(2*cos(z)-2) + -0.2607829375240542480694126 + -0.2607829375240542480694126 + >>> clcos(-3, z); (2+cos(z))*csc(z/2)**4/8 + 0.1472635054979944390848006 + 0.1472635054979944390848006 + >>> clcos(-2, z); clcos(-4, z); clcos(-6, z) + 0.0 + 0.0 + 0.0 + +With `z = \pi`, the series reduces to that of the Riemann zeta function +(more generally, if `z = p \pi/q`, it is a finite sum over Hurwitz zeta +function values):: + + >>> clcos(2.5, 0); zeta(2.5) + 1.34148725725091717975677 + 1.34148725725091717975677 + >>> clcos(2.5, pi); -altzeta(2.5) + -0.8671998890121841381913472 + -0.8671998890121841381913472 + +Call with ``pi=True`` to multiply `z` by `\pi` exactly:: + + >>> clcos(-3, 2*pi) + 2.997921055881167659267063e+102 + >>> clcos(-3, 2, pi=True) + 0.008333333333333333333333333 + +Evaluation for complex `s`, `z` in a nonconvergent case:: + + >>> s, z = -1-j, 1+2j + >>> clcos(s, z) + (0.9407430121562251476136807 + 0.715826296033590204557054j) + >>> extraprec(20)(nsum)(lambda k: cos(k*z)/k**s, [1,inf]) + (0.9407430121562251476136807 + 0.715826296033590204557054j) + +""" + +whitm = r""" +Evaluates the Whittaker function `M(k,m,z)`, which gives a solution +to the Whittaker differential equation + +.. math :: + + \frac{d^2f}{dz^2} + \left(-\frac{1}{4}+\frac{k}{z}+ + \frac{(\frac{1}{4}-m^2)}{z^2}\right) f = 0. + +A second solution is given by :func:`~mpmath.whitw`. + +The Whittaker functions are defined in Abramowitz & Stegun, section 13.1. +They are alternate forms of the confluent hypergeometric functions +`\,_1F_1` and `U`: + +.. math :: + + M(k,m,z) = e^{-\frac{1}{2}z} z^{\frac{1}{2}+m} + \,_1F_1(\tfrac{1}{2}+m-k, 1+2m, z) + + W(k,m,z) = e^{-\frac{1}{2}z} z^{\frac{1}{2}+m} + U(\tfrac{1}{2}+m-k, 1+2m, z). 
+ +**Examples** + +Evaluation for arbitrary real and complex arguments is supported:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> whitm(1, 1, 1) + 0.7302596799460411820509668 + >>> whitm(1, 1, -1) + (0.0 - 1.417977827655098025684246j) + >>> whitm(j, j/2, 2+3j) + (3.245477713363581112736478 - 0.822879187542699127327782j) + >>> whitm(2, 3, 100000) + 4.303985255686378497193063e+21707 + +Evaluation at zero:: + + >>> whitm(1,-1,0); whitm(1,-0.5,0); whitm(1,0,0) + +inf + nan + 0.0 + +We can verify that :func:`~mpmath.whitm` numerically satisfies the +differential equation for arbitrarily chosen values:: + + >>> k = mpf(0.25) + >>> m = mpf(1.5) + >>> f = lambda z: whitm(k,m,z) + >>> for z in [-1, 2.5, 3, 1+2j]: + ... chop(diff(f,z,2) + (-0.25 + k/z + (0.25-m**2)/z**2)*f(z)) + ... + 0.0 + 0.0 + 0.0 + 0.0 + +An integral involving both :func:`~mpmath.whitm` and :func:`~mpmath.whitw`, +verifying evaluation along the real axis:: + + >>> quad(lambda x: exp(-x)*whitm(3,2,x)*whitw(1,-2,x), [0,inf]) + 3.438869842576800225207341 + >>> 128/(21*sqrt(pi)) + 3.438869842576800225207341 + +""" + +whitw = r""" +Evaluates the Whittaker function `W(k,m,z)`, which gives a second +solution to the Whittaker differential equation. (See :func:`~mpmath.whitm`.) + +**Examples** + +Evaluation for arbitrary real and complex arguments is supported:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> whitw(1, 1, 1) + 1.19532063107581155661012 + >>> whitw(1, 1, -1) + (-0.9424875979222187313924639 - 0.2607738054097702293308689j) + >>> whitw(j, j/2, 2+3j) + (0.1782899315111033879430369 - 0.01609578360403649340169406j) + >>> whitw(2, 3, 100000) + 1.887705114889527446891274e-21705 + >>> whitw(-1, -1, 100) + 1.905250692824046162462058e-24 + +Evaluation at zero:: + + >>> for m in [-1, -0.5, 0, 0.5, 1]: + ... whitw(1, m, 0) + ... 
+ +inf + nan + 0.0 + nan + +inf + +We can verify that :func:`~mpmath.whitw` numerically satisfies the +differential equation for arbitrarily chosen values:: + + >>> k = mpf(0.25) + >>> m = mpf(1.5) + >>> f = lambda z: whitw(k,m,z) + >>> for z in [-1, 2.5, 3, 1+2j]: + ... chop(diff(f,z,2) + (-0.25 + k/z + (0.25-m**2)/z**2)*f(z)) + ... + 0.0 + 0.0 + 0.0 + 0.0 + +""" + +ber = r""" +Computes the Kelvin function ber, which for real arguments gives the real part +of the Bessel J function of a rotated argument + +.. math :: + + J_n\left(x e^{3\pi i/4}\right) = \mathrm{ber}_n(x) + i \mathrm{bei}_n(x). + +The imaginary part is given by :func:`~mpmath.bei`. + +**Plots** + +.. literalinclude :: /plots/ber.py +.. image :: /plots/ber.png + +**Examples** + +Verifying the defining relation:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> n, x = 2, 3.5 + >>> ber(n,x) + 1.442338852571888752631129 + >>> bei(n,x) + -0.948359035324558320217678 + >>> besselj(n, x*root(1,8,3)) + (1.442338852571888752631129 - 0.948359035324558320217678j) + +The ber and bei functions are also defined by analytic continuation +for complex arguments:: + + >>> ber(1+j, 2+3j) + (4.675445984756614424069563 - 15.84901771719130765656316j) + >>> bei(1+j, 2+3j) + (15.83886679193707699364398 + 4.684053288183046528703611j) + +""" + +bei = r""" +Computes the Kelvin function bei, which for real arguments gives the +imaginary part of the Bessel J function of a rotated argument. +See :func:`~mpmath.ber`. +""" + +ker = r""" +Computes the Kelvin function ker, which for real arguments gives the real part +of the (rescaled) Bessel K function of a rotated argument + +.. math :: + + e^{-\pi i/2} K_n\left(x e^{3\pi i/4}\right) = \mathrm{ker}_n(x) + i \mathrm{kei}_n(x). + +The imaginary part is given by :func:`~mpmath.kei`. + +**Plots** + +.. literalinclude :: /plots/ker.py +.. 
image :: /plots/ker.png + +**Examples** + +Verifying the defining relation:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> n, x = 2, 4.5 + >>> ker(n,x) + 0.02542895201906369640249801 + >>> kei(n,x) + -0.02074960467222823237055351 + >>> exp(-n*pi*j/2) * besselk(n, x*root(1,8,1)) + (0.02542895201906369640249801 - 0.02074960467222823237055351j) + +The ker and kei functions are also defined by analytic continuation +for complex arguments:: + + >>> ker(1+j, 3+4j) + (1.586084268115490421090533 - 2.939717517906339193598719j) + >>> kei(1+j, 3+4j) + (-2.940403256319453402690132 - 1.585621643835618941044855j) + +""" + +kei = r""" +Computes the Kelvin function kei, which for real arguments gives the +imaginary part of the (rescaled) Bessel K function of a rotated argument. +See :func:`~mpmath.ker`. +""" + +struveh = r""" +Gives the Struve function + +.. math :: + + \,\mathbf{H}_n(z) = + \sum_{k=0}^\infty \frac{(-1)^k}{\Gamma(k+\frac{3}{2}) + \Gamma(k+n+\frac{3}{2})} {\left({\frac{z}{2}}\right)}^{2k+n+1} + +which is a solution to the Struve differential equation + +.. math :: + + z^2 f''(z) + z f'(z) + (z^2-n^2) f(z) = \frac{2 z^{n+1}}{\pi (2n-1)!!}. 
which solves the modified Struve differential equation
although :func:`~mpmath.appellf1` can evaluate an analytic continuation
with respect to either variable, and sometimes both.
+ +**Examples** + +Evaluation is supported for real and complex parameters:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> appellf1(1,0,0.5,1,0.5,0.25) + 1.154700538379251529018298 + >>> appellf1(1,1+j,0.5,1,0.5,0.5j) + (1.138403860350148085179415 + 1.510544741058517621110615j) + +For some integer parameters, the F1 series reduces to a polynomial:: + + >>> appellf1(2,-4,-3,1,2,5) + -816.0 + >>> appellf1(-5,1,2,1,4,5) + -20528.0 + +The analytic continuation with respect to either `x` or `y`, +and sometimes with respect to both, can be evaluated:: + + >>> appellf1(2,3,4,5,100,0.5) + (0.0006231042714165329279738662 + 0.0000005769149277148425774499857j) + >>> appellf1('1.1', '0.3', '0.2+2j', '0.4', '0.2', 1.5+3j) + (-0.1782604566893954897128702 + 0.002472407104546216117161499j) + >>> appellf1(1,2,3,4,10,12) + -0.07122993830066776374929313 + +For certain arguments, F1 reduces to an ordinary hypergeometric function:: + + >>> appellf1(1,2,3,5,0.5,0.25) + 1.547902270302684019335555 + >>> 4*hyp2f1(1,2,5,'1/3')/3 + 1.547902270302684019335555 + >>> appellf1(1,2,3,4,0,1.5) + (-1.717202506168937502740238 - 2.792526803190927323077905j) + >>> hyp2f1(1,3,4,1.5) + (-1.717202506168937502740238 - 2.792526803190927323077905j) + +The F1 function satisfies a system of partial differential equations:: + + >>> a,b1,b2,c,x,y = map(mpf, [1,0.5,0.25,1.125,0.25,-0.25]) + >>> F = lambda x,y: appellf1(a,b1,b2,c,x,y) + >>> chop(x*(1-x)*diff(F,(x,y),(2,0)) + + ... y*(1-x)*diff(F,(x,y),(1,1)) + + ... (c-(a+b1+1)*x)*diff(F,(x,y),(1,0)) - + ... b1*y*diff(F,(x,y),(0,1)) - + ... a*b1*F(x,y)) + 0.0 + >>> + >>> chop(y*(1-y)*diff(F,(x,y),(0,2)) + + ... x*(1-y)*diff(F,(x,y),(1,1)) + + ... (c-(a+b2+1)*y)*diff(F,(x,y),(0,1)) - + ... b2*x*diff(F,(x,y),(1,0)) - + ... a*b2*F(x,y)) + 0.0 + +The Appell F1 function allows for closed-form evaluation of various +integrals, such as any integral of the form +`\int x^r (x+a)^p (x+b)^q dx`:: + + >>> def integral(a,b,p,q,r,x1,x2): + ... 
a,b,p,q,r,x1,x2 = map(mpmathify, [a,b,p,q,r,x1,x2]) + ... f = lambda x: x**r * (x+a)**p * (x+b)**q + ... def F(x): + ... v = x**(r+1)/(r+1) * (a+x)**p * (b+x)**q + ... v *= (1+x/a)**(-p) + ... v *= (1+x/b)**(-q) + ... v *= appellf1(r+1,-p,-q,2+r,-x/a,-x/b) + ... return v + ... print("Num. quad: %s" % quad(f, [x1,x2])) + ... print("Appell F1: %s" % (F(x2)-F(x1))) + ... + >>> integral('1/5','4/3','-2','3','1/2',0,1) + Num. quad: 9.073335358785776206576981 + Appell F1: 9.073335358785776206576981 + >>> integral('3/2','4/3','-2','3','1/2',0,1) + Num. quad: 1.092829171999626454344678 + Appell F1: 1.092829171999626454344678 + >>> integral('3/2','4/3','-2','3','1/2',12,25) + Num. quad: 1106.323225040235116498927 + Appell F1: 1106.323225040235116498927 + +Also incomplete elliptic integrals fall into this category [1]:: + + >>> def E(z, m): + ... if (pi/2).ae(z): + ... return ellipe(m) + ... return 2*round(re(z)/pi)*ellipe(m) + mpf(-1)**round(re(z)/pi)*\ + ... sin(z)*appellf1(0.5,0.5,-0.5,1.5,sin(z)**2,m*sin(z)**2) + ... + >>> z, m = 1, 0.5 + >>> E(z,m); quad(lambda t: sqrt(1-m*sin(t)**2), [0,pi/4,3*pi/4,z]) + 0.9273298836244400669659042 + 0.9273298836244400669659042 + >>> z, m = 3, 2 + >>> E(z,m); quad(lambda t: sqrt(1-m*sin(t)**2), [0,pi/4,3*pi/4,z]) + (1.057495752337234229715836 + 1.198140234735592207439922j) + (1.057495752337234229715836 + 1.198140234735592207439922j) + +**References** + +1. [WolframFunctions]_ http://functions.wolfram.com/EllipticIntegrals/EllipticE2/26/01/ +2. [SrivastavaKarlsson]_ +3. [CabralRosetti]_ +4. [Vidunas]_ +5. [Slater]_ + +""" + +angerj = r""" +Gives the Anger function + +.. math :: + + \mathbf{J}_{\nu}(z) = \frac{1}{\pi} + \int_0^{\pi} \cos(\nu t - z \sin t) dt + +which is an entire function of both the parameter `\nu` and +the argument `z`. It solves the inhomogeneous Bessel differential +equation + +.. math :: + + f''(z) + \frac{1}{z}f'(z) + \left(1-\frac{\nu^2}{z^2}\right) f(z) + = \frac{(z-\nu)}{\pi z^2} \sin(\pi \nu). 
+ +**Examples** + +Evaluation for real and complex parameter and argument:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> angerj(2,3) + 0.4860912605858910769078311 + >>> angerj(-3+4j, 2+5j) + (-5033.358320403384472395612 + 585.8011892476145118551756j) + >>> angerj(3.25, 1e6j) + (4.630743639715893346570743e+434290 - 1.117960409887505906848456e+434291j) + >>> angerj(-1.5, 1e6) + 0.0002795719747073879393087011 + +The Anger function coincides with the Bessel J-function when `\nu` +is an integer:: + + >>> angerj(1,3); besselj(1,3) + 0.3390589585259364589255146 + 0.3390589585259364589255146 + >>> angerj(1.5,3); besselj(1.5,3) + 0.4088969848691080859328847 + 0.4777182150870917715515015 + +Verifying the differential equation:: + + >>> v,z = mpf(2.25), 0.75 + >>> f = lambda z: angerj(v,z) + >>> diff(f,z,2) + diff(f,z)/z + (1-(v/z)**2)*f(z) + -0.6002108774380707130367995 + >>> (z-v)/(pi*z**2) * sinpi(v) + -0.6002108774380707130367995 + +Verifying the integral representation:: + + >>> angerj(v,z) + 0.1145380759919333180900501 + >>> quad(lambda t: cos(v*t-z*sin(t))/pi, [0,pi]) + 0.1145380759919333180900501 + +**References** + +1. [DLMF]_ section 11.10: Anger-Weber Functions +""" + +webere = r""" +Gives the Weber function + +.. math :: + + \mathbf{E}_{\nu}(z) = \frac{1}{\pi} + \int_0^{\pi} \sin(\nu t - z \sin t) dt + +which is an entire function of both the parameter `\nu` and +the argument `z`. It solves the inhomogeneous Bessel differential +equation + +.. math :: + + f''(z) + \frac{1}{z}f'(z) + \left(1-\frac{\nu^2}{z^2}\right) f(z) + = -\frac{1}{\pi z^2} (z+\nu+(z-\nu)\cos(\pi \nu)). 
+ +**Examples** + +Evaluation for real and complex parameter and argument:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> webere(2,3) + -0.1057668973099018425662646 + >>> webere(-3+4j, 2+5j) + (-585.8081418209852019290498 - 5033.314488899926921597203j) + >>> webere(3.25, 1e6j) + (-1.117960409887505906848456e+434291 - 4.630743639715893346570743e+434290j) + >>> webere(3.25, 1e6) + -0.00002812518265894315604914453 + +Up to addition of a rational function of `z`, the Weber function coincides +with the Struve H-function when `\nu` is an integer:: + + >>> webere(1,3); 2/pi-struveh(1,3) + -0.3834897968188690177372881 + -0.3834897968188690177372881 + >>> webere(5,3); 26/(35*pi)-struveh(5,3) + 0.2009680659308154011878075 + 0.2009680659308154011878075 + +Verifying the differential equation:: + + >>> v,z = mpf(2.25), 0.75 + >>> f = lambda z: webere(v,z) + >>> diff(f,z,2) + diff(f,z)/z + (1-(v/z)**2)*f(z) + -1.097441848875479535164627 + >>> -(z+v+(z-v)*cospi(v))/(pi*z**2) + -1.097441848875479535164627 + +Verifying the integral representation:: + + >>> webere(v,z) + 0.1486507351534283744485421 + >>> quad(lambda t: sin(v*t-z*sin(t))/pi, [0,pi]) + 0.1486507351534283744485421 + +**References** + +1. [DLMF]_ section 11.10: Anger-Weber Functions +""" + +lommels1 = r""" +Gives the Lommel function `s_{\mu,\nu}` or `s^{(1)}_{\mu,\nu}` + +.. math :: + + s_{\mu,\nu}(z) = \frac{z^{\mu+1}}{(\mu-\nu+1)(\mu+\nu+1)} + \,_1F_2\left(1; \frac{\mu-\nu+3}{2}, \frac{\mu+\nu+3}{2}; + -\frac{z^2}{4} \right) + +which solves the inhomogeneous Bessel equation + +.. math :: + + z^2 f''(z) + z f'(z) + (z^2-\nu^2) f(z) = z^{\mu+1}. + +A second solution is given by :func:`~mpmath.lommels2`. + +**Plots** + +.. literalinclude :: /plots/lommels1.py +.. 
image :: /plots/lommels1.png + +**Examples** + +An integral representation:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> u,v,z = 0.25, 0.125, mpf(0.75) + >>> lommels1(u,v,z) + 0.4276243877565150372999126 + >>> (bessely(v,z)*quad(lambda t: t**u*besselj(v,t), [0,z]) - \ + ... besselj(v,z)*quad(lambda t: t**u*bessely(v,t), [0,z]))*(pi/2) + 0.4276243877565150372999126 + +A special value:: + + >>> lommels1(v,v,z) + 0.5461221367746048054932553 + >>> gamma(v+0.5)*sqrt(pi)*power(2,v-1)*struveh(v,z) + 0.5461221367746048054932553 + +Verifying the differential equation:: + + >>> f = lambda z: lommels1(u,v,z) + >>> z**2*diff(f,z,2) + z*diff(f,z) + (z**2-v**2)*f(z) + 0.6979536443265746992059141 + >>> z**(u+1) + 0.6979536443265746992059141 + +**References** + +1. [GradshteynRyzhik]_ +2. [Weisstein]_ http://mathworld.wolfram.com/LommelFunction.html +""" + +lommels2 = r""" +Gives the second Lommel function `S_{\mu,\nu}` or `s^{(2)}_{\mu,\nu}` + +.. math :: + + S_{\mu,\nu}(z) = s_{\mu,\nu}(z) + 2^{\mu-1} + \Gamma\left(\tfrac{1}{2}(\mu-\nu+1)\right) + \Gamma\left(\tfrac{1}{2}(\mu+\nu+1)\right) \times + + \left[\sin(\tfrac{1}{2}(\mu-\nu)\pi) J_{\nu}(z) - + \cos(\tfrac{1}{2}(\mu-\nu)\pi) Y_{\nu}(z) + \right] + +which solves the same differential equation as +:func:`~mpmath.lommels1`. + +**Plots** + +.. literalinclude :: /plots/lommels2.py +.. 
image :: /plots/lommels2.png + +**Examples** + +For large `|z|`, `S_{\mu,\nu} \sim z^{\mu-1}`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> lommels2(10,2,30000) + 1.968299831601008419949804e+40 + >>> power(30000,9) + 1.9683e+40 + +A special value:: + + >>> u,v,z = 0.5, 0.125, mpf(0.75) + >>> lommels2(v,v,z) + 0.9589683199624672099969765 + >>> (struveh(v,z)-bessely(v,z))*power(2,v-1)*sqrt(pi)*gamma(v+0.5) + 0.9589683199624672099969765 + +Verifying the differential equation:: + + >>> f = lambda z: lommels2(u,v,z) + >>> z**2*diff(f,z,2) + z*diff(f,z) + (z**2-v**2)*f(z) + 0.6495190528383289850727924 + >>> z**(u+1) + 0.6495190528383289850727924 + +**References** + +1. [GradshteynRyzhik]_ +2. [Weisstein]_ http://mathworld.wolfram.com/LommelFunction.html +""" + +appellf2 = r""" +Gives the Appell F2 hypergeometric function of two variables + +.. math :: + + F_2(a,b_1,b_2,c_1,c_2,x,y) = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty} + \frac{(a)_{m+n} (b_1)_m (b_2)_n}{(c_1)_m (c_2)_n} + \frac{x^m y^n}{m! n!}. + +The series is generally absolutely convergent for `|x| + |y| < 1`. 
+ +**Examples** + +Evaluation for real and complex arguments:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> appellf2(1,2,3,4,5,0.25,0.125) + 1.257417193533135344785602 + >>> appellf2(1,-3,-4,2,3,2,3) + -42.8 + >>> appellf2(0.5,0.25,-0.25,2,3,0.25j,0.25) + (0.9880539519421899867041719 + 0.01497616165031102661476978j) + >>> chop(appellf2(1,1+j,1-j,3j,-3j,0.25,0.25)) + 1.201311219287411337955192 + >>> appellf2(1,1,1,4,6,0.125,16) + (-0.09455532250274744282125152 - 0.7647282253046207836769297j) + +A transformation formula:: + + >>> a,b1,b2,c1,c2,x,y = map(mpf, [1,2,0.5,0.25,1.625,-0.125,0.125]) + >>> appellf2(a,b1,b2,c1,c2,x,y) + 0.2299211717841180783309688 + >>> (1-x)**(-a)*appellf2(a,c1-b1,b2,c1,c2,x/(x-1),y/(1-x)) + 0.2299211717841180783309688 + +A system of partial differential equations satisfied by F2:: + + >>> a,b1,b2,c1,c2,x,y = map(mpf, [1,0.5,0.25,1.125,1.5,0.0625,-0.0625]) + >>> F = lambda x,y: appellf2(a,b1,b2,c1,c2,x,y) + >>> chop(x*(1-x)*diff(F,(x,y),(2,0)) - + ... x*y*diff(F,(x,y),(1,1)) + + ... (c1-(a+b1+1)*x)*diff(F,(x,y),(1,0)) - + ... b1*y*diff(F,(x,y),(0,1)) - + ... a*b1*F(x,y)) + 0.0 + >>> chop(y*(1-y)*diff(F,(x,y),(0,2)) - + ... x*y*diff(F,(x,y),(1,1)) + + ... (c2-(a+b2+1)*y)*diff(F,(x,y),(0,1)) - + ... b2*x*diff(F,(x,y),(1,0)) - + ... a*b2*F(x,y)) + 0.0 + +**References** + +See references for :func:`~mpmath.appellf1`. +""" + +appellf3 = r""" +Gives the Appell F3 hypergeometric function of two variables + +.. math :: + + F_3(a_1,a_2,b_1,b_2,c,x,y) = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty} + \frac{(a_1)_m (a_2)_n (b_1)_m (b_2)_n}{(c)_{m+n}} + \frac{x^m y^n}{m! n!}. + +The series is generally absolutely convergent for `|x| < 1, |y| < 1`. 
+ +**Examples** + +Evaluation for various parameters and variables:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> appellf3(1,2,3,4,5,0.5,0.25) + 2.221557778107438938158705 + >>> appellf3(1,2,3,4,5,6,0); hyp2f1(1,3,5,6) + (-0.5189554589089861284537389 - 0.1454441043328607980769742j) + (-0.5189554589089861284537389 - 0.1454441043328607980769742j) + >>> appellf3(1,-2,-3,1,1,4,6) + -17.4 + >>> appellf3(1,2,-3,1,1,4,6) + (17.7876136773677356641825 + 19.54768762233649126154534j) + >>> appellf3(1,2,-3,1,1,6,4) + (85.02054175067929402953645 + 148.4402528821177305173599j) + >>> chop(appellf3(1+j,2,1-j,2,3,0.25,0.25)) + 1.719992169545200286696007 + +Many transformations and evaluations for special combinations +of the parameters are possible, e.g.: + + >>> a,b,c,x,y = map(mpf, [0.5,0.25,0.125,0.125,-0.125]) + >>> appellf3(a,c-a,b,c-b,c,x,y) + 1.093432340896087107444363 + >>> (1-y)**(a+b-c)*hyp2f1(a,b,c,x+y-x*y) + 1.093432340896087107444363 + >>> x**2*appellf3(1,1,1,1,3,x,-x) + 0.01568646277445385390945083 + >>> polylog(2,x**2) + 0.01568646277445385390945083 + >>> a1,a2,b1,b2,c,x = map(mpf, [0.5,0.25,0.125,0.5,4.25,0.125]) + >>> appellf3(a1,a2,b1,b2,c,x,1) + 1.03947361709111140096947 + >>> gammaprod([c,c-a2-b2],[c-a2,c-b2])*hyp3f2(a1,b1,c-a2-b2,c-a2,c-b2,x) + 1.03947361709111140096947 + +The Appell F3 function satisfies a pair of partial +differential equations:: + + >>> a1,a2,b1,b2,c,x,y = map(mpf, [0.5,0.25,0.125,0.5,0.625,0.0625,-0.0625]) + >>> F = lambda x,y: appellf3(a1,a2,b1,b2,c,x,y) + >>> chop(x*(1-x)*diff(F,(x,y),(2,0)) + + ... y*diff(F,(x,y),(1,1)) + + ... (c-(a1+b1+1)*x)*diff(F,(x,y),(1,0)) - + ... a1*b1*F(x,y)) + 0.0 + >>> chop(y*(1-y)*diff(F,(x,y),(0,2)) + + ... x*diff(F,(x,y),(1,1)) + + ... (c-(a2+b2+1)*y)*diff(F,(x,y),(0,1)) - + ... a2*b2*F(x,y)) + 0.0 + +**References** + +See references for :func:`~mpmath.appellf1`. +""" + +appellf4 = r""" +Gives the Appell F4 hypergeometric function of two variables + +.. 
math :: + + F_4(a,b,c_1,c_2,x,y) = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty} + \frac{(a)_{m+n} (b)_{m+n}}{(c_1)_m (c_2)_n} + \frac{x^m y^n}{m! n!}. + +The series is generally absolutely convergent for +`\sqrt{|x|} + \sqrt{|y|} < 1`. + +**Examples** + +Evaluation for various parameters and arguments:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> appellf4(1,1,2,2,0.25,0.125) + 1.286182069079718313546608 + >>> appellf4(-2,-3,4,5,4,5) + 34.8 + >>> appellf4(5,4,2,3,0.25j,-0.125j) + (-0.2585967215437846642163352 + 2.436102233553582711818743j) + +Reduction to `\,_2F_1` in a special case:: + + >>> a,b,c,x,y = map(mpf, [0.5,0.25,0.125,0.125,-0.125]) + >>> appellf4(a,b,c,a+b-c+1,x*(1-y),y*(1-x)) + 1.129143488466850868248364 + >>> hyp2f1(a,b,c,x)*hyp2f1(a,b,a+b-c+1,y) + 1.129143488466850868248364 + +A system of partial differential equations satisfied by F4:: + + >>> a,b,c1,c2,x,y = map(mpf, [1,0.5,0.25,1.125,0.0625,-0.0625]) + >>> F = lambda x,y: appellf4(a,b,c1,c2,x,y) + >>> chop(x*(1-x)*diff(F,(x,y),(2,0)) - + ... y**2*diff(F,(x,y),(0,2)) - + ... 2*x*y*diff(F,(x,y),(1,1)) + + ... (c1-(a+b+1)*x)*diff(F,(x,y),(1,0)) - + ... ((a+b+1)*y)*diff(F,(x,y),(0,1)) - + ... a*b*F(x,y)) + 0.0 + >>> chop(y*(1-y)*diff(F,(x,y),(0,2)) - + ... x**2*diff(F,(x,y),(2,0)) - + ... 2*x*y*diff(F,(x,y),(1,1)) + + ... (c2-(a+b+1)*y)*diff(F,(x,y),(0,1)) - + ... ((a+b+1)*x)*diff(F,(x,y),(1,0)) - + ... a*b*F(x,y)) + 0.0 + +**References** + +See references for :func:`~mpmath.appellf1`. +""" + +zeta = r""" +Computes the Riemann zeta function + +.. math :: + + \zeta(s) = 1+\frac{1}{2^s}+\frac{1}{3^s}+\frac{1}{4^s}+\ldots + +or, with `a \ne 1`, the more general Hurwitz zeta function + +.. math :: + + \zeta(s,a) = \sum_{k=0}^\infty \frac{1}{(a+k)^s}. + +Optionally, ``zeta(s, a, n)`` computes the `n`-th derivative with +respect to `s`, + +.. math :: + + \zeta^{(n)}(s,a) = (-1)^n \sum_{k=0}^\infty \frac{\log^n(a+k)}{(a+k)^s}. 
+ +Although these series only converge for `\Re(s) > 1`, the Riemann and Hurwitz +zeta functions are defined through analytic continuation for arbitrary +complex `s \ne 1` (`s = 1` is a pole). + +The implementation uses three algorithms: the Borwein algorithm for +the Riemann zeta function when `s` is close to the real line; +the Riemann-Siegel formula for the Riemann zeta function when `s` is +large imaginary, and Euler-Maclaurin summation in all other cases. +The reflection formula for `\Re(s) < 0` is implemented in some cases. +The algorithm can be chosen with ``method = 'borwein'``, +``method='riemann-siegel'`` or ``method = 'euler-maclaurin'``. + +The parameter `a` is usually a rational number `a = p/q`, and may be specified +as such by passing an integer tuple `(p, q)`. Evaluation is supported for +arbitrary complex `a`, but may be slow and/or inaccurate when `\Re(s) < 0` for +nonrational `a` or when computing derivatives. + +**Examples** + +Some values of the Riemann zeta function:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> zeta(2); pi**2 / 6 + 1.644934066848226436472415 + 1.644934066848226436472415 + >>> zeta(0) + -0.5 + >>> zeta(-1) + -0.08333333333333333333333333 + >>> zeta(-2) + 0.0 + +For large positive `s`, `\zeta(s)` rapidly approaches 1:: + + >>> zeta(50) + 1.000000000000000888178421 + >>> zeta(100) + 1.0 + >>> zeta(inf) + 1.0 + >>> 1-sum((zeta(k)-1)/k for k in range(2,85)); +euler + 0.5772156649015328606065121 + 0.5772156649015328606065121 + >>> nsum(lambda k: zeta(k)-1, [2, inf]) + 1.0 + +Evaluation is supported for complex `s` and `a`: + + >>> zeta(-3+4j) + (-0.03373057338827757067584698 + 0.2774499251557093745297677j) + >>> zeta(2+3j, -1+j) + (389.6841230140842816370741 + 295.2674610150305334025962j) + +The Riemann zeta function has so-called nontrivial zeros on +the critical line `s = 1/2 + it`:: + + >>> findroot(zeta, 0.5+14j); zetazero(1) + (0.5 + 14.13472514173469379045725j) + (0.5 + 14.13472514173469379045725j) + 
>>> findroot(zeta, 0.5+21j); zetazero(2) + (0.5 + 21.02203963877155499262848j) + (0.5 + 21.02203963877155499262848j) + >>> findroot(zeta, 0.5+25j); zetazero(3) + (0.5 + 25.01085758014568876321379j) + (0.5 + 25.01085758014568876321379j) + >>> chop(zeta(zetazero(10))) + 0.0 + +Evaluation on and near the critical line is supported for large +heights `t` by means of the Riemann-Siegel formula (currently +for `a = 1`, `n \le 4`):: + + >>> zeta(0.5+100000j) + (1.073032014857753132114076 + 5.780848544363503984261041j) + >>> zeta(0.75+1000000j) + (0.9535316058375145020351559 + 0.9525945894834273060175651j) + >>> zeta(0.5+10000000j) + (11.45804061057709254500227 - 8.643437226836021723818215j) + >>> zeta(0.5+100000000j, derivative=1) + (51.12433106710194942681869 + 43.87221167872304520599418j) + >>> zeta(0.5+100000000j, derivative=2) + (-444.2760822795430400549229 - 896.3789978119185981665403j) + >>> zeta(0.5+100000000j, derivative=3) + (3230.72682687670422215339 + 14374.36950073615897616781j) + >>> zeta(0.5+100000000j, derivative=4) + (-11967.35573095046402130602 - 218945.7817789262839266148j) + >>> zeta(1+10000000j) # off the line + (2.859846483332530337008882 + 0.491808047480981808903986j) + >>> zeta(1+10000000j, derivative=1) + (-4.333835494679647915673205 - 0.08405337962602933636096103j) + >>> zeta(1+10000000j, derivative=4) + (453.2764822702057701894278 - 581.963625832768189140995j) + +For investigation of the zeta function zeros, the Riemann-Siegel +Z-function is often more convenient than working with the Riemann +zeta function directly (see :func:`~mpmath.siegelz`). 
+ +Some values of the Hurwitz zeta function:: + + >>> zeta(2, 3); -5./4 + pi**2/6 + 0.3949340668482264364724152 + 0.3949340668482264364724152 + >>> zeta(2, (3,4)); pi**2 - 8*catalan + 2.541879647671606498397663 + 2.541879647671606498397663 + +For positive integer values of `s`, the Hurwitz zeta function is +equivalent to a polygamma function (except for a normalizing factor):: + + >>> zeta(4, (1,5)); psi(3, '1/5')/6 + 625.5408324774542966919938 + 625.5408324774542966919938 + +Evaluation of derivatives:: + + >>> zeta(0, 3+4j, 1); loggamma(3+4j) - ln(2*pi)/2 + (-2.675565317808456852310934 + 4.742664438034657928194889j) + (-2.675565317808456852310934 + 4.742664438034657928194889j) + >>> zeta(2, 1, 20) + 2432902008176640000.000242 + >>> zeta(3+4j, 5.5+2j, 4) + (-0.140075548947797130681075 - 0.3109263360275413251313634j) + >>> zeta(0.5+100000j, 1, 4) + (-10407.16081931495861539236 + 13777.78669862804508537384j) + >>> zeta(-100+0.5j, (1,3), derivative=4) + (4.007180821099823942702249e+79 + 4.916117957092593868321778e+78j) + +Generating a Taylor series at `s = 2` using derivatives:: + + >>> for k in range(11): print("%s * (s-2)^%i" % (zeta(2,1,k)/fac(k), k)) + ... 
+ 1.644934066848226436472415 * (s-2)^0 + -0.9375482543158437537025741 * (s-2)^1 + 0.9946401171494505117104293 * (s-2)^2 + -1.000024300473840810940657 * (s-2)^3 + 1.000061933072352565457512 * (s-2)^4 + -1.000006869443931806408941 * (s-2)^5 + 1.000000173233769531820592 * (s-2)^6 + -0.9999999569989868493432399 * (s-2)^7 + 0.9999999937218844508684206 * (s-2)^8 + -0.9999999996355013916608284 * (s-2)^9 + 1.000000000004610645020747 * (s-2)^10 + +Evaluation at zero and for negative integer `s`:: + + >>> zeta(0, 10) + -9.5 + >>> zeta(-2, (2,3)); mpf(1)/81 + 0.01234567901234567901234568 + 0.01234567901234567901234568 + >>> zeta(-3+4j, (5,4)) + (0.2899236037682695182085988 + 0.06561206166091757973112783j) + >>> zeta(-3.25, 1/pi) + -0.0005117269627574430494396877 + >>> zeta(-3.5, pi, 1) + 11.156360390440003294709 + >>> zeta(-100.5, (8,3)) + -4.68162300487989766727122e+77 + >>> zeta(-10.5, (-8,3)) + (-0.01521913704446246609237979 + 29907.72510874248161608216j) + >>> zeta(-1000.5, (-8,3)) + (1.031911949062334538202567e+1770 + 1.519555750556794218804724e+426j) + >>> zeta(-1+j, 3+4j) + (-16.32988355630802510888631 - 22.17706465801374033261383j) + >>> zeta(-1+j, 3+4j, 2) + (32.48985276392056641594055 - 51.11604466157397267043655j) + >>> diff(lambda s: zeta(s, 3+4j), -1+j, 2) + (32.48985276392056641594055 - 51.11604466157397267043655j) + +**References** + +1. http://mathworld.wolfram.com/RiemannZetaFunction.html + +2. http://mathworld.wolfram.com/HurwitzZetaFunction.html + +3. [BorweinZeta]_ + +""" + +dirichlet = r""" +Evaluates the Dirichlet L-function + +.. math :: + + L(s,\chi) = \sum_{k=1}^\infty \frac{\chi(k)}{k^s}. + +where `\chi` is a periodic sequence of length `q` which should be supplied +in the form of a list `[\chi(0), \chi(1), \ldots, \chi(q-1)]`. +Strictly, `\chi` should be a Dirichlet character, but any periodic +sequence will work. 
+ +For example, ``dirichlet(s, [1])`` gives the ordinary +Riemann zeta function and ``dirichlet(s, [-1,1])`` gives +the alternating zeta function (Dirichlet eta function). + +Also the derivative with respect to `s` (currently only a first +derivative) can be evaluated. + +**Examples** + +The ordinary Riemann zeta function:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> dirichlet(3, [1]); zeta(3) + 1.202056903159594285399738 + 1.202056903159594285399738 + >>> dirichlet(1, [1]) + +inf + +The alternating zeta function:: + + >>> dirichlet(1, [-1,1]); ln(2) + 0.6931471805599453094172321 + 0.6931471805599453094172321 + +The following defines the Dirichlet beta function +`\beta(s) = \sum_{k=0}^\infty \frac{(-1)^k}{(2k+1)^s}` and verifies +several values of this function:: + + >>> B = lambda s, d=0: dirichlet(s, [0, 1, 0, -1], d) + >>> B(0); 1./2 + 0.5 + 0.5 + >>> B(1); pi/4 + 0.7853981633974483096156609 + 0.7853981633974483096156609 + >>> B(2); +catalan + 0.9159655941772190150546035 + 0.9159655941772190150546035 + >>> B(2,1); diff(B, 2) + 0.08158073611659279510291217 + 0.08158073611659279510291217 + >>> B(-1,1); 2*catalan/pi + 0.5831218080616375602767689 + 0.5831218080616375602767689 + >>> B(0,1); log(gamma(0.25)**2/(2*pi*sqrt(2))) + 0.3915943927068367764719453 + 0.3915943927068367764719454 + >>> B(1,1); 0.25*pi*(euler+2*ln2+3*ln(pi)-4*ln(gamma(0.25))) + 0.1929013167969124293631898 + 0.1929013167969124293631898 + +A custom L-series of period 3:: + + >>> dirichlet(2, [2,0,1]) + 0.7059715047839078092146831 + >>> 2*nsum(lambda k: (3*k)**-2, [1,inf]) + \ + ... nsum(lambda k: (3*k+2)**-2, [0,inf]) + 0.7059715047839078092146831 + +""" + +coulombf = r""" +Calculates the regular Coulomb wave function + +.. math :: + + F_l(\eta,z) = C_l(\eta) z^{l+1} e^{-iz} \,_1F_1(l+1-i\eta, 2l+2, 2iz) + +where the normalization constant `C_l(\eta)` is as calculated by +:func:`~mpmath.coulombc`. This function solves the differential equation + +.. 
math :: + + f''(z) + \left(1-\frac{2\eta}{z}-\frac{l(l+1)}{z^2}\right) f(z) = 0. + +A second linearly independent solution is given by the irregular +Coulomb wave function `G_l(\eta,z)` (see :func:`~mpmath.coulombg`) +and thus the general solution is +`f(z) = C_1 F_l(\eta,z) + C_2 G_l(\eta,z)` for arbitrary +constants `C_1`, `C_2`. +Physically, the Coulomb wave functions give the radial solution +to the Schrodinger equation for a point particle in a `1/z` potential; `z` is +then the radius and `l`, `\eta` are quantum numbers. + +The Coulomb wave functions with real parameters are defined +in Abramowitz & Stegun, section 14. However, all parameters are permitted +to be complex in this implementation (see references). + +**Plots** + +.. literalinclude :: /plots/coulombf.py +.. image :: /plots/coulombf.png +.. literalinclude :: /plots/coulombf_c.py +.. image :: /plots/coulombf_c.png + +**Examples** + +Evaluation is supported for arbitrary magnitudes of `z`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> coulombf(2, 1.5, 3.5) + 0.4080998961088761187426445 + >>> coulombf(-2, 1.5, 3.5) + 0.7103040849492536747533465 + >>> coulombf(2, 1.5, '1e-10') + 4.143324917492256448770769e-33 + >>> coulombf(2, 1.5, 1000) + 0.4482623140325567050716179 + >>> coulombf(2, 1.5, 10**10) + -0.066804196437694360046619 + +Verifying the differential equation:: + + >>> l, eta, z = 2, 3, mpf(2.75) + >>> A, B = 1, 2 + >>> f = lambda z: A*coulombf(l,eta,z) + B*coulombg(l,eta,z) + >>> chop(diff(f,z,2) + (1-2*eta/z - l*(l+1)/z**2)*f(z)) + 0.0 + +A Wronskian relation satisfied by the Coulomb wave functions:: + + >>> l = 2 + >>> eta = 1.5 + >>> F = lambda z: coulombf(l,eta,z) + >>> G = lambda z: coulombg(l,eta,z) + >>> for z in [3.5, -1, 2+3j]: + ... chop(diff(F,z)*G(z) - F(z)*diff(G,z)) + ... + 1.0 + 1.0 + 1.0 + +Another Wronskian relation:: + + >>> F = coulombf + >>> G = coulombg + >>> for z in [3.5, -1, 2+3j]: + ... 
chop(F(l-1,eta,z)*G(l,eta,z)-F(l,eta,z)*G(l-1,eta,z) - l/sqrt(l**2+eta**2)) + ... + 0.0 + 0.0 + 0.0 + +An integral identity connecting the regular and irregular wave functions:: + + >>> l, eta, z = 4+j, 2-j, 5+2j + >>> coulombf(l,eta,z) + j*coulombg(l,eta,z) + (0.7997977752284033239714479 + 0.9294486669502295512503127j) + >>> g = lambda t: exp(-t)*t**(l-j*eta)*(t+2*j*z)**(l+j*eta) + >>> j*exp(-j*z)*z**(-l)/fac(2*l+1)/coulombc(l,eta)*quad(g, [0,inf]) + (0.7997977752284033239714479 + 0.9294486669502295512503127j) + +Some test case with complex parameters, taken from Michel [2]:: + + >>> mp.dps = 15 + >>> coulombf(1+0.1j, 50+50j, 100.156) + (-1.02107292320897e+15 - 2.83675545731519e+15j) + >>> coulombg(1+0.1j, 50+50j, 100.156) + (2.83675545731519e+15 - 1.02107292320897e+15j) + >>> coulombf(1e-5j, 10+1e-5j, 0.1+1e-6j) + (4.30566371247811e-14 - 9.03347835361657e-19j) + >>> coulombg(1e-5j, 10+1e-5j, 0.1+1e-6j) + (778709182061.134 + 18418936.2660553j) + +The following reproduces a table in Abramowitz & Stegun, at twice +the precision:: + + >>> mp.dps = 10 + >>> eta = 2; z = 5 + >>> for l in [5, 4, 3, 2, 1, 0]: + ... print("%s %s %s" % (l, coulombf(l,eta,z), + ... diff(lambda z: coulombf(l,eta,z), z))) + ... + 5 0.09079533488 0.1042553261 + 4 0.2148205331 0.2029591779 + 3 0.4313159311 0.320534053 + 2 0.7212774133 0.3952408216 + 1 0.9935056752 0.3708676452 + 0 1.143337392 0.2937960375 + +**References** + +1. I.J. Thompson & A.R. Barnett, "Coulomb and Bessel Functions of Complex + Arguments and Order", J. Comp. Phys., vol 64, no. 2, June 1986. + +2. N. Michel, "Precise Coulomb wave functions for a wide range of + complex `l`, `\eta` and `z`", http://arxiv.org/abs/physics/0702051v1 + +""" + +coulombg = r""" +Calculates the irregular Coulomb wave function + +.. math :: + + G_l(\eta,z) = \frac{F_l(\eta,z) \cos(\chi) - F_{-l-1}(\eta,z)}{\sin(\chi)} + +where `\chi = \sigma_l - \sigma_{-l-1} - (l+1/2) \pi` +and `\sigma_l(\eta) = (\ln \Gamma(1+l+i\eta)-\ln \Gamma(1+l-i\eta))/(2i)`. 
+ +See :func:`~mpmath.coulombf` for additional information. + +**Plots** + +.. literalinclude :: /plots/coulombg.py +.. image :: /plots/coulombg.png +.. literalinclude :: /plots/coulombg_c.py +.. image :: /plots/coulombg_c.png + +**Examples** + +Evaluation is supported for arbitrary magnitudes of `z`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> coulombg(-2, 1.5, 3.5) + 1.380011900612186346255524 + >>> coulombg(2, 1.5, 3.5) + 1.919153700722748795245926 + >>> coulombg(-2, 1.5, '1e-10') + 201126715824.7329115106793 + >>> coulombg(-2, 1.5, 1000) + 0.1802071520691149410425512 + >>> coulombg(-2, 1.5, 10**10) + 0.652103020061678070929794 + +The following reproduces a table in Abramowitz & Stegun, +at twice the precision:: + + >>> mp.dps = 10 + >>> eta = 2; z = 5 + >>> for l in [1, 2, 3, 4, 5]: + ... print("%s %s %s" % (l, coulombg(l,eta,z), + ... -diff(lambda z: coulombg(l,eta,z), z))) + ... + 1 1.08148276 0.6028279961 + 2 1.496877075 0.5661803178 + 3 2.048694714 0.7959909551 + 4 3.09408669 1.731802374 + 5 5.629840456 4.549343289 + +Evaluation close to the singularity at `z = 0`:: + + >>> mp.dps = 15 + >>> coulombg(0,10,1) + 3088184933.67358 + >>> coulombg(0,10,'1e-10') + 5554866000719.8 + >>> coulombg(0,10,'1e-100') + 5554866221524.1 + +Evaluation with a half-integer value for `l`:: + + >>> coulombg(1.5, 1, 10) + 0.852320038297334 +""" + +coulombc = r""" +Gives the normalizing Gamow constant for Coulomb wave functions, + +.. math :: + + C_l(\eta) = 2^l \exp\left(-\pi \eta/2 + [\ln \Gamma(1+l+i\eta) + + \ln \Gamma(1+l-i\eta)]/2 - \ln \Gamma(2l+2)\right), + +where the log gamma function with continuous imaginary part +away from the negative half axis (see :func:`~mpmath.loggamma`) is implied. + +This function is used internally for the calculation of +Coulomb wave functions, and automatically cached to make multiple +evaluations with fixed `l`, `\eta` fast. 
+""" + +ellipfun = r""" +Computes any of the Jacobi elliptic functions, defined +in terms of Jacobi theta functions as + +.. math :: + + \mathrm{sn}(u,m) = \frac{\vartheta_3(0,q)}{\vartheta_2(0,q)} + \frac{\vartheta_1(t,q)}{\vartheta_4(t,q)} + + \mathrm{cn}(u,m) = \frac{\vartheta_4(0,q)}{\vartheta_2(0,q)} + \frac{\vartheta_2(t,q)}{\vartheta_4(t,q)} + + \mathrm{dn}(u,m) = \frac{\vartheta_4(0,q)}{\vartheta_3(0,q)} + \frac{\vartheta_3(t,q)}{\vartheta_4(t,q)}, + +or more generally computes a ratio of two such functions. Here +`t = u/\vartheta_3(0,q)^2`, and `q = q(m)` denotes the nome (see +:func:`~mpmath.nome`). Optionally, you can specify the nome directly +instead of `m` by passing ``q=``, or you can directly +specify the elliptic parameter `k` with ``k=``. + +The first argument should be a two-character string specifying the +function using any combination of ``'s'``, ``'c'``, ``'d'``, ``'n'``. These +letters respectively denote the basic functions +`\mathrm{sn}(u,m)`, `\mathrm{cn}(u,m)`, `\mathrm{dn}(u,m)`, and `1`. +The identifier specifies the ratio of two such functions. +For example, ``'ns'`` identifies the function + +.. math :: + + \mathrm{ns}(u,m) = \frac{1}{\mathrm{sn}(u,m)} + +and ``'cd'`` identifies the function + +.. math :: + + \mathrm{cd}(u,m) = \frac{\mathrm{cn}(u,m)}{\mathrm{dn}(u,m)}. + +If called with only the first argument, a function object +evaluating the chosen function for given arguments is returned. 
+ +**Examples** + +Basic evaluation:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> ellipfun('cd', 3.5, 0.5) + -0.9891101840595543931308394 + >>> ellipfun('cd', 3.5, q=0.25) + 0.07111979240214668158441418 + +The sn-function is doubly periodic in the complex plane with periods +`4 K(m)` and `2 i K(1-m)` (see :func:`~mpmath.ellipk`):: + + >>> sn = ellipfun('sn') + >>> sn(2, 0.25) + 0.9628981775982774425751399 + >>> sn(2+4*ellipk(0.25), 0.25) + 0.9628981775982774425751399 + >>> chop(sn(2+2*j*ellipk(1-0.25), 0.25)) + 0.9628981775982774425751399 + +The cn-function is doubly periodic with periods `4 K(m)` and `2 K(m) + 2 i K(1-m)`:: + + >>> cn = ellipfun('cn') + >>> cn(2, 0.25) + -0.2698649654510865792581416 + >>> cn(2+4*ellipk(0.25), 0.25) + -0.2698649654510865792581416 + >>> chop(cn(2+2*ellipk(0.25)+2*j*ellipk(1-0.25), 0.25)) + -0.2698649654510865792581416 + +The dn-function is doubly periodic with periods `2 K(m)` and `4 i K(1-m)`:: + + >>> dn = ellipfun('dn') + >>> dn(2, 0.25) + 0.8764740583123262286931578 + >>> dn(2+2*ellipk(0.25), 0.25) + 0.8764740583123262286931578 + >>> chop(dn(2+4*j*ellipk(1-0.25), 0.25)) + 0.8764740583123262286931578 + +""" + + +jtheta = r""" +Computes the Jacobi theta function `\vartheta_n(z, q)`, where +`n = 1, 2, 3, 4`, defined by the infinite series: + +.. math :: + + \vartheta_1(z,q) = 2 q^{1/4} \sum_{n=0}^{\infty} + (-1)^n q^{n^2+n\,} \sin((2n+1)z) + + \vartheta_2(z,q) = 2 q^{1/4} \sum_{n=0}^{\infty} + q^{n^{2\,} + n} \cos((2n+1)z) + + \vartheta_3(z,q) = 1 + 2 \sum_{n=1}^{\infty} + q^{n^2\,} \cos(2 n z) + + \vartheta_4(z,q) = 1 + 2 \sum_{n=1}^{\infty} + (-q)^{n^2\,} \cos(2 n z) + +The theta functions are functions of two variables: + +* `z` is the *argument*, an arbitrary real or complex number + +* `q` is the *nome*, which must be a real or complex number + in the unit disk (i.e. `|q| < 1`). 
For `|q| \ll 1`, the + series converge very quickly, so the Jacobi theta functions + can efficiently be evaluated to high precision. + +The compact notations `\vartheta_n(q) = \vartheta_n(0,q)` +and `\vartheta_n = \vartheta_n(0,q)` are also frequently +encountered. Finally, Jacobi theta functions are frequently +considered as functions of the half-period ratio `\tau` +and then usually denoted by `\vartheta_n(z|\tau)`. + +Optionally, ``jtheta(n, z, q, derivative=d)`` with `d > 0` computes +a `d`-th derivative with respect to `z`. + +**Examples and basic properties** + +Considered as functions of `z`, the Jacobi theta functions may be +viewed as generalizations of the ordinary trigonometric functions +cos and sin. They are periodic functions:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> jtheta(1, 0.25, '0.2') + 0.2945120798627300045053104 + >>> jtheta(1, 0.25 + 2*pi, '0.2') + 0.2945120798627300045053104 + +Indeed, the series defining the theta functions are essentially +trigonometric Fourier series. 
The coefficients can be retrieved +using :func:`~mpmath.fourier`:: + + >>> mp.dps = 10 + >>> nprint(fourier(lambda x: jtheta(2, x, 0.5), [-pi, pi], 4)) + ([0.0, 1.68179, 0.0, 0.420448, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0]) + +The Jacobi theta functions are also so-called quasiperiodic +functions of `z` and `\tau`, meaning that for fixed `\tau`, +`\vartheta_n(z, q)` and `\vartheta_n(z+\pi \tau, q)` are the same +except for an exponential factor:: + + >>> mp.dps = 25 + >>> tau = 3*j/10 + >>> q = exp(pi*j*tau) + >>> z = 10 + >>> jtheta(4, z+tau*pi, q) + (-0.682420280786034687520568 + 1.526683999721399103332021j) + >>> -exp(-2*j*z)/q * jtheta(4, z, q) + (-0.682420280786034687520568 + 1.526683999721399103332021j) + +The Jacobi theta functions satisfy a huge number of other +functional equations, such as the following identity (valid for +any `q`):: + + >>> q = mpf(3)/10 + >>> jtheta(3,0,q)**4 + 6.823744089352763305137427 + >>> jtheta(2,0,q)**4 + jtheta(4,0,q)**4 + 6.823744089352763305137427 + +Extensive listings of identities satisfied by the Jacobi theta +functions can be found in standard reference works. + +The Jacobi theta functions are related to the gamma function +for special arguments:: + + >>> jtheta(3, 0, exp(-pi)) + 1.086434811213308014575316 + >>> pi**(1/4.) / gamma(3/4.) 
+ 1.086434811213308014575316 + +:func:`~mpmath.jtheta` supports arbitrary precision evaluation and complex +arguments:: + + >>> mp.dps = 50 + >>> jtheta(4, sqrt(2), 0.5) + 2.0549510717571539127004115835148878097035750653737 + >>> mp.dps = 25 + >>> jtheta(4, 1+2j, (1+j)/5) + (7.180331760146805926356634 - 1.634292858119162417301683j) + +Evaluation of derivatives:: + + >>> mp.dps = 25 + >>> jtheta(1, 7, 0.25, 1); diff(lambda z: jtheta(1, z, 0.25), 7) + 1.209857192844475388637236 + 1.209857192844475388637236 + >>> jtheta(1, 7, 0.25, 2); diff(lambda z: jtheta(1, z, 0.25), 7, 2) + -0.2598718791650217206533052 + -0.2598718791650217206533052 + >>> jtheta(2, 7, 0.25, 1); diff(lambda z: jtheta(2, z, 0.25), 7) + -1.150231437070259644461474 + -1.150231437070259644461474 + >>> jtheta(2, 7, 0.25, 2); diff(lambda z: jtheta(2, z, 0.25), 7, 2) + -0.6226636990043777445898114 + -0.6226636990043777445898114 + >>> jtheta(3, 7, 0.25, 1); diff(lambda z: jtheta(3, z, 0.25), 7) + -0.9990312046096634316587882 + -0.9990312046096634316587882 + >>> jtheta(3, 7, 0.25, 2); diff(lambda z: jtheta(3, z, 0.25), 7, 2) + -0.1530388693066334936151174 + -0.1530388693066334936151174 + >>> jtheta(4, 7, 0.25, 1); diff(lambda z: jtheta(4, z, 0.25), 7) + 0.9820995967262793943571139 + 0.9820995967262793943571139 + >>> jtheta(4, 7, 0.25, 2); diff(lambda z: jtheta(4, z, 0.25), 7, 2) + 0.3936902850291437081667755 + 0.3936902850291437081667755 + +**Possible issues** + +For `|q| \ge 1` or `\Im(\tau) \le 0`, :func:`~mpmath.jtheta` raises +``ValueError``. This exception is also raised for `|q|` extremely +close to 1 (or equivalently `\tau` very close to 0), since the +series would converge too slowly:: + + >>> jtheta(1, 10, 0.99999999 * exp(0.5*j)) + Traceback (most recent call last): + ... + ValueError: abs(q) > THETA_Q_LIM = 1.000000 + +""" + +eulernum = r""" +Gives the `n`-th Euler number, defined as the `n`-th derivative of +`\mathrm{sech}(t) = 1/\cosh(t)` evaluated at `t = 0`. 
Equivalently, the +Euler numbers give the coefficients of the Taylor series + +.. math :: + + \mathrm{sech}(t) = \sum_{n=0}^{\infty} \frac{E_n}{n!} t^n. + +The Euler numbers are closely related to Bernoulli numbers +and Bernoulli polynomials. They can also be evaluated in terms of +Euler polynomials (see :func:`~mpmath.eulerpoly`) as `E_n = 2^n E_n(1/2)`. + +**Examples** + +Computing the first few Euler numbers and verifying that they +agree with the Taylor series:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> [eulernum(n) for n in range(11)] + [1.0, 0.0, -1.0, 0.0, 5.0, 0.0, -61.0, 0.0, 1385.0, 0.0, -50521.0] + >>> chop(diffs(sech, 0, 10)) + [1.0, 0.0, -1.0, 0.0, 5.0, 0.0, -61.0, 0.0, 1385.0, 0.0, -50521.0] + +Euler numbers grow very rapidly. :func:`~mpmath.eulernum` efficiently +computes numerical approximations for large indices:: + + >>> eulernum(50) + -6.053285248188621896314384e+54 + >>> eulernum(1000) + 3.887561841253070615257336e+2371 + >>> eulernum(10**20) + 4.346791453661149089338186e+1936958564106659551331 + +Comparing with an asymptotic formula for the Euler numbers:: + + >>> n = 10**5 + >>> (-1)**(n//2) * 8 * sqrt(n/(2*pi)) * (2*n/(pi*e))**n + 3.69919063017432362805663e+436961 + >>> eulernum(n) + 3.699193712834466537941283e+436961 + +Pass ``exact=True`` to obtain exact values of Euler numbers as integers:: + + >>> print(eulernum(50, exact=True)) + -6053285248188621896314383785111649088103498225146815121 + >>> print(eulernum(200, exact=True) % 10**10) + 1925859625 + >>> eulernum(1001, exact=True) + 0 +""" + +eulerpoly = r""" +Evaluates the Euler polynomial `E_n(z)`, defined by the generating function +representation + +.. math :: + + \frac{2e^{zt}}{e^t+1} = \sum_{n=0}^\infty E_n(z) \frac{t^n}{n!}. + +The Euler polynomials may also be represented in terms of +Bernoulli polynomials (see :func:`~mpmath.bernpoly`) using various formulas, for +example + +.. 
math ::
+
+    E_n(z) = \frac{2}{n+1} \left(
+        B_{n+1}(z)-2^{n+1}B_{n+1}\left(\frac{z}{2}\right)
+    \right).
+
+Special values include the Euler numbers `E_n = 2^n E_n(1/2)` (see
+:func:`~mpmath.eulernum`).
+
+**Examples**
+
+Computing the coefficients of the first few Euler polynomials::
+
+    >>> from mpmath import *
+    >>> mp.dps = 25; mp.pretty = True
+    >>> for n in range(6):
+    ...     chop(taylor(lambda z: eulerpoly(n,z), 0, n))
+    ...
+    [1.0]
+    [-0.5, 1.0]
+    [0.0, -1.0, 1.0]
+    [0.25, 0.0, -1.5, 1.0]
+    [0.0, 1.0, 0.0, -2.0, 1.0]
+    [-0.5, 0.0, 2.5, 0.0, -2.5, 1.0]
+
+Evaluation for arbitrary `z`::
+
+    >>> eulerpoly(2,3)
+    6.0
+    >>> eulerpoly(5,4)
+    423.5
+    >>> eulerpoly(35, 11111111112)
+    3.994957561486776072734601e+351
+    >>> eulerpoly(4, 10+20j)
+    (-47990.0 - 235980.0j)
+    >>> eulerpoly(2, '-3.5e-5')
+    0.000035001225
+    >>> eulerpoly(3, 0.5)
+    0.0
+    >>> eulerpoly(55, -10**80)
+    -1.0e+4400
+    >>> eulerpoly(5, -inf)
+    -inf
+    >>> eulerpoly(6, -inf)
+    +inf
+
+Computing Euler numbers::
+
+    >>> 2**26 * eulerpoly(26,0.5)
+    -4087072509293123892361.0
+    >>> eulernum(26)
+    -4087072509293123892361.0
+
+Evaluation is accurate for large `n` and small `z`::
+
+    >>> eulerpoly(100, 0.5)
+    2.29047999988194114177943e+108
+    >>> eulerpoly(1000, 10.5)
+    3.628120031122876847764566e+2070
+    >>> eulerpoly(10000, 10.5)
+    1.149364285543783412210773e+30688
+"""
+
+spherharm = r"""
+Evaluates the spherical harmonic `Y_l^m(\theta,\phi)`,
+
+.. math ::
+
+    Y_l^m(\theta,\phi) = \sqrt{\frac{2l+1}{4\pi}\frac{(l-m)!}{(l+m)!}}
+        P_l^m(\cos \theta) e^{i m \phi}
+
+where `P_l^m` is an associated Legendre function (see :func:`~mpmath.legenp`).
+
+Here `\theta \in [0, \pi]` denotes the polar coordinate (ranging
+from the north pole to the south pole) and `\phi \in [0, 2 \pi]` denotes the
+azimuthal coordinate on a sphere. Care should be used since many different
+conventions for spherical coordinate variables are used.
+
+Usually spherical harmonics are considered for `l \in \mathbb{N}`,
+`m \in \mathbb{Z}`, `|m| \le l`. 
More generally, `l,m,\theta,\phi` +are permitted to be complex numbers. + +.. note :: + + :func:`~mpmath.spherharm` returns a complex number, even if the value is + purely real. + +**Plots** + +.. literalinclude :: /plots/spherharm40.py + +`Y_{4,0}`: + +.. image :: /plots/spherharm40.png + +`Y_{4,1}`: + +.. image :: /plots/spherharm41.png + +`Y_{4,2}`: + +.. image :: /plots/spherharm42.png + +`Y_{4,3}`: + +.. image :: /plots/spherharm43.png + +`Y_{4,4}`: + +.. image :: /plots/spherharm44.png + +**Examples** + +Some low-order spherical harmonics with reference values:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> theta = pi/4 + >>> phi = pi/3 + >>> spherharm(0,0,theta,phi); 0.5*sqrt(1/pi)*expj(0) + (0.2820947917738781434740397 + 0.0j) + (0.2820947917738781434740397 + 0.0j) + >>> spherharm(1,-1,theta,phi); 0.5*sqrt(3/(2*pi))*expj(-phi)*sin(theta) + (0.1221506279757299803965962 - 0.2115710938304086076055298j) + (0.1221506279757299803965962 - 0.2115710938304086076055298j) + >>> spherharm(1,0,theta,phi); 0.5*sqrt(3/pi)*cos(theta)*expj(0) + (0.3454941494713354792652446 + 0.0j) + (0.3454941494713354792652446 + 0.0j) + >>> spherharm(1,1,theta,phi); -0.5*sqrt(3/(2*pi))*expj(phi)*sin(theta) + (-0.1221506279757299803965962 - 0.2115710938304086076055298j) + (-0.1221506279757299803965962 - 0.2115710938304086076055298j) + +With the normalization convention used, the spherical harmonics are orthonormal +on the unit sphere:: + + >>> sphere = [0,pi], [0,2*pi] + >>> dS = lambda t,p: fp.sin(t) # differential element + >>> Y1 = lambda t,p: fp.spherharm(l1,m1,t,p) + >>> Y2 = lambda t,p: fp.conj(fp.spherharm(l2,m2,t,p)) + >>> l1 = l2 = 3; m1 = m2 = 2 + >>> fp.chop(fp.quad(lambda t,p: Y1(t,p)*Y2(t,p)*dS(t,p), *sphere)) + 1.0000000000000007 + >>> m2 = 1 # m1 != m2 + >>> print(fp.chop(fp.quad(lambda t,p: Y1(t,p)*Y2(t,p)*dS(t,p), *sphere))) + 0.0 + +Evaluation is accurate for large orders:: + + >>> spherharm(1000,750,0.5,0.25) + (3.776445785304252879026585e-102 - 
5.82441278771834794493484e-102j) + +Evaluation works with complex parameter values:: + + >>> spherharm(1+j, 2j, 2+3j, -0.5j) + (64.44922331113759992154992 + 1981.693919841408089681743j) +""" + +scorergi = r""" +Evaluates the Scorer function + +.. math :: + + \operatorname{Gi}(z) = + \operatorname{Ai}(z) \int_0^z \operatorname{Bi}(t) dt + + \operatorname{Bi}(z) \int_z^{\infty} \operatorname{Ai}(t) dt + +which gives a particular solution to the inhomogeneous Airy +differential equation `f''(z) - z f(z) = 1/\pi`. Another +particular solution is given by the Scorer Hi-function +(:func:`~mpmath.scorerhi`). The two functions are related as +`\operatorname{Gi}(z) + \operatorname{Hi}(z) = \operatorname{Bi}(z)`. + +**Plots** + +.. literalinclude :: /plots/gi.py +.. image :: /plots/gi.png +.. literalinclude :: /plots/gi_c.py +.. image :: /plots/gi_c.png + +**Examples** + +Some values and limits:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> scorergi(0); 1/(power(3,'7/6')*gamma('2/3')) + 0.2049755424820002450503075 + 0.2049755424820002450503075 + >>> diff(scorergi, 0); 1/(power(3,'5/6')*gamma('1/3')) + 0.1494294524512754526382746 + 0.1494294524512754526382746 + >>> scorergi(+inf); scorergi(-inf) + 0.0 + 0.0 + >>> scorergi(1) + 0.2352184398104379375986902 + >>> scorergi(-1) + -0.1166722172960152826494198 + +Evaluation for large arguments:: + + >>> scorergi(10) + 0.03189600510067958798062034 + >>> scorergi(100) + 0.003183105228162961476590531 + >>> scorergi(1000000) + 0.0000003183098861837906721743873 + >>> 1/(pi*1000000) + 0.0000003183098861837906715377675 + >>> scorergi(-1000) + -0.08358288400262780392338014 + >>> scorergi(-100000) + 0.02886866118619660226809581 + >>> scorergi(50+10j) + (0.0061214102799778578790984 - 0.001224335676457532180747917j) + >>> scorergi(-50-10j) + (5.236047850352252236372551e+29 - 3.08254224233701381482228e+29j) + >>> scorergi(100000j) + (-8.806659285336231052679025e+6474077 + 8.684731303500835514850962e+6474077j) + 
+Verifying the connection between Gi and Hi:: + + >>> z = 0.25 + >>> scorergi(z) + scorerhi(z) + 0.7287469039362150078694543 + >>> airybi(z) + 0.7287469039362150078694543 + +Verifying the differential equation:: + + >>> for z in [-3.4, 0, 2.5, 1+2j]: + ... chop(diff(scorergi,z,2) - z*scorergi(z)) + ... + -0.3183098861837906715377675 + -0.3183098861837906715377675 + -0.3183098861837906715377675 + -0.3183098861837906715377675 + +Verifying the integral representation:: + + >>> z = 0.5 + >>> scorergi(z) + 0.2447210432765581976910539 + >>> Ai,Bi = airyai,airybi + >>> Bi(z)*(Ai(inf,-1)-Ai(z,-1)) + Ai(z)*(Bi(z,-1)-Bi(0,-1)) + 0.2447210432765581976910539 + +**References** + +1. [DLMF]_ section 9.12: Scorer Functions + +""" + +scorerhi = r""" +Evaluates the second Scorer function + +.. math :: + + \operatorname{Hi}(z) = + \operatorname{Bi}(z) \int_{-\infty}^z \operatorname{Ai}(t) dt - + \operatorname{Ai}(z) \int_{-\infty}^z \operatorname{Bi}(t) dt + +which gives a particular solution to the inhomogeneous Airy +differential equation `f''(z) - z f(z) = 1/\pi`. See also +:func:`~mpmath.scorergi`. + +**Plots** + +.. literalinclude :: /plots/hi.py +.. image :: /plots/hi.png +.. literalinclude :: /plots/hi_c.py +.. 
image :: /plots/hi_c.png + +**Examples** + +Some values and limits:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> scorerhi(0); 2/(power(3,'7/6')*gamma('2/3')) + 0.4099510849640004901006149 + 0.4099510849640004901006149 + >>> diff(scorerhi,0); 2/(power(3,'5/6')*gamma('1/3')) + 0.2988589049025509052765491 + 0.2988589049025509052765491 + >>> scorerhi(+inf); scorerhi(-inf) + +inf + 0.0 + >>> scorerhi(1) + 0.9722051551424333218376886 + >>> scorerhi(-1) + 0.2206696067929598945381098 + +Evaluation for large arguments:: + + >>> scorerhi(10) + 455641153.5163291358991077 + >>> scorerhi(100) + 6.041223996670201399005265e+288 + >>> scorerhi(1000000) + 7.138269638197858094311122e+289529652 + >>> scorerhi(-10) + 0.0317685352825022727415011 + >>> scorerhi(-100) + 0.003183092495767499864680483 + >>> scorerhi(100j) + (-6.366197716545672122983857e-9 + 0.003183098861710582761688475j) + >>> scorerhi(50+50j) + (-5.322076267321435669290334e+63 + 1.478450291165243789749427e+65j) + >>> scorerhi(-1000-1000j) + (0.0001591549432510502796565538 - 0.000159154943091895334973109j) + +Verifying the differential equation:: + + >>> for z in [-3.4, 0, 2, 1+2j]: + ... chop(diff(scorerhi,z,2) - z*scorerhi(z)) + ... + 0.3183098861837906715377675 + 0.3183098861837906715377675 + 0.3183098861837906715377675 + 0.3183098861837906715377675 + +Verifying the integral representation:: + + >>> z = 0.5 + >>> scorerhi(z) + 0.6095559998265972956089949 + >>> Ai,Bi = airyai,airybi + >>> Bi(z)*(Ai(z,-1)-Ai(-inf,-1)) - Ai(z)*(Bi(z,-1)-Bi(-inf,-1)) + 0.6095559998265972956089949 + +""" + + +stirling1 = r""" +Gives the Stirling number of the first kind `s(n,k)`, defined by + +.. math :: + + x(x-1)(x-2)\cdots(x-n+1) = \sum_{k=0}^n s(n,k) x^k. + +The value is computed using an integer recurrence. The implementation +is not optimized for approximating large values quickly. 
+ +**Examples** + +Comparing with the generating function:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> taylor(lambda x: ff(x, 5), 0, 5) + [0.0, 24.0, -50.0, 35.0, -10.0, 1.0] + >>> [stirling1(5, k) for k in range(6)] + [0.0, 24.0, -50.0, 35.0, -10.0, 1.0] + +Recurrence relation:: + + >>> n, k = 5, 3 + >>> stirling1(n+1,k) + n*stirling1(n,k) - stirling1(n,k-1) + 0.0 + +The matrices of Stirling numbers of first and second kind are inverses +of each other:: + + >>> A = matrix(5, 5); B = matrix(5, 5) + >>> for n in range(5): + ... for k in range(5): + ... A[n,k] = stirling1(n,k) + ... B[n,k] = stirling2(n,k) + ... + >>> A * B + [1.0 0.0 0.0 0.0 0.0] + [0.0 1.0 0.0 0.0 0.0] + [0.0 0.0 1.0 0.0 0.0] + [0.0 0.0 0.0 1.0 0.0] + [0.0 0.0 0.0 0.0 1.0] + +Pass ``exact=True`` to obtain exact values of Stirling numbers as integers:: + + >>> stirling1(42, 5) + -2.864498971768501633736628e+50 + >>> print(stirling1(42, 5, exact=True)) + -286449897176850163373662803014001546235808317440000 + +""" + +stirling2 = r""" +Gives the Stirling number of the second kind `S(n,k)`, defined by + +.. math :: + + x^n = \sum_{k=0}^n S(n,k) x(x-1)(x-2)\cdots(x-k+1) + +The value is computed using integer arithmetic to evaluate a power sum. +The implementation is not optimized for approximating large values quickly. + +**Examples** + +Comparing with the generating function:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> taylor(lambda x: sum(stirling2(5,k) * ff(x,k) for k in range(6)), 0, 5) + [0.0, 0.0, 0.0, 0.0, 0.0, 1.0] + +Recurrence relation:: + + >>> n, k = 5, 3 + >>> stirling2(n+1,k) - k*stirling2(n,k) - stirling2(n,k-1) + 0.0 + +Pass ``exact=True`` to obtain exact values of Stirling numbers as integers:: + + >>> stirling2(52, 10) + 2.641822121003543906807485e+45 + >>> print(stirling2(52, 10, exact=True)) + 2641822121003543906807485307053638921722527655 + + +""" + +squarew = r""" +Computes the square wave function using the definition: + +.. 
math:: + x(t) = A(-1)^{\left\lfloor{2t / P}\right\rfloor} + +where `P` is the period of the wave and `A` is the amplitude. + +**Examples** + +Square wave with period = 2, amplitude = 1 :: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> squarew(0,1,2) + 1.0 + >>> squarew(0.5,1,2) + 1.0 + >>> squarew(1,1,2) + -1.0 + >>> squarew(1.5,1,2) + -1.0 + >>> squarew(2,1,2) + 1.0 +""" + +trianglew = r""" +Computes the triangle wave function using the definition: + +.. math:: + x(t) = 2A\left(\frac{1}{2}-\left|1-2 \operatorname{frac}\left(\frac{x}{P}+\frac{1}{4}\right)\right|\right) + +where :math:`\operatorname{frac}\left(\frac{t}{T}\right) = \frac{t}{T}-\left\lfloor{\frac{t}{T}}\right\rfloor` +, `P` is the period of the wave, and `A` is the amplitude. + +**Examples** + +Triangle wave with period = 2, amplitude = 1 :: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> trianglew(0,1,2) + 0.0 + >>> trianglew(0.25,1,2) + 0.5 + >>> trianglew(0.5,1,2) + 1.0 + >>> trianglew(1,1,2) + 0.0 + >>> trianglew(1.5,1,2) + -1.0 + >>> trianglew(2,1,2) + 0.0 +""" + +sawtoothw = r""" +Computes the sawtooth wave function using the definition: + +.. math:: + x(t) = A\operatorname{frac}\left(\frac{t}{T}\right) + +where :math:`\operatorname{frac}\left(\frac{t}{T}\right) = \frac{t}{T}-\left\lfloor{\frac{t}{T}}\right\rfloor`, +`P` is the period of the wave, and `A` is the amplitude. + +**Examples** + +Sawtooth wave with period = 2, amplitude = 1 :: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> sawtoothw(0,1,2) + 0.0 + >>> sawtoothw(0.5,1,2) + 0.25 + >>> sawtoothw(1,1,2) + 0.5 + >>> sawtoothw(1.5,1,2) + 0.75 + >>> sawtoothw(2,1,2) + 0.0 +""" + +unit_triangle = r""" +Computes the unit triangle using the definition: + +.. math:: + x(t) = A(-\left| t \right| + 1) + +where `A` is the amplitude. 
+ +**Examples** + +Unit triangle with amplitude = 1 :: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> unit_triangle(-1,1) + 0.0 + >>> unit_triangle(-0.5,1) + 0.5 + >>> unit_triangle(0,1) + 1.0 + >>> unit_triangle(0.5,1) + 0.5 + >>> unit_triangle(1,1) + 0.0 +""" + +sigmoid = r""" +Computes the sigmoid function using the definition: + +.. math:: + x(t) = \frac{A}{1 + e^{-t}} + +where `A` is the amplitude. + +**Examples** + +Sigmoid function with amplitude = 1 :: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> sigmoid(-1,1) + 0.2689414213699951207488408 + >>> sigmoid(-0.5,1) + 0.3775406687981454353610994 + >>> sigmoid(0,1) + 0.5 + >>> sigmoid(0.5,1) + 0.6224593312018545646389006 + >>> sigmoid(1,1) + 0.7310585786300048792511592 + +""" diff --git a/.venv/lib/python3.11/site-packages/mpmath/functions/__init__.py b/.venv/lib/python3.11/site-packages/mpmath/functions/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..5896ed0579eceab086dc5c67eaa649b6061a53dc --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/functions/__init__.py @@ -0,0 +1,14 @@ +from . import functions +# Hack to update methods +from . import factorials +from . import hypergeometric +from . import expintegrals +from . import bessel +from . import orthogonal +from . import theta +from . import elliptic +from . import signals +from . import zeta +from . import rszeta +from . import zetazeros +from . 
import qfunctions diff --git a/.venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/orthogonal.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/orthogonal.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..57d9307ae96ebbb2207eecaab61f569eb89b1aa5 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/orthogonal.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/qfunctions.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/qfunctions.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bcce1a3669361567fc02a8bfd50832fdc1199afc Binary files /dev/null and b/.venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/qfunctions.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/signals.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/signals.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e573e7fc398cd9c6a0a2a3801db700470a23903 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/signals.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/theta.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/theta.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c036072d311984c8477ebbc6e6daeb275031a0c Binary files /dev/null and b/.venv/lib/python3.11/site-packages/mpmath/functions/__pycache__/theta.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/functions/bessel.py b/.venv/lib/python3.11/site-packages/mpmath/functions/bessel.py new file mode 100644 index 0000000000000000000000000000000000000000..8b41d87bb0118de61d5561433dabcb181f872f84 --- /dev/null +++ 
b/.venv/lib/python3.11/site-packages/mpmath/functions/bessel.py @@ -0,0 +1,1108 @@ +from .functions import defun, defun_wrapped + +@defun +def j0(ctx, x): + """Computes the Bessel function `J_0(x)`. See :func:`~mpmath.besselj`.""" + return ctx.besselj(0, x) + +@defun +def j1(ctx, x): + """Computes the Bessel function `J_1(x)`. See :func:`~mpmath.besselj`.""" + return ctx.besselj(1, x) + +@defun +def besselj(ctx, n, z, derivative=0, **kwargs): + if type(n) is int: + n_isint = True + else: + n = ctx.convert(n) + n_isint = ctx.isint(n) + if n_isint: + n = int(ctx._re(n)) + if n_isint and n < 0: + return (-1)**n * ctx.besselj(-n, z, derivative, **kwargs) + z = ctx.convert(z) + M = ctx.mag(z) + if derivative: + d = ctx.convert(derivative) + # TODO: the integer special-casing shouldn't be necessary. + # However, the hypergeometric series gets inaccurate for large d + # because of inaccurate pole cancellation at a pole far from + # zero (needs to be fixed in hypercomb or hypsum) + if ctx.isint(d) and d >= 0: + d = int(d) + orig = ctx.prec + try: + ctx.prec += 15 + v = ctx.fsum((-1)**k * ctx.binomial(d,k) * ctx.besselj(2*k+n-d,z) + for k in range(d+1)) + finally: + ctx.prec = orig + v *= ctx.mpf(2)**(-d) + else: + def h(n,d): + r = ctx.fmul(ctx.fmul(z, z, prec=ctx.prec+M), -0.25, exact=True) + B = [0.5*(n-d+1), 0.5*(n-d+2)] + T = [([2,ctx.pi,z],[d-2*n,0.5,n-d],[],B,[(n+1)*0.5,(n+2)*0.5],B+[n+1],r)] + return T + v = ctx.hypercomb(h, [n,d], **kwargs) + else: + # Fast case: J_n(x), n int, appropriate magnitude for fixed-point calculation + if (not derivative) and n_isint and abs(M) < 10 and abs(n) < 20: + try: + return ctx._besselj(n, z) + except NotImplementedError: + pass + if not z: + if not n: + v = ctx.one + n+z + elif ctx.re(n) > 0: + v = n*z + else: + v = ctx.inf + z + n + else: + #v = 0 + orig = ctx.prec + try: + # XXX: workaround for accuracy in low level hypergeometric series + # when alternating, large arguments + ctx.prec += min(3*abs(M), ctx.prec) + w = 
ctx.fmul(z, 0.5, exact=True) + def h(n): + r = ctx.fneg(ctx.fmul(w, w, prec=max(0,ctx.prec+M)), exact=True) + return [([w], [n], [], [n+1], [], [n+1], r)] + v = ctx.hypercomb(h, [n], **kwargs) + finally: + ctx.prec = orig + v = +v + return v + +@defun +def besseli(ctx, n, z, derivative=0, **kwargs): + n = ctx.convert(n) + z = ctx.convert(z) + if not z: + if derivative: + raise ValueError + if not n: + # I(0,0) = 1 + return 1+n+z + if ctx.isint(n): + return 0*(n+z) + r = ctx.re(n) + if r == 0: + return ctx.nan*(n+z) + elif r > 0: + return 0*(n+z) + else: + return ctx.inf+(n+z) + M = ctx.mag(z) + if derivative: + d = ctx.convert(derivative) + def h(n,d): + r = ctx.fmul(ctx.fmul(z, z, prec=ctx.prec+M), 0.25, exact=True) + B = [0.5*(n-d+1), 0.5*(n-d+2), n+1] + T = [([2,ctx.pi,z],[d-2*n,0.5,n-d],[n+1],B,[(n+1)*0.5,(n+2)*0.5],B,r)] + return T + v = ctx.hypercomb(h, [n,d], **kwargs) + else: + def h(n): + w = ctx.fmul(z, 0.5, exact=True) + r = ctx.fmul(w, w, prec=max(0,ctx.prec+M)) + return [([w], [n], [], [n+1], [], [n+1], r)] + v = ctx.hypercomb(h, [n], **kwargs) + return v + +@defun_wrapped +def bessely(ctx, n, z, derivative=0, **kwargs): + if not z: + if derivative: + # Not implemented + raise ValueError + if not n: + # ~ log(z/2) + return -ctx.inf + (n+z) + if ctx.im(n): + return ctx.nan * (n+z) + r = ctx.re(n) + q = n+0.5 + if ctx.isint(q): + if n > 0: + return -ctx.inf + (n+z) + else: + return 0 * (n+z) + if r < 0 and int(ctx.floor(q)) % 2: + return ctx.inf + (n+z) + else: + return ctx.ninf + (n+z) + # XXX: use hypercomb + ctx.prec += 10 + m, d = ctx.nint_distance(n) + if d < -ctx.prec: + h = +ctx.eps + ctx.prec *= 2 + n += h + elif d < 0: + ctx.prec -= d + # TODO: avoid cancellation for imaginary arguments + cos, sin = ctx.cospi_sinpi(n) + return (ctx.besselj(n,z,derivative,**kwargs)*cos - \ + ctx.besselj(-n,z,derivative,**kwargs))/sin + +@defun_wrapped +def besselk(ctx, n, z, **kwargs): + if not z: + return ctx.inf + M = ctx.mag(z) + if M < 1: + # Represent as 
limit definition + def h(n): + r = (z/2)**2 + T1 = [z, 2], [-n, n-1], [n], [], [], [1-n], r + T2 = [z, 2], [n, -n-1], [-n], [], [], [1+n], r + return T1, T2 + # We could use the limit definition always, but it leads + # to very bad cancellation (of exponentially large terms) + # for large real z + # Instead represent in terms of 2F0 + else: + ctx.prec += M + def h(n): + return [([ctx.pi/2, z, ctx.exp(-z)], [0.5,-0.5,1], [], [], \ + [n+0.5, 0.5-n], [], -1/(2*z))] + return ctx.hypercomb(h, [n], **kwargs) + +@defun_wrapped +def hankel1(ctx,n,x,**kwargs): + return ctx.besselj(n,x,**kwargs) + ctx.j*ctx.bessely(n,x,**kwargs) + +@defun_wrapped +def hankel2(ctx,n,x,**kwargs): + return ctx.besselj(n,x,**kwargs) - ctx.j*ctx.bessely(n,x,**kwargs) + +@defun_wrapped +def whitm(ctx,k,m,z,**kwargs): + if z == 0: + # M(k,m,z) = 0^(1/2+m) + if ctx.re(m) > -0.5: + return z + elif ctx.re(m) < -0.5: + return ctx.inf + z + else: + return ctx.nan * z + x = ctx.fmul(-0.5, z, exact=True) + y = 0.5+m + return ctx.exp(x) * z**y * ctx.hyp1f1(y-k, 1+2*m, z, **kwargs) + +@defun_wrapped +def whitw(ctx,k,m,z,**kwargs): + if z == 0: + g = abs(ctx.re(m)) + if g < 0.5: + return z + elif g > 0.5: + return ctx.inf + z + else: + return ctx.nan * z + x = ctx.fmul(-0.5, z, exact=True) + y = 0.5+m + return ctx.exp(x) * z**y * ctx.hyperu(y-k, 1+2*m, z, **kwargs) + +@defun +def hyperu(ctx, a, b, z, **kwargs): + a, atype = ctx._convert_param(a) + b, btype = ctx._convert_param(b) + z = ctx.convert(z) + if not z: + if ctx.re(b) <= 1: + return ctx.gammaprod([1-b],[a-b+1]) + else: + return ctx.inf + z + bb = 1+a-b + bb, bbtype = ctx._convert_param(bb) + try: + orig = ctx.prec + try: + ctx.prec += 10 + v = ctx.hypsum(2, 0, (atype, bbtype), [a, bb], -1/z, maxterms=ctx.prec) + return v / z**a + finally: + ctx.prec = orig + except ctx.NoConvergence: + pass + def h(a,b): + w = ctx.sinpi(b) + T1 = ([ctx.pi,w],[1,-1],[],[a-b+1,b],[a],[b],z) + T2 = ([-ctx.pi,w,z],[1,-1,1-b],[],[a,2-b],[a-b+1],[2-b],z) + return T1, T2 + 
return ctx.hypercomb(h, [a,b], **kwargs) + +@defun +def struveh(ctx,n,z, **kwargs): + n = ctx.convert(n) + z = ctx.convert(z) + # http://functions.wolfram.com/Bessel-TypeFunctions/StruveH/26/01/02/ + def h(n): + return [([z/2, 0.5*ctx.sqrt(ctx.pi)], [n+1, -1], [], [n+1.5], [1], [1.5, n+1.5], -(z/2)**2)] + return ctx.hypercomb(h, [n], **kwargs) + +@defun +def struvel(ctx,n,z, **kwargs): + n = ctx.convert(n) + z = ctx.convert(z) + # http://functions.wolfram.com/Bessel-TypeFunctions/StruveL/26/01/02/ + def h(n): + return [([z/2, 0.5*ctx.sqrt(ctx.pi)], [n+1, -1], [], [n+1.5], [1], [1.5, n+1.5], (z/2)**2)] + return ctx.hypercomb(h, [n], **kwargs) + +def _anger(ctx,which,v,z,**kwargs): + v = ctx._convert_param(v)[0] + z = ctx.convert(z) + def h(v): + b = ctx.mpq_1_2 + u = v*b + m = b*3 + a1,a2,b1,b2 = m-u, m+u, 1-u, 1+u + c, s = ctx.cospi_sinpi(u) + if which == 0: + A, B = [b*z, s], [c] + if which == 1: + A, B = [b*z, -c], [s] + w = ctx.square_exp_arg(z, mult=-0.25) + T1 = A, [1, 1], [], [a1,a2], [1], [a1,a2], w + T2 = B, [1], [], [b1,b2], [1], [b1,b2], w + return T1, T2 + return ctx.hypercomb(h, [v], **kwargs) + +@defun +def angerj(ctx, v, z, **kwargs): + return _anger(ctx, 0, v, z, **kwargs) + +@defun +def webere(ctx, v, z, **kwargs): + return _anger(ctx, 1, v, z, **kwargs) + +@defun +def lommels1(ctx, u, v, z, **kwargs): + u = ctx._convert_param(u)[0] + v = ctx._convert_param(v)[0] + z = ctx.convert(z) + def h(u,v): + b = ctx.mpq_1_2 + w = ctx.square_exp_arg(z, mult=-0.25) + return ([u-v+1, u+v+1, z], [-1, -1, u+1], [], [], [1], \ + [b*(u-v+3),b*(u+v+3)], w), + return ctx.hypercomb(h, [u,v], **kwargs) + +@defun +def lommels2(ctx, u, v, z, **kwargs): + u = ctx._convert_param(u)[0] + v = ctx._convert_param(v)[0] + z = ctx.convert(z) + # Asymptotic expansion (GR p. 
947) -- need to be careful + # not to use for small arguments + # def h(u,v): + # b = ctx.mpq_1_2 + # w = -(z/2)**(-2) + # return ([z], [u-1], [], [], [b*(1-u+v)], [b*(1-u-v)], w), + def h(u,v): + b = ctx.mpq_1_2 + w = ctx.square_exp_arg(z, mult=-0.25) + T1 = [u-v+1, u+v+1, z], [-1, -1, u+1], [], [], [1], [b*(u-v+3),b*(u+v+3)], w + T2 = [2, z], [u+v-1, -v], [v, b*(u+v+1)], [b*(v-u+1)], [], [1-v], w + T3 = [2, z], [u-v-1, v], [-v, b*(u-v+1)], [b*(1-u-v)], [], [1+v], w + #c1 = ctx.cospi((u-v)*b) + #c2 = ctx.cospi((u+v)*b) + #s = ctx.sinpi(v) + #r1 = (u-v+1)*b + #r2 = (u+v+1)*b + #T2 = [c1, s, z, 2], [1, -1, -v, v], [], [-v+1], [], [-v+1], w + #T3 = [-c2, s, z, 2], [1, -1, v, -v], [], [v+1], [], [v+1], w + #T2 = [c1, s, z, 2], [1, -1, -v, v+u-1], [r1, r2], [-v+1], [], [-v+1], w + #T3 = [-c2, s, z, 2], [1, -1, v, -v+u-1], [r1, r2], [v+1], [], [v+1], w + return T1, T2, T3 + return ctx.hypercomb(h, [u,v], **kwargs) + +@defun +def ber(ctx, n, z, **kwargs): + n = ctx.convert(n) + z = ctx.convert(z) + # http://functions.wolfram.com/Bessel-TypeFunctions/KelvinBer2/26/01/02/0001/ + def h(n): + r = -(z/4)**4 + cos, sin = ctx.cospi_sinpi(-0.75*n) + T1 = [cos, z/2], [1, n], [], [n+1], [], [0.5, 0.5*(n+1), 0.5*n+1], r + T2 = [sin, z/2], [1, n+2], [], [n+2], [], [1.5, 0.5*(n+3), 0.5*n+1], r + return T1, T2 + return ctx.hypercomb(h, [n], **kwargs) + +@defun +def bei(ctx, n, z, **kwargs): + n = ctx.convert(n) + z = ctx.convert(z) + # http://functions.wolfram.com/Bessel-TypeFunctions/KelvinBei2/26/01/02/0001/ + def h(n): + r = -(z/4)**4 + cos, sin = ctx.cospi_sinpi(0.75*n) + T1 = [cos, z/2], [1, n+2], [], [n+2], [], [1.5, 0.5*(n+3), 0.5*n+1], r + T2 = [sin, z/2], [1, n], [], [n+1], [], [0.5, 0.5*(n+1), 0.5*n+1], r + return T1, T2 + return ctx.hypercomb(h, [n], **kwargs) + +@defun +def ker(ctx, n, z, **kwargs): + n = ctx.convert(n) + z = ctx.convert(z) + # http://functions.wolfram.com/Bessel-TypeFunctions/KelvinKer2/26/01/02/0001/ + def h(n): + r = -(z/4)**4 + cos1, sin1 = 
ctx.cospi_sinpi(0.25*n) + cos2, sin2 = ctx.cospi_sinpi(0.75*n) + T1 = [2, z, 4*cos1], [-n-3, n, 1], [-n], [], [], [0.5, 0.5*(1+n), 0.5*(n+2)], r + T2 = [2, z, -sin1], [-n-3, 2+n, 1], [-n-1], [], [], [1.5, 0.5*(3+n), 0.5*(n+2)], r + T3 = [2, z, 4*cos2], [n-3, -n, 1], [n], [], [], [0.5, 0.5*(1-n), 1-0.5*n], r + T4 = [2, z, -sin2], [n-3, 2-n, 1], [n-1], [], [], [1.5, 0.5*(3-n), 1-0.5*n], r + return T1, T2, T3, T4 + return ctx.hypercomb(h, [n], **kwargs) + +@defun +def kei(ctx, n, z, **kwargs): + n = ctx.convert(n) + z = ctx.convert(z) + # http://functions.wolfram.com/Bessel-TypeFunctions/KelvinKei2/26/01/02/0001/ + def h(n): + r = -(z/4)**4 + cos1, sin1 = ctx.cospi_sinpi(0.75*n) + cos2, sin2 = ctx.cospi_sinpi(0.25*n) + T1 = [-cos1, 2, z], [1, n-3, 2-n], [n-1], [], [], [1.5, 0.5*(3-n), 1-0.5*n], r + T2 = [-sin1, 2, z], [1, n-1, -n], [n], [], [], [0.5, 0.5*(1-n), 1-0.5*n], r + T3 = [-sin2, 2, z], [1, -n-1, n], [-n], [], [], [0.5, 0.5*(n+1), 0.5*(n+2)], r + T4 = [-cos2, 2, z], [1, -n-3, n+2], [-n-1], [], [], [1.5, 0.5*(n+3), 0.5*(n+2)], r + return T1, T2, T3, T4 + return ctx.hypercomb(h, [n], **kwargs) + +# TODO: do this more generically? 
+def c_memo(f): + name = f.__name__ + def f_wrapped(ctx): + cache = ctx._misc_const_cache + prec = ctx.prec + p,v = cache.get(name, (-1,0)) + if p >= prec: + return +v + else: + cache[name] = (prec, f(ctx)) + return cache[name][1] + return f_wrapped + +@c_memo +def _airyai_C1(ctx): + return 1 / (ctx.cbrt(9) * ctx.gamma(ctx.mpf(2)/3)) + +@c_memo +def _airyai_C2(ctx): + return -1 / (ctx.cbrt(3) * ctx.gamma(ctx.mpf(1)/3)) + +@c_memo +def _airybi_C1(ctx): + return 1 / (ctx.nthroot(3,6) * ctx.gamma(ctx.mpf(2)/3)) + +@c_memo +def _airybi_C2(ctx): + return ctx.nthroot(3,6) / ctx.gamma(ctx.mpf(1)/3) + +def _airybi_n2_inf(ctx): + prec = ctx.prec + try: + v = ctx.power(3,'2/3')*ctx.gamma('2/3')/(2*ctx.pi) + finally: + ctx.prec = prec + return +v + +# Derivatives at z = 0 +# TODO: could be expressed more elegantly using triple factorials +def _airyderiv_0(ctx, z, n, ntype, which): + if ntype == 'Z': + if n < 0: + return z + r = ctx.mpq_1_3 + prec = ctx.prec + try: + ctx.prec += 10 + v = ctx.gamma((n+1)*r) * ctx.power(3,n*r) / ctx.pi + if which == 0: + v *= ctx.sinpi(2*(n+1)*r) + v /= ctx.power(3,'2/3') + else: + v *= abs(ctx.sinpi(2*(n+1)*r)) + v /= ctx.power(3,'1/6') + finally: + ctx.prec = prec + return +v + z + else: + # singular (does the limit exist?) 
+ raise NotImplementedError + +@defun +def airyai(ctx, z, derivative=0, **kwargs): + z = ctx.convert(z) + if derivative: + n, ntype = ctx._convert_param(derivative) + else: + n = 0 + # Values at infinities + if not ctx.isnormal(z) and z: + if n and ntype == 'Z': + if n == -1: + if z == ctx.inf: + return ctx.mpf(1)/3 + 1/z + if z == ctx.ninf: + return ctx.mpf(-2)/3 + 1/z + if n < -1: + if z == ctx.inf: + return z + if z == ctx.ninf: + return (-1)**n * (-z) + if (not n) and z == ctx.inf or z == ctx.ninf: + return 1/z + # TODO: limits + raise ValueError("essential singularity of Ai(z)") + # Account for exponential scaling + if z: + extraprec = max(0, int(1.5*ctx.mag(z))) + else: + extraprec = 0 + if n: + if n == 1: + def h(): + # http://functions.wolfram.com/03.07.06.0005.01 + if ctx._re(z) > 4: + ctx.prec += extraprec + w = z**1.5; r = -0.75/w; u = -2*w/3 + ctx.prec -= extraprec + C = -ctx.exp(u)/(2*ctx.sqrt(ctx.pi))*ctx.nthroot(z,4) + return ([C],[1],[],[],[(-1,6),(7,6)],[],r), + # http://functions.wolfram.com/03.07.26.0001.01 + else: + ctx.prec += extraprec + w = z**3 / 9 + ctx.prec -= extraprec + C1 = _airyai_C1(ctx) * 0.5 + C2 = _airyai_C2(ctx) + T1 = [C1,z],[1,2],[],[],[],[ctx.mpq_5_3],w + T2 = [C2],[1],[],[],[],[ctx.mpq_1_3],w + return T1, T2 + return ctx.hypercomb(h, [], **kwargs) + else: + if z == 0: + return _airyderiv_0(ctx, z, n, ntype, 0) + # http://functions.wolfram.com/03.05.20.0004.01 + def h(n): + ctx.prec += extraprec + w = z**3/9 + ctx.prec -= extraprec + q13,q23,q43 = ctx.mpq_1_3, ctx.mpq_2_3, ctx.mpq_4_3 + a1=q13; a2=1; b1=(1-n)*q13; b2=(2-n)*q13; b3=1-n*q13 + T1 = [3, z], [n-q23, -n], [a1], [b1,b2,b3], \ + [a1,a2], [b1,b2,b3], w + a1=q23; b1=(2-n)*q13; b2=1-n*q13; b3=(4-n)*q13 + T2 = [3, z, -z], [n-q43, -n, 1], [a1], [b1,b2,b3], \ + [a1,a2], [b1,b2,b3], w + return T1, T2 + v = ctx.hypercomb(h, [n], **kwargs) + if ctx._is_real_type(z) and ctx.isint(n): + v = ctx._re(v) + return v + else: + def h(): + if ctx._re(z) > 4: + # We could use 1F1, but it 
results in huge cancellation; + # the following expansion is better. + # TODO: asymptotic series for derivatives + ctx.prec += extraprec + w = z**1.5; r = -0.75/w; u = -2*w/3 + ctx.prec -= extraprec + C = ctx.exp(u)/(2*ctx.sqrt(ctx.pi)*ctx.nthroot(z,4)) + return ([C],[1],[],[],[(1,6),(5,6)],[],r), + else: + ctx.prec += extraprec + w = z**3 / 9 + ctx.prec -= extraprec + C1 = _airyai_C1(ctx) + C2 = _airyai_C2(ctx) + T1 = [C1],[1],[],[],[],[ctx.mpq_2_3],w + T2 = [z*C2],[1],[],[],[],[ctx.mpq_4_3],w + return T1, T2 + return ctx.hypercomb(h, [], **kwargs) + +@defun +def airybi(ctx, z, derivative=0, **kwargs): + z = ctx.convert(z) + if derivative: + n, ntype = ctx._convert_param(derivative) + else: + n = 0 + # Values at infinities + if not ctx.isnormal(z) and z: + if n and ntype == 'Z': + if z == ctx.inf: + return z + if z == ctx.ninf: + if n == -1: + return 1/z + if n == -2: + return _airybi_n2_inf(ctx) + if n < -2: + return (-1)**n * (-z) + if not n: + if z == ctx.inf: + return z + if z == ctx.ninf: + return 1/z + # TODO: limits + raise ValueError("essential singularity of Bi(z)") + if z: + extraprec = max(0, int(1.5*ctx.mag(z))) + else: + extraprec = 0 + if n: + if n == 1: + # http://functions.wolfram.com/03.08.26.0001.01 + def h(): + ctx.prec += extraprec + w = z**3 / 9 + ctx.prec -= extraprec + C1 = _airybi_C1(ctx)*0.5 + C2 = _airybi_C2(ctx) + T1 = [C1,z],[1,2],[],[],[],[ctx.mpq_5_3],w + T2 = [C2],[1],[],[],[],[ctx.mpq_1_3],w + return T1, T2 + return ctx.hypercomb(h, [], **kwargs) + else: + if z == 0: + return _airyderiv_0(ctx, z, n, ntype, 1) + def h(n): + ctx.prec += extraprec + w = z**3/9 + ctx.prec -= extraprec + q13,q23,q43 = ctx.mpq_1_3, ctx.mpq_2_3, ctx.mpq_4_3 + q16 = ctx.mpq_1_6 + q56 = ctx.mpq_5_6 + a1=q13; a2=1; b1=(1-n)*q13; b2=(2-n)*q13; b3=1-n*q13 + T1 = [3, z], [n-q16, -n], [a1], [b1,b2,b3], \ + [a1,a2], [b1,b2,b3], w + a1=q23; b1=(2-n)*q13; b2=1-n*q13; b3=(4-n)*q13 + T2 = [3, z], [n-q56, 1-n], [a1], [b1,b2,b3], \ + [a1,a2], [b1,b2,b3], w + return T1, 
T2 + v = ctx.hypercomb(h, [n], **kwargs) + if ctx._is_real_type(z) and ctx.isint(n): + v = ctx._re(v) + return v + else: + def h(): + ctx.prec += extraprec + w = z**3 / 9 + ctx.prec -= extraprec + C1 = _airybi_C1(ctx) + C2 = _airybi_C2(ctx) + T1 = [C1],[1],[],[],[],[ctx.mpq_2_3],w + T2 = [z*C2],[1],[],[],[],[ctx.mpq_4_3],w + return T1, T2 + return ctx.hypercomb(h, [], **kwargs) + +def _airy_zero(ctx, which, k, derivative, complex=False): + # Asymptotic formulas are given in DLMF section 9.9 + def U(t): return t**(2/3.)*(1-7/(t**2*48)) + def T(t): return t**(2/3.)*(1+5/(t**2*48)) + k = int(k) + if k < 1: + raise ValueError("k cannot be less than 1") + if not derivative in (0,1): + raise ValueError("Derivative should lie between 0 and 1") + if which == 0: + if derivative: + return ctx.findroot(lambda z: ctx.airyai(z,1), + -U(3*ctx.pi*(4*k-3)/8)) + return ctx.findroot(ctx.airyai, -T(3*ctx.pi*(4*k-1)/8)) + if which == 1 and complex == False: + if derivative: + return ctx.findroot(lambda z: ctx.airybi(z,1), + -U(3*ctx.pi*(4*k-1)/8)) + return ctx.findroot(ctx.airybi, -T(3*ctx.pi*(4*k-3)/8)) + if which == 1 and complex == True: + if derivative: + t = 3*ctx.pi*(4*k-3)/8 + 0.75j*ctx.ln2 + s = ctx.expjpi(ctx.mpf(1)/3) * T(t) + return ctx.findroot(lambda z: ctx.airybi(z,1), s) + t = 3*ctx.pi*(4*k-1)/8 + 0.75j*ctx.ln2 + s = ctx.expjpi(ctx.mpf(1)/3) * U(t) + return ctx.findroot(ctx.airybi, s) + +@defun +def airyaizero(ctx, k, derivative=0): + return _airy_zero(ctx, 0, k, derivative, False) + +@defun +def airybizero(ctx, k, derivative=0, complex=False): + return _airy_zero(ctx, 1, k, derivative, complex) + +def _scorer(ctx, z, which, kwargs): + z = ctx.convert(z) + if ctx.isinf(z): + if z == ctx.inf: + if which == 0: return 1/z + if which == 1: return z + if z == ctx.ninf: + return 1/z + raise ValueError("essential singularity") + if z: + extraprec = max(0, int(1.5*ctx.mag(z))) + else: + extraprec = 0 + if kwargs.get('derivative'): + raise NotImplementedError + # Direct 
asymptotic expansions, to avoid + # exponentially large cancellation + try: + if ctx.mag(z) > 3: + if which == 0 and abs(ctx.arg(z)) < ctx.pi/3 * 0.999: + def h(): + return (([ctx.pi,z],[-1,-1],[],[],[(1,3),(2,3),1],[],9/z**3),) + return ctx.hypercomb(h, [], maxterms=ctx.prec, force_series=True) + if which == 1 and abs(ctx.arg(-z)) < 2*ctx.pi/3 * 0.999: + def h(): + return (([-ctx.pi,z],[-1,-1],[],[],[(1,3),(2,3),1],[],9/z**3),) + return ctx.hypercomb(h, [], maxterms=ctx.prec, force_series=True) + except ctx.NoConvergence: + pass + def h(): + A = ctx.airybi(z, **kwargs)/3 + B = -2*ctx.pi + if which == 1: + A *= 2 + B *= -1 + ctx.prec += extraprec + w = z**3/9 + ctx.prec -= extraprec + T1 = [A], [1], [], [], [], [], 0 + T2 = [B,z], [-1,2], [], [], [1], [ctx.mpq_4_3,ctx.mpq_5_3], w + return T1, T2 + return ctx.hypercomb(h, [], **kwargs) + +@defun +def scorergi(ctx, z, **kwargs): + return _scorer(ctx, z, 0, kwargs) + +@defun +def scorerhi(ctx, z, **kwargs): + return _scorer(ctx, z, 1, kwargs) + +@defun_wrapped +def coulombc(ctx, l, eta, _cache={}): + if (l, eta) in _cache and _cache[l,eta][0] >= ctx.prec: + return +_cache[l,eta][1] + G3 = ctx.loggamma(2*l+2) + G1 = ctx.loggamma(1+l+ctx.j*eta) + G2 = ctx.loggamma(1+l-ctx.j*eta) + v = 2**l * ctx.exp((-ctx.pi*eta+G1+G2)/2 - G3) + if not (ctx.im(l) or ctx.im(eta)): + v = ctx.re(v) + _cache[l,eta] = (ctx.prec, v) + return v + +@defun_wrapped +def coulombf(ctx, l, eta, z, w=1, chop=True, **kwargs): + # Regular Coulomb wave function + # Note: w can be either 1 or -1; the other may be better in some cases + # TODO: check that chop=True chops when and only when it should + #ctx.prec += 10 + def h(l, eta): + try: + jw = ctx.j*w + jwz = ctx.fmul(jw, z, exact=True) + jwz2 = ctx.fmul(jwz, -2, exact=True) + C = ctx.coulombc(l, eta) + T1 = [C, z, ctx.exp(jwz)], [1, l+1, 1], [], [], [1+l+jw*eta], \ + [2*l+2], jwz2 + except ValueError: + T1 = [0], [-1], [], [], [], [], 0 + return (T1,) + v = ctx.hypercomb(h, [l,eta], **kwargs) + if 
chop and (not ctx.im(l)) and (not ctx.im(eta)) and (not ctx.im(z)) and \ + (ctx.re(z) >= 0): + v = ctx.re(v) + return v + +@defun_wrapped +def _coulomb_chi(ctx, l, eta, _cache={}): + if (l, eta) in _cache and _cache[l,eta][0] >= ctx.prec: + return _cache[l,eta][1] + def terms(): + l2 = -l-1 + jeta = ctx.j*eta + return [ctx.loggamma(1+l+jeta) * (-0.5j), + ctx.loggamma(1+l-jeta) * (0.5j), + ctx.loggamma(1+l2+jeta) * (0.5j), + ctx.loggamma(1+l2-jeta) * (-0.5j), + -(l+0.5)*ctx.pi] + v = ctx.sum_accurately(terms, 1) + _cache[l,eta] = (ctx.prec, v) + return v + +@defun_wrapped +def coulombg(ctx, l, eta, z, w=1, chop=True, **kwargs): + # Irregular Coulomb wave function + # Note: w can be either 1 or -1; the other may be better in some cases + # TODO: check that chop=True chops when and only when it should + if not ctx._im(l): + l = ctx._re(l) # XXX: for isint + def h(l, eta): + # Force perturbation for integers and half-integers + if ctx.isint(l*2): + T1 = [0], [-1], [], [], [], [], 0 + return (T1,) + l2 = -l-1 + try: + chi = ctx._coulomb_chi(l, eta) + jw = ctx.j*w + s = ctx.sin(chi); c = ctx.cos(chi) + C1 = ctx.coulombc(l,eta) + C2 = ctx.coulombc(l2,eta) + u = ctx.exp(jw*z) + x = -2*jw*z + T1 = [s, C1, z, u, c], [-1, 1, l+1, 1, 1], [], [], \ + [1+l+jw*eta], [2*l+2], x + T2 = [-s, C2, z, u], [-1, 1, l2+1, 1], [], [], \ + [1+l2+jw*eta], [2*l2+2], x + return T1, T2 + except ValueError: + T1 = [0], [-1], [], [], [], [], 0 + return (T1,) + v = ctx.hypercomb(h, [l,eta], **kwargs) + if chop and (not ctx._im(l)) and (not ctx._im(eta)) and (not ctx._im(z)) and \ + (ctx._re(z) >= 0): + v = ctx._re(v) + return v + +def mcmahon(ctx,kind,prime,v,m): + """ + Computes an estimate for the location of the Bessel function zero + j_{v,m}, y_{v,m}, j'_{v,m} or y'_{v,m} using McMahon's asymptotic + expansion (Abramowitz & Stegun 9.5.12-13, DLMF 20.21(vi)). 
+ + Returns (r,err) where r is the estimated location of the root + and err is a positive number estimating the error of the + asymptotic expansion. + """ + u = 4*v**2 + if kind == 1 and not prime: b = (4*m+2*v-1)*ctx.pi/4 + if kind == 2 and not prime: b = (4*m+2*v-3)*ctx.pi/4 + if kind == 1 and prime: b = (4*m+2*v-3)*ctx.pi/4 + if kind == 2 and prime: b = (4*m+2*v-1)*ctx.pi/4 + if not prime: + s1 = b + s2 = -(u-1)/(8*b) + s3 = -4*(u-1)*(7*u-31)/(3*(8*b)**3) + s4 = -32*(u-1)*(83*u**2-982*u+3779)/(15*(8*b)**5) + s5 = -64*(u-1)*(6949*u**3-153855*u**2+1585743*u-6277237)/(105*(8*b)**7) + if prime: + s1 = b + s2 = -(u+3)/(8*b) + s3 = -4*(7*u**2+82*u-9)/(3*(8*b)**3) + s4 = -32*(83*u**3+2075*u**2-3039*u+3537)/(15*(8*b)**5) + s5 = -64*(6949*u**4+296492*u**3-1248002*u**2+7414380*u-5853627)/(105*(8*b)**7) + terms = [s1,s2,s3,s4,s5] + s = s1 + err = 0.0 + for i in range(1,len(terms)): + if abs(terms[i]) < abs(terms[i-1]): + s += terms[i] + else: + err = abs(terms[i]) + if i == len(terms)-1: + err = abs(terms[-1]) + return s, err + +def generalized_bisection(ctx,f,a,b,n): + """ + Given f known to have exactly n simple roots within [a,b], + return a list of n intervals isolating the roots + and having opposite signs at the endpoints. + + TODO: this can be optimized, e.g. by reusing evaluation points. 
+ """ + if n < 1: + raise ValueError("n cannot be less than 1") + N = n+1 + points = [] + signs = [] + while 1: + points = ctx.linspace(a,b,N) + signs = [ctx.sign(f(x)) for x in points] + ok_intervals = [(points[i],points[i+1]) for i in range(N-1) \ + if signs[i]*signs[i+1] == -1] + if len(ok_intervals) == n: + return ok_intervals + N = N*2 + +def find_in_interval(ctx, f, ab): + return ctx.findroot(f, ab, solver='illinois', verify=False) + +def bessel_zero(ctx, kind, prime, v, m, isoltol=0.01, _interval_cache={}): + prec = ctx.prec + workprec = max(prec, ctx.mag(v), ctx.mag(m))+10 + try: + ctx.prec = workprec + v = ctx.mpf(v) + m = int(m) + prime = int(prime) + if v < 0: + raise ValueError("v cannot be negative") + if m < 1: + raise ValueError("m cannot be less than 1") + if not prime in (0,1): + raise ValueError("prime should lie between 0 and 1") + if kind == 1: + if prime: f = lambda x: ctx.besselj(v,x,derivative=1) + else: f = lambda x: ctx.besselj(v,x) + if kind == 2: + if prime: f = lambda x: ctx.bessely(v,x,derivative=1) + else: f = lambda x: ctx.bessely(v,x) + # The first root of J' is very close to 0 for small + # orders, and this needs to be special-cased + if kind == 1 and prime and m == 1: + if v == 0: + return ctx.zero + if v <= 1: + # TODO: use v <= j'_{v,1} < y_{v,1}? 
+ r = 2*ctx.sqrt(v*(1+v)/(v+2)) + return find_in_interval(ctx, f, (r/10, 2*r)) + if (kind,prime,v,m) in _interval_cache: + return find_in_interval(ctx, f, _interval_cache[kind,prime,v,m]) + r, err = mcmahon(ctx, kind, prime, v, m) + if err < isoltol: + return find_in_interval(ctx, f, (r-isoltol, r+isoltol)) + # An x such that 0 < x < r_{v,1} + if kind == 1 and not prime: low = 2.4 + if kind == 1 and prime: low = 1.8 + if kind == 2 and not prime: low = 0.8 + if kind == 2 and prime: low = 2.0 + n = m+1 + while 1: + r1, err = mcmahon(ctx, kind, prime, v, n) + if err < isoltol: + r2, err2 = mcmahon(ctx, kind, prime, v, n+1) + intervals = generalized_bisection(ctx, f, low, 0.5*(r1+r2), n) + for k, ab in enumerate(intervals): + _interval_cache[kind,prime,v,k+1] = ab + return find_in_interval(ctx, f, intervals[m-1]) + else: + n = n*2 + finally: + ctx.prec = prec + +@defun +def besseljzero(ctx, v, m, derivative=0): + r""" + For a real order `\nu \ge 0` and a positive integer `m`, returns + `j_{\nu,m}`, the `m`-th positive zero of the Bessel function of the + first kind `J_{\nu}(z)` (see :func:`~mpmath.besselj`). Alternatively, + with *derivative=1*, gives the first nonnegative simple zero + `j'_{\nu,m}` of `J'_{\nu}(z)`. + + The indexing convention is that used by Abramowitz & Stegun + and the DLMF. Note the special case `j'_{0,1} = 0`, while all other + zeros are positive. In effect, only simple zeros are counted + (all zeros of Bessel functions are simple except possibly `z = 0`) + and `j_{\nu,m}` becomes a monotonic function of both `\nu` + and `m`. + + The zeros are interlaced according to the inequalities + + .. 
math :: + + j'_{\nu,k} < j_{\nu,k} < j'_{\nu,k+1} + + j_{\nu,1} < j_{\nu+1,2} < j_{\nu,2} < j_{\nu+1,2} < j_{\nu,3} < \cdots + + **Examples** + + Initial zeros of the Bessel functions `J_0(z), J_1(z), J_2(z)`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> besseljzero(0,1); besseljzero(0,2); besseljzero(0,3) + 2.404825557695772768621632 + 5.520078110286310649596604 + 8.653727912911012216954199 + >>> besseljzero(1,1); besseljzero(1,2); besseljzero(1,3) + 3.831705970207512315614436 + 7.01558666981561875353705 + 10.17346813506272207718571 + >>> besseljzero(2,1); besseljzero(2,2); besseljzero(2,3) + 5.135622301840682556301402 + 8.417244140399864857783614 + 11.61984117214905942709415 + + Initial zeros of `J'_0(z), J'_1(z), J'_2(z)`:: + + 0.0 + 3.831705970207512315614436 + 7.01558666981561875353705 + >>> besseljzero(1,1,1); besseljzero(1,2,1); besseljzero(1,3,1) + 1.84118378134065930264363 + 5.331442773525032636884016 + 8.536316366346285834358961 + >>> besseljzero(2,1,1); besseljzero(2,2,1); besseljzero(2,3,1) + 3.054236928227140322755932 + 6.706133194158459146634394 + 9.969467823087595793179143 + + Zeros with large index:: + + >>> besseljzero(0,100); besseljzero(0,1000); besseljzero(0,10000) + 313.3742660775278447196902 + 3140.807295225078628895545 + 31415.14114171350798533666 + >>> besseljzero(5,100); besseljzero(5,1000); besseljzero(5,10000) + 321.1893195676003157339222 + 3148.657306813047523500494 + 31422.9947255486291798943 + >>> besseljzero(0,100,1); besseljzero(0,1000,1); besseljzero(0,10000,1) + 311.8018681873704508125112 + 3139.236339643802482833973 + 31413.57032947022399485808 + + Zeros of functions with large order:: + + >>> besseljzero(50,1) + 57.11689916011917411936228 + >>> besseljzero(50,2) + 62.80769876483536093435393 + >>> besseljzero(50,100) + 388.6936600656058834640981 + >>> besseljzero(50,1,1) + 52.99764038731665010944037 + >>> besseljzero(50,2,1) + 60.02631933279942589882363 + >>> besseljzero(50,100,1) + 
387.1083151608726181086283 + + Zeros of functions with fractional order:: + + >>> besseljzero(0.5,1); besseljzero(1.5,1); besseljzero(2.25,4) + 3.141592653589793238462643 + 4.493409457909064175307881 + 15.15657692957458622921634 + + Both `J_{\nu}(z)` and `J'_{\nu}(z)` can be expressed as infinite + products over their zeros:: + + >>> v,z = 2, mpf(1) + >>> (z/2)**v/gamma(v+1) * \ + ... nprod(lambda k: 1-(z/besseljzero(v,k))**2, [1,inf]) + ... + 0.1149034849319004804696469 + >>> besselj(v,z) + 0.1149034849319004804696469 + >>> (z/2)**(v-1)/2/gamma(v) * \ + ... nprod(lambda k: 1-(z/besseljzero(v,k,1))**2, [1,inf]) + ... + 0.2102436158811325550203884 + >>> besselj(v,z,1) + 0.2102436158811325550203884 + + """ + return +bessel_zero(ctx, 1, derivative, v, m) + +@defun +def besselyzero(ctx, v, m, derivative=0): + r""" + For a real order `\nu \ge 0` and a positive integer `m`, returns + `y_{\nu,m}`, the `m`-th positive zero of the Bessel function of the + second kind `Y_{\nu}(z)` (see :func:`~mpmath.bessely`). Alternatively, + with *derivative=1*, gives the first positive zero `y'_{\nu,m}` of + `Y'_{\nu}(z)`. + + The zeros are interlaced according to the inequalities + + .. 
math :: + + y_{\nu,k} < y'_{\nu,k} < y_{\nu,k+1} + + y_{\nu,1} < y_{\nu+1,1} < y_{\nu,2} < y_{\nu+1,2} < y_{\nu,3} < \cdots + + **Examples** + + Initial zeros of the Bessel functions `Y_0(z), Y_1(z), Y_2(z)`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> besselyzero(0,1); besselyzero(0,2); besselyzero(0,3) + 0.8935769662791675215848871 + 3.957678419314857868375677 + 7.086051060301772697623625 + >>> besselyzero(1,1); besselyzero(1,2); besselyzero(1,3) + 2.197141326031017035149034 + 5.429681040794135132772005 + 8.596005868331168926429606 + >>> besselyzero(2,1); besselyzero(2,2); besselyzero(2,3) + 3.384241767149593472701426 + 6.793807513268267538291167 + 10.02347797936003797850539 + + Initial zeros of `Y'_0(z), Y'_1(z), Y'_2(z)`:: + + >>> besselyzero(0,1,1); besselyzero(0,2,1); besselyzero(0,3,1) + 2.197141326031017035149034 + 5.429681040794135132772005 + 8.596005868331168926429606 + >>> besselyzero(1,1,1); besselyzero(1,2,1); besselyzero(1,3,1) + 3.683022856585177699898967 + 6.941499953654175655751944 + 10.12340465543661307978775 + >>> besselyzero(2,1,1); besselyzero(2,2,1); besselyzero(2,3,1) + 5.002582931446063945200176 + 8.350724701413079526349714 + 11.57419546521764654624265 + + Zeros with large index:: + + >>> besselyzero(0,100); besselyzero(0,1000); besselyzero(0,10000) + 311.8034717601871549333419 + 3139.236498918198006794026 + 31413.57034538691205229188 + >>> besselyzero(5,100); besselyzero(5,1000); besselyzero(5,10000) + 319.6183338562782156235062 + 3147.086508524556404473186 + 31421.42392920214673402828 + >>> besselyzero(0,100,1); besselyzero(0,1000,1); besselyzero(0,10000,1) + 313.3726705426359345050449 + 3140.807136030340213610065 + 31415.14112579761578220175 + + Zeros of functions with large order:: + + >>> besselyzero(50,1) + 53.50285882040036394680237 + >>> besselyzero(50,2) + 60.11244442774058114686022 + >>> besselyzero(50,100) + 387.1096509824943957706835 + >>> besselyzero(50,1,1) + 56.96290427516751320063605 + >>> 
besselyzero(50,2,1) + 62.74888166945933944036623 + >>> besselyzero(50,100,1) + 388.6923300548309258355475 + + Zeros of functions with fractional order:: + + >>> besselyzero(0.5,1); besselyzero(1.5,1); besselyzero(2.25,4) + 1.570796326794896619231322 + 2.798386045783887136720249 + 13.56721208770735123376018 + + """ + return +bessel_zero(ctx, 2, derivative, v, m) diff --git a/.venv/lib/python3.11/site-packages/mpmath/functions/elliptic.py b/.venv/lib/python3.11/site-packages/mpmath/functions/elliptic.py new file mode 100644 index 0000000000000000000000000000000000000000..1e198697fa042b7cc8bcba9e9e770f5c8106dad6 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/functions/elliptic.py @@ -0,0 +1,1431 @@ +r""" +Elliptic functions historically comprise the elliptic integrals +and their inverses, and originate from the problem of computing the +arc length of an ellipse. From a more modern point of view, +an elliptic function is defined as a doubly periodic function, i.e. +a function which satisfies + +.. math :: + + f(z + 2 \omega_1) = f(z + 2 \omega_2) = f(z) + +for some half-periods `\omega_1, \omega_2` with +`\mathrm{Im}[\omega_1 / \omega_2] > 0`. The canonical elliptic +functions are the Jacobi elliptic functions. More broadly, this section +includes quasi-doubly periodic functions (such as the Jacobi theta +functions) and other functions useful in the study of elliptic functions. + +Many different conventions for the arguments of +elliptic functions are in use. It is even standard to use +different parameterizations for different functions in the same +text or software (and mpmath is no exception). +The usual parameters are the elliptic nome `q`, which usually +must satisfy `|q| < 1`; the elliptic parameter `m` (an arbitrary +complex number); the elliptic modulus `k` (an arbitrary complex +number); and the half-period ratio `\tau`, which usually must +satisfy `\mathrm{Im}[\tau] > 0`. 
+These quantities can be expressed in terms of each other +using the following relations: + +.. math :: + + m = k^2 + +.. math :: + + \tau = i \frac{K(1-m)}{K(m)} + +.. math :: + + q = e^{i \pi \tau} + +.. math :: + + k = \frac{\vartheta_2^2(q)}{\vartheta_3^2(q)} + +In addition, an alternative definition is used for the nome in +number theory, which we here denote by q-bar: + +.. math :: + + \bar{q} = q^2 = e^{2 i \pi \tau} + +For convenience, mpmath provides functions to convert +between the various parameters (:func:`~mpmath.qfrom`, :func:`~mpmath.mfrom`, +:func:`~mpmath.kfrom`, :func:`~mpmath.taufrom`, :func:`~mpmath.qbarfrom`). + +**References** + +1. [AbramowitzStegun]_ + +2. [WhittakerWatson]_ + +""" + +from .functions import defun, defun_wrapped + +@defun_wrapped +def eta(ctx, tau): + r""" + Returns the Dedekind eta function of tau in the upper half-plane. + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> eta(1j); gamma(0.25) / (2*pi**0.75) + (0.7682254223260566590025942 + 0.0j) + 0.7682254223260566590025942 + >>> tau = sqrt(2) + sqrt(5)*1j + >>> eta(-1/tau); sqrt(-1j*tau) * eta(tau) + (0.9022859908439376463573294 + 0.07985093673948098408048575j) + (0.9022859908439376463573295 + 0.07985093673948098408048575j) + >>> eta(tau+1); exp(pi*1j/12) * eta(tau) + (0.4493066139717553786223114 + 0.3290014793877986663915939j) + (0.4493066139717553786223114 + 0.3290014793877986663915939j) + >>> f = lambda z: diff(eta, z) / eta(z) + >>> chop(36*diff(f,tau)**2 - 24*diff(f,tau,2)*f(tau) + diff(f,tau,3)) + 0.0 + + """ + if ctx.im(tau) <= 0.0: + raise ValueError("eta is only defined in the upper half-plane") + q = ctx.expjpi(tau/12) + return q * ctx.qp(q**24) + +def nome(ctx, m): + m = ctx.convert(m) + if not m: + return m + if m == ctx.one: + return m + if ctx.isnan(m): + return m + if ctx.isinf(m): + if m == ctx.ninf: + return type(m)(-1) + else: + return ctx.mpc(-1) + a = ctx.ellipk(ctx.one-m) + b = ctx.ellipk(m) + v = ctx.exp(-ctx.pi*a/b) + if not 
ctx._im(m) and ctx._re(m) < 1: + if ctx._is_real_type(m): + return v.real + else: + return v.real + 0j + elif m == 2: + v = ctx.mpc(0, v.imag) + return v + +@defun_wrapped +def qfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None): + r""" + Returns the elliptic nome `q`, given any of `q, m, k, \tau, \bar{q}`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> qfrom(q=0.25) + 0.25 + >>> qfrom(m=mfrom(q=0.25)) + 0.25 + >>> qfrom(k=kfrom(q=0.25)) + 0.25 + >>> qfrom(tau=taufrom(q=0.25)) + (0.25 + 0.0j) + >>> qfrom(qbar=qbarfrom(q=0.25)) + 0.25 + + """ + if q is not None: + return ctx.convert(q) + if m is not None: + return nome(ctx, m) + if k is not None: + return nome(ctx, ctx.convert(k)**2) + if tau is not None: + return ctx.expjpi(tau) + if qbar is not None: + return ctx.sqrt(qbar) + +@defun_wrapped +def qbarfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None): + r""" + Returns the number-theoretic nome `\bar q`, given any of + `q, m, k, \tau, \bar{q}`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> qbarfrom(qbar=0.25) + 0.25 + >>> qbarfrom(q=qfrom(qbar=0.25)) + 0.25 + >>> qbarfrom(m=extraprec(20)(mfrom)(qbar=0.25)) # ill-conditioned + 0.25 + >>> qbarfrom(k=extraprec(20)(kfrom)(qbar=0.25)) # ill-conditioned + 0.25 + >>> qbarfrom(tau=taufrom(qbar=0.25)) + (0.25 + 0.0j) + + """ + if qbar is not None: + return ctx.convert(qbar) + if q is not None: + return ctx.convert(q) ** 2 + if m is not None: + return nome(ctx, m) ** 2 + if k is not None: + return nome(ctx, ctx.convert(k)**2) ** 2 + if tau is not None: + return ctx.expjpi(2*tau) + +@defun_wrapped +def taufrom(ctx, q=None, m=None, k=None, tau=None, qbar=None): + r""" + Returns the elliptic half-period ratio `\tau`, given any of + `q, m, k, \tau, \bar{q}`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> taufrom(tau=0.5j) + (0.0 + 0.5j) + >>> taufrom(q=qfrom(tau=0.5j)) + (0.0 + 0.5j) + >>> taufrom(m=mfrom(tau=0.5j)) + (0.0 + 0.5j) + >>> 
taufrom(k=kfrom(tau=0.5j)) + (0.0 + 0.5j) + >>> taufrom(qbar=qbarfrom(tau=0.5j)) + (0.0 + 0.5j) + + """ + if tau is not None: + return ctx.convert(tau) + if m is not None: + m = ctx.convert(m) + return ctx.j*ctx.ellipk(1-m)/ctx.ellipk(m) + if k is not None: + k = ctx.convert(k) + return ctx.j*ctx.ellipk(1-k**2)/ctx.ellipk(k**2) + if q is not None: + return ctx.log(q) / (ctx.pi*ctx.j) + if qbar is not None: + qbar = ctx.convert(qbar) + return ctx.log(qbar) / (2*ctx.pi*ctx.j) + +@defun_wrapped +def kfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None): + r""" + Returns the elliptic modulus `k`, given any of + `q, m, k, \tau, \bar{q}`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> kfrom(k=0.25) + 0.25 + >>> kfrom(m=mfrom(k=0.25)) + 0.25 + >>> kfrom(q=qfrom(k=0.25)) + 0.25 + >>> kfrom(tau=taufrom(k=0.25)) + (0.25 + 0.0j) + >>> kfrom(qbar=qbarfrom(k=0.25)) + 0.25 + + As `q \to 1` and `q \to -1`, `k` rapidly approaches + `1` and `i \infty` respectively:: + + >>> kfrom(q=0.75) + 0.9999999999999899166471767 + >>> kfrom(q=-0.75) + (0.0 + 7041781.096692038332790615j) + >>> kfrom(q=1) + 1 + >>> kfrom(q=-1) + (0.0 + +infj) + """ + if k is not None: + return ctx.convert(k) + if m is not None: + return ctx.sqrt(m) + if tau is not None: + q = ctx.expjpi(tau) + if qbar is not None: + q = ctx.sqrt(qbar) + if q == 1: + return q + if q == -1: + return ctx.mpc(0,'inf') + return (ctx.jtheta(2,0,q)/ctx.jtheta(3,0,q))**2 + +@defun_wrapped +def mfrom(ctx, q=None, m=None, k=None, tau=None, qbar=None): + r""" + Returns the elliptic parameter `m`, given any of + `q, m, k, \tau, \bar{q}`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> mfrom(m=0.25) + 0.25 + >>> mfrom(q=qfrom(m=0.25)) + 0.25 + >>> mfrom(k=kfrom(m=0.25)) + 0.25 + >>> mfrom(tau=taufrom(m=0.25)) + (0.25 + 0.0j) + >>> mfrom(qbar=qbarfrom(m=0.25)) + 0.25 + + As `q \to 1` and `q \to -1`, `m` rapidly approaches + `1` and `-\infty` respectively:: + + >>> mfrom(q=0.75) + 
0.9999999999999798332943533 + >>> mfrom(q=-0.75) + -49586681013729.32611558353 + >>> mfrom(q=1) + 1.0 + >>> mfrom(q=-1) + -inf + + The inverse nome as a function of `q` has an integer + Taylor series expansion:: + + >>> taylor(lambda q: mfrom(q), 0, 7) + [0.0, 16.0, -128.0, 704.0, -3072.0, 11488.0, -38400.0, 117632.0] + + """ + if m is not None: + return m + if k is not None: + return k**2 + if tau is not None: + q = ctx.expjpi(tau) + if qbar is not None: + q = ctx.sqrt(qbar) + if q == 1: + return ctx.convert(q) + if q == -1: + return q*ctx.inf + v = (ctx.jtheta(2,0,q)/ctx.jtheta(3,0,q))**4 + if ctx._is_real_type(q) and q < 0: + v = v.real + return v + +jacobi_spec = { + 'sn' : ([3],[2],[1],[4], 'sin', 'tanh'), + 'cn' : ([4],[2],[2],[4], 'cos', 'sech'), + 'dn' : ([4],[3],[3],[4], '1', 'sech'), + 'ns' : ([2],[3],[4],[1], 'csc', 'coth'), + 'nc' : ([2],[4],[4],[2], 'sec', 'cosh'), + 'nd' : ([3],[4],[4],[3], '1', 'cosh'), + 'sc' : ([3],[4],[1],[2], 'tan', 'sinh'), + 'sd' : ([3,3],[2,4],[1],[3], 'sin', 'sinh'), + 'cd' : ([3],[2],[2],[3], 'cos', '1'), + 'cs' : ([4],[3],[2],[1], 'cot', 'csch'), + 'dc' : ([2],[3],[3],[2], 'sec', '1'), + 'ds' : ([2,4],[3,3],[3],[1], 'csc', 'csch'), + 'cc' : None, + 'ss' : None, + 'nn' : None, + 'dd' : None +} + +@defun +def ellipfun(ctx, kind, u=None, m=None, q=None, k=None, tau=None): + try: + S = jacobi_spec[kind] + except KeyError: + raise ValueError("First argument must be a two-character string " + "containing 's', 'c', 'd' or 'n', e.g.: 'sn'") + if u is None: + def f(*args, **kwargs): + return ctx.ellipfun(kind, *args, **kwargs) + f.__name__ = kind + return f + prec = ctx.prec + try: + ctx.prec += 10 + u = ctx.convert(u) + q = ctx.qfrom(m=m, q=q, k=k, tau=tau) + if S is None: + v = ctx.one + 0*q*u + elif q == ctx.zero: + if S[4] == '1': v = ctx.one + else: v = getattr(ctx, S[4])(u) + v += 0*q*u + elif q == ctx.one: + if S[5] == '1': v = ctx.one + else: v = getattr(ctx, S[5])(u) + v += 0*q*u + else: + t = u / ctx.jtheta(3, 0, q)**2 + v 
= ctx.one + for a in S[0]: v *= ctx.jtheta(a, 0, q) + for b in S[1]: v /= ctx.jtheta(b, 0, q) + for c in S[2]: v *= ctx.jtheta(c, t, q) + for d in S[3]: v /= ctx.jtheta(d, t, q) + finally: + ctx.prec = prec + return +v + +@defun_wrapped +def kleinj(ctx, tau=None, **kwargs): + r""" + Evaluates the Klein j-invariant, which is a modular function defined for + `\tau` in the upper half-plane as + + .. math :: + + J(\tau) = \frac{g_2^3(\tau)}{g_2^3(\tau) - 27 g_3^2(\tau)} + + where `g_2` and `g_3` are the modular invariants of the Weierstrass + elliptic function, + + .. math :: + + g_2(\tau) = 60 \sum_{(m,n) \in \mathbb{Z}^2 \setminus (0,0)} (m \tau+n)^{-4} + + g_3(\tau) = 140 \sum_{(m,n) \in \mathbb{Z}^2 \setminus (0,0)} (m \tau+n)^{-6}. + + An alternative, common notation is that of the j-function + `j(\tau) = 1728 J(\tau)`. + + **Plots** + + .. literalinclude :: /plots/kleinj.py + .. image :: /plots/kleinj.png + .. literalinclude :: /plots/kleinj2.py + .. image :: /plots/kleinj2.png + + **Examples** + + Verifying the functional equation `J(\tau) = J(\tau+1) = J(-\tau^{-1})`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> tau = 0.625+0.75*j + >>> tau = 0.625+0.75*j + >>> kleinj(tau) + (-0.1507492166511182267125242 + 0.07595948379084571927228948j) + >>> kleinj(tau+1) + (-0.1507492166511182267125242 + 0.07595948379084571927228948j) + >>> kleinj(-1/tau) + (-0.1507492166511182267125242 + 0.07595948379084571927228946j) + + The j-function has a famous Laurent series expansion in terms of the nome + `\bar{q}`, `j(\tau) = \bar{q}^{-1} + 744 + 196884\bar{q} + \ldots`:: + + >>> mp.dps = 15 + >>> taylor(lambda q: 1728*q*kleinj(qbar=q), 0, 5, singular=True) + [1.0, 744.0, 196884.0, 21493760.0, 864299970.0, 20245856256.0] + + The j-function admits exact evaluation at special algebraic points + related to the Heegner numbers 1, 2, 3, 7, 11, 19, 43, 67, 163:: + + >>> @extraprec(10) + ... def h(n): + ... v = (1+sqrt(n)*j) + ... if n > 2: + ... v *= 0.5 + ... 
return v + ... + >>> mp.dps = 25 + >>> for n in [1,2,3,7,11,19,43,67,163]: + ... n, chop(1728*kleinj(h(n))) + ... + (1, 1728.0) + (2, 8000.0) + (3, 0.0) + (7, -3375.0) + (11, -32768.0) + (19, -884736.0) + (43, -884736000.0) + (67, -147197952000.0) + (163, -262537412640768000.0) + + Also at other special points, the j-function assumes explicit + algebraic values, e.g.:: + + >>> chop(1728*kleinj(j*sqrt(5))) + 1264538.909475140509320227 + >>> identify(cbrt(_)) # note: not simplified + '((100+sqrt(13520))/2)' + >>> (50+26*sqrt(5))**3 + 1264538.909475140509320227 + + """ + q = ctx.qfrom(tau=tau, **kwargs) + t2 = ctx.jtheta(2,0,q) + t3 = ctx.jtheta(3,0,q) + t4 = ctx.jtheta(4,0,q) + P = (t2**8 + t3**8 + t4**8)**3 + Q = 54*(t2*t3*t4)**8 + return P/Q + + +def RF_calc(ctx, x, y, z, r): + if y == z: return RC_calc(ctx, x, y, r) + if x == z: return RC_calc(ctx, y, x, r) + if x == y: return RC_calc(ctx, z, x, r) + if not (ctx.isnormal(x) and ctx.isnormal(y) and ctx.isnormal(z)): + if ctx.isnan(x) or ctx.isnan(y) or ctx.isnan(z): + return x*y*z + if ctx.isinf(x) or ctx.isinf(y) or ctx.isinf(z): + return ctx.zero + xm,ym,zm = x,y,z + A0 = Am = (x+y+z)/3 + Q = ctx.root(3*r, -6) * max(abs(A0-x),abs(A0-y),abs(A0-z)) + g = ctx.mpf(0.25) + pow4 = ctx.one + while 1: + xs = ctx.sqrt(xm) + ys = ctx.sqrt(ym) + zs = ctx.sqrt(zm) + lm = xs*ys + xs*zs + ys*zs + Am1 = (Am+lm)*g + xm, ym, zm = (xm+lm)*g, (ym+lm)*g, (zm+lm)*g + if pow4 * Q < abs(Am): + break + Am = Am1 + pow4 *= g + t = pow4/Am + X = (A0-x)*t + Y = (A0-y)*t + Z = -X-Y + E2 = X*Y-Z**2 + E3 = X*Y*Z + return ctx.power(Am,-0.5) * (9240-924*E2+385*E2**2+660*E3-630*E2*E3)/9240 + +def RC_calc(ctx, x, y, r, pv=True): + if not (ctx.isnormal(x) and ctx.isnormal(y)): + if ctx.isinf(x) or ctx.isinf(y): + return 1/(x*y) + if y == 0: + return ctx.inf + if x == 0: + return ctx.pi / ctx.sqrt(y) / 2 + raise ValueError + # Cauchy principal value + if pv and ctx._im(y) == 0 and ctx._re(y) < 0: + return ctx.sqrt(x/(x-y)) * RC_calc(ctx, x-y, -y, r) 
+ if x == y: + return 1/ctx.sqrt(x) + extraprec = 2*max(0,-ctx.mag(x-y)+ctx.mag(x)) + ctx.prec += extraprec + if ctx._is_real_type(x) and ctx._is_real_type(y): + x = ctx._re(x) + y = ctx._re(y) + a = ctx.sqrt(x/y) + if x < y: + b = ctx.sqrt(y-x) + v = ctx.acos(a)/b + else: + b = ctx.sqrt(x-y) + v = ctx.acosh(a)/b + else: + sx = ctx.sqrt(x) + sy = ctx.sqrt(y) + v = ctx.acos(sx/sy)/(ctx.sqrt((1-x/y))*sy) + ctx.prec -= extraprec + return v + +def RJ_calc(ctx, x, y, z, p, r, integration): + """ + With integration == 0, computes RJ only using Carlson's algorithm + (may be wrong for some values). + With integration == 1, uses an initial integration to make sure + Carlson's algorithm is correct. + With integration == 2, uses only integration. + """ + if not (ctx.isnormal(x) and ctx.isnormal(y) and \ + ctx.isnormal(z) and ctx.isnormal(p)): + if ctx.isnan(x) or ctx.isnan(y) or ctx.isnan(z) or ctx.isnan(p): + return x*y*z + if ctx.isinf(x) or ctx.isinf(y) or ctx.isinf(z) or ctx.isinf(p): + return ctx.zero + if not p: + return ctx.inf + if (not x) + (not y) + (not z) > 1: + return ctx.inf + # Check conditions and fall back on integration for argument + # reduction if needed. The following conditions might be needlessly + # restrictive. 
+ initial_integral = ctx.zero + if integration >= 1: + ok = (x.real >= 0 and y.real >= 0 and z.real >= 0 and p.real > 0) + if not ok: + if x == p or y == p or z == p: + ok = True + if not ok: + if p.imag != 0 or p.real >= 0: + if (x.imag == 0 and x.real >= 0 and ctx.conj(y) == z): + ok = True + if (y.imag == 0 and y.real >= 0 and ctx.conj(x) == z): + ok = True + if (z.imag == 0 and z.real >= 0 and ctx.conj(x) == y): + ok = True + if not ok or (integration == 2): + N = ctx.ceil(-min(x.real, y.real, z.real, p.real)) + 1 + # Integrate around any singularities + if all((t.imag >= 0 or t.real > 0) for t in [x, y, z, p]): + margin = ctx.j + elif all((t.imag < 0 or t.real > 0) for t in [x, y, z, p]): + margin = -ctx.j + else: + margin = 1 + # Go through the upper half-plane, but low enough that any + # parameter starting in the lower plane doesn't cross the + # branch cut + for t in [x, y, z, p]: + if t.imag >= 0 or t.real > 0: + continue + margin = min(margin, abs(t.imag) * 0.5) + margin *= ctx.j + N += margin + F = lambda t: 1/(ctx.sqrt(t+x)*ctx.sqrt(t+y)*ctx.sqrt(t+z)*(t+p)) + if integration == 2: + return 1.5 * ctx.quadsubdiv(F, [0, N, ctx.inf]) + initial_integral = 1.5 * ctx.quadsubdiv(F, [0, N]) + x += N; y += N; z += N; p += N + xm,ym,zm,pm = x,y,z,p + A0 = Am = (x + y + z + 2*p)/5 + delta = (p-x)*(p-y)*(p-z) + Q = ctx.root(0.25*r, -6) * max(abs(A0-x),abs(A0-y),abs(A0-z),abs(A0-p)) + g = ctx.mpf(0.25) + pow4 = ctx.one + S = 0 + while 1: + sx = ctx.sqrt(xm) + sy = ctx.sqrt(ym) + sz = ctx.sqrt(zm) + sp = ctx.sqrt(pm) + lm = sx*sy + sx*sz + sy*sz + Am1 = (Am+lm)*g + xm = (xm+lm)*g; ym = (ym+lm)*g; zm = (zm+lm)*g; pm = (pm+lm)*g + dm = (sp+sx) * (sp+sy) * (sp+sz) + em = delta * pow4**3 / dm**2 + if pow4 * Q < abs(Am): + break + T = RC_calc(ctx, ctx.one, ctx.one+em, r) * pow4 / dm + S += T + pow4 *= g + Am = Am1 + t = pow4 / Am + X = (A0-x)*t + Y = (A0-y)*t + Z = (A0-z)*t + P = (-X-Y-Z)/2 + E2 = X*Y + X*Z + Y*Z - 3*P**2 + E3 = X*Y*Z + 2*E2*P + 4*P**3 + E4 = (2*X*Y*Z + 
E2*P + 3*P**3)*P + E5 = X*Y*Z*P**2 + P = 24024 - 5148*E2 + 2457*E2**2 + 4004*E3 - 4158*E2*E3 - 3276*E4 + 2772*E5 + Q = 24024 + v1 = pow4 * ctx.power(Am, -1.5) * P/Q + v2 = 6*S + return initial_integral + v1 + v2 + +@defun +def elliprf(ctx, x, y, z): + r""" + Evaluates the Carlson symmetric elliptic integral of the first kind + + .. math :: + + R_F(x,y,z) = \frac{1}{2} + \int_0^{\infty} \frac{dt}{\sqrt{(t+x)(t+y)(t+z)}} + + which is defined for `x,y,z \notin (-\infty,0)`, and with + at most one of `x,y,z` being zero. + + For real `x,y,z \ge 0`, the principal square root is taken in the integrand. + For complex `x,y,z`, the principal square root is taken as `t \to \infty` + and as `t \to 0` non-principal branches are chosen as necessary so as to + make the integrand continuous. + + **Examples** + + Some basic values and limits:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> elliprf(0,1,1); pi/2 + 1.570796326794896619231322 + 1.570796326794896619231322 + >>> elliprf(0,1,inf) + 0.0 + >>> elliprf(1,1,1) + 1.0 + >>> elliprf(2,2,2)**2 + 0.5 + >>> elliprf(1,0,0); elliprf(0,0,1); elliprf(0,1,0); elliprf(0,0,0) + +inf + +inf + +inf + +inf + + Representing complete elliptic integrals in terms of `R_F`:: + + >>> m = mpf(0.75) + >>> ellipk(m); elliprf(0,1-m,1) + 2.156515647499643235438675 + 2.156515647499643235438675 + >>> ellipe(m); elliprf(0,1-m,1)-m*elliprd(0,1-m,1)/3 + 1.211056027568459524803563 + 1.211056027568459524803563 + + Some symmetries and argument transformations:: + + >>> x,y,z = 2,3,4 + >>> elliprf(x,y,z); elliprf(y,x,z); elliprf(z,y,x) + 0.5840828416771517066928492 + 0.5840828416771517066928492 + 0.5840828416771517066928492 + >>> k = mpf(100000) + >>> elliprf(k*x,k*y,k*z); k**(-0.5) * elliprf(x,y,z) + 0.001847032121923321253219284 + 0.001847032121923321253219284 + >>> l = sqrt(x*y) + sqrt(y*z) + sqrt(z*x) + >>> elliprf(x,y,z); 2*elliprf(x+l,y+l,z+l) + 0.5840828416771517066928492 + 0.5840828416771517066928492 + >>> 
elliprf((x+l)/4,(y+l)/4,(z+l)/4) + 0.5840828416771517066928492 + + Comparing with numerical integration:: + + >>> x,y,z = 2,3,4 + >>> elliprf(x,y,z) + 0.5840828416771517066928492 + >>> f = lambda t: 0.5*((t+x)*(t+y)*(t+z))**(-0.5) + >>> q = extradps(25)(quad) + >>> q(f, [0,inf]) + 0.5840828416771517066928492 + + With the following arguments, the square root in the integrand becomes + discontinuous at `t = 1/2` if the principal branch is used. To obtain + the right value, `-\sqrt{r}` must be taken instead of `\sqrt{r}` + on `t \in (0, 1/2)`:: + + >>> x,y,z = j-1,j,0 + >>> elliprf(x,y,z) + (0.7961258658423391329305694 - 1.213856669836495986430094j) + >>> -q(f, [0,0.5]) + q(f, [0.5,inf]) + (0.7961258658423391329305694 - 1.213856669836495986430094j) + + The so-called *first lemniscate constant*, a transcendental number:: + + >>> elliprf(0,1,2) + 1.31102877714605990523242 + >>> extradps(25)(quad)(lambda t: 1/sqrt(1-t**4), [0,1]) + 1.31102877714605990523242 + >>> gamma('1/4')**2/(4*sqrt(2*pi)) + 1.31102877714605990523242 + + **References** + + 1. [Carlson]_ + 2. [DLMF]_ Chapter 19. Elliptic Integrals + + """ + x = ctx.convert(x) + y = ctx.convert(y) + z = ctx.convert(z) + prec = ctx.prec + try: + ctx.prec += 20 + tol = ctx.eps * 2**10 + v = RF_calc(ctx, x, y, z, tol) + finally: + ctx.prec = prec + return +v + +@defun +def elliprc(ctx, x, y, pv=True): + r""" + Evaluates the degenerate Carlson symmetric elliptic integral + of the first kind + + .. math :: + + R_C(x,y) = R_F(x,y,y) = + \frac{1}{2} \int_0^{\infty} \frac{dt}{(t+y) \sqrt{(t+x)}}. + + If `y \in (-\infty,0)`, either a value defined by continuity, + or with *pv=True* the Cauchy principal value, can be computed. + + If `x \ge 0, y > 0`, the value can be expressed in terms of + elementary functions as + + .. 
math :: + + R_C(x,y) = + \begin{cases} + \dfrac{1}{\sqrt{y-x}} + \cos^{-1}\left(\sqrt{\dfrac{x}{y}}\right), & x < y \\ + \dfrac{1}{\sqrt{y}}, & x = y \\ + \dfrac{1}{\sqrt{x-y}} + \cosh^{-1}\left(\sqrt{\dfrac{x}{y}}\right), & x > y \\ + \end{cases}. + + **Examples** + + Some special values and limits:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> elliprc(1,2)*4; elliprc(0,1)*2; +pi + 3.141592653589793238462643 + 3.141592653589793238462643 + 3.141592653589793238462643 + >>> elliprc(1,0) + +inf + >>> elliprc(5,5)**2 + 0.2 + >>> elliprc(1,inf); elliprc(inf,1); elliprc(inf,inf) + 0.0 + 0.0 + 0.0 + + Comparing with the elementary closed-form solution:: + + >>> elliprc('1/3', '1/5'); sqrt(7.5)*acosh(sqrt('5/3')) + 2.041630778983498390751238 + 2.041630778983498390751238 + >>> elliprc('1/5', '1/3'); sqrt(7.5)*acos(sqrt('3/5')) + 1.875180765206547065111085 + 1.875180765206547065111085 + + Comparing with numerical integration:: + + >>> q = extradps(25)(quad) + >>> elliprc(2, -3, pv=True) + 0.3333969101113672670749334 + >>> elliprc(2, -3, pv=False) + (0.3333969101113672670749334 + 0.7024814731040726393156375j) + >>> 0.5*q(lambda t: 1/(sqrt(t+2)*(t-3)), [0,3-j,6,inf]) + (0.3333969101113672670749334 + 0.7024814731040726393156375j) + + """ + x = ctx.convert(x) + y = ctx.convert(y) + prec = ctx.prec + try: + ctx.prec += 20 + tol = ctx.eps * 2**10 + v = RC_calc(ctx, x, y, tol, pv) + finally: + ctx.prec = prec + return +v + +@defun +def elliprj(ctx, x, y, z, p, integration=1): + r""" + Evaluates the Carlson symmetric elliptic integral of the third kind + + .. math :: + + R_J(x,y,z,p) = \frac{3}{2} + \int_0^{\infty} \frac{dt}{(t+p)\sqrt{(t+x)(t+y)(t+z)}}. + + Like :func:`~mpmath.elliprf`, the branch of the square root in the integrand + is defined so as to be continuous along the path of integration for + complex values of the arguments. 
+ + **Examples** + + Some values and limits:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> elliprj(1,1,1,1) + 1.0 + >>> elliprj(2,2,2,2); 1/(2*sqrt(2)) + 0.3535533905932737622004222 + 0.3535533905932737622004222 + >>> elliprj(0,1,2,2) + 1.067937989667395702268688 + >>> 3*(2*gamma('5/4')**2-pi**2/gamma('1/4')**2)/(sqrt(2*pi)) + 1.067937989667395702268688 + >>> elliprj(0,1,1,2); 3*pi*(2-sqrt(2))/4 + 1.380226776765915172432054 + 1.380226776765915172432054 + >>> elliprj(1,3,2,0); elliprj(0,1,1,0); elliprj(0,0,0,0) + +inf + +inf + +inf + >>> elliprj(1,inf,1,0); elliprj(1,1,1,inf) + 0.0 + 0.0 + >>> chop(elliprj(1+j, 1-j, 1, 1)) + 0.8505007163686739432927844 + + Scale transformation:: + + >>> x,y,z,p = 2,3,4,5 + >>> k = mpf(100000) + >>> elliprj(k*x,k*y,k*z,k*p); k**(-1.5)*elliprj(x,y,z,p) + 4.521291677592745527851168e-9 + 4.521291677592745527851168e-9 + + Comparing with numerical integration:: + + >>> elliprj(1,2,3,4) + 0.2398480997495677621758617 + >>> f = lambda t: 1/((t+4)*sqrt((t+1)*(t+2)*(t+3))) + >>> 1.5*quad(f, [0,inf]) + 0.2398480997495677621758617 + >>> elliprj(1,2+1j,3,4-2j) + (0.216888906014633498739952 + 0.04081912627366673332369512j) + >>> f = lambda t: 1/((t+4-2j)*sqrt((t+1)*(t+2+1j)*(t+3))) + >>> 1.5*quad(f, [0,inf]) + (0.216888906014633498739952 + 0.04081912627366673332369511j) + + """ + x = ctx.convert(x) + y = ctx.convert(y) + z = ctx.convert(z) + p = ctx.convert(p) + prec = ctx.prec + try: + ctx.prec += 20 + tol = ctx.eps * 2**10 + v = RJ_calc(ctx, x, y, z, p, tol, integration) + finally: + ctx.prec = prec + return +v + +@defun +def elliprd(ctx, x, y, z): + r""" + Evaluates the degenerate Carlson symmetric elliptic integral + of the third kind or Carlson elliptic integral of the + second kind `R_D(x,y,z) = R_J(x,y,z,z)`. + + See :func:`~mpmath.elliprj` for additional information. 
+ + **Examples** + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> elliprd(1,2,3) + 0.2904602810289906442326534 + >>> elliprj(1,2,3,3) + 0.2904602810289906442326534 + + The so-called *second lemniscate constant*, a transcendental number:: + + >>> elliprd(0,2,1)/3 + 0.5990701173677961037199612 + >>> extradps(25)(quad)(lambda t: t**2/sqrt(1-t**4), [0,1]) + 0.5990701173677961037199612 + >>> gamma('3/4')**2/sqrt(2*pi) + 0.5990701173677961037199612 + + """ + return ctx.elliprj(x,y,z,z) + +@defun +def elliprg(ctx, x, y, z): + r""" + Evaluates the Carlson completely symmetric elliptic integral + of the second kind + + .. math :: + + R_G(x,y,z) = \frac{1}{4} \int_0^{\infty} + \frac{t}{\sqrt{(t+x)(t+y)(t+z)}} + \left( \frac{x}{t+x} + \frac{y}{t+y} + \frac{z}{t+z}\right) dt. + + **Examples** + + Evaluation for real and complex arguments:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> elliprg(0,1,1)*4; +pi + 3.141592653589793238462643 + 3.141592653589793238462643 + >>> elliprg(0,0.5,1) + 0.6753219405238377512600874 + >>> chop(elliprg(1+j, 1-j, 2)) + 1.172431327676416604532822 + + A double integral that can be evaluated in terms of `R_G`:: + + >>> x,y,z = 2,3,4 + >>> def f(t,u): + ... st = fp.sin(t); ct = fp.cos(t) + ... su = fp.sin(u); cu = fp.cos(u) + ... return (x*(st*cu)**2 + y*(st*su)**2 + z*ct**2)**0.5 * st + ... 
+ >>> nprint(mpf(fp.quad(f, [0,fp.pi], [0,2*fp.pi])/(4*fp.pi)), 13) + 1.725503028069 + >>> nprint(elliprg(x,y,z), 13) + 1.725503028069 + + """ + x = ctx.convert(x) + y = ctx.convert(y) + z = ctx.convert(z) + zeros = (not x) + (not y) + (not z) + if zeros == 3: + return (x+y+z)*0 + if zeros == 2: + if x: return 0.5*ctx.sqrt(x) + if y: return 0.5*ctx.sqrt(y) + return 0.5*ctx.sqrt(z) + if zeros == 1: + if not z: + x, z = z, x + def terms(): + T1 = 0.5*z*ctx.elliprf(x,y,z) + T2 = -0.5*(x-z)*(y-z)*ctx.elliprd(x,y,z)/3 + T3 = 0.5*ctx.sqrt(x)*ctx.sqrt(y)/ctx.sqrt(z) + return T1,T2,T3 + return ctx.sum_accurately(terms) + + +@defun_wrapped +def ellipf(ctx, phi, m): + r""" + Evaluates the Legendre incomplete elliptic integral of the first kind + + .. math :: + + F(\phi,m) = \int_0^{\phi} \frac{dt}{\sqrt{1-m \sin^2 t}} + + or equivalently + + .. math :: + + F(\phi,m) = \int_0^{\sin \phi} + \frac{dt}{\left(\sqrt{1-t^2}\right)\left(\sqrt{1-mt^2}\right)}. + + The function reduces to a complete elliptic integral of the first kind + (see :func:`~mpmath.ellipk`) when `\phi = \frac{\pi}{2}`; that is, + + .. math :: + + F\left(\frac{\pi}{2}, m\right) = K(m). + + In the defining integral, it is assumed that the principal branch + of the square root is taken and that the path of integration avoids + crossing any branch cuts. Outside `-\pi/2 \le \Re(\phi) \le \pi/2`, + the function extends quasi-periodically as + + .. math :: + + F(\phi + n \pi, m) = 2 n K(m) + F(\phi,m), n \in \mathbb{Z}. + + **Plots** + + .. literalinclude :: /plots/ellipf.py + .. 
image :: /plots/ellipf.png + + **Examples** + + Basic values and limits:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> ellipf(0,1) + 0.0 + >>> ellipf(0,0) + 0.0 + >>> ellipf(1,0); ellipf(2+3j,0) + 1.0 + (2.0 + 3.0j) + >>> ellipf(1,1); log(sec(1)+tan(1)) + 1.226191170883517070813061 + 1.226191170883517070813061 + >>> ellipf(pi/2, -0.5); ellipk(-0.5) + 1.415737208425956198892166 + 1.415737208425956198892166 + >>> ellipf(pi/2+eps, 1); ellipf(-pi/2-eps, 1) + +inf + +inf + >>> ellipf(1.5, 1) + 3.340677542798311003320813 + + Comparing with numerical integration:: + + >>> z,m = 0.5, 1.25 + >>> ellipf(z,m) + 0.5287219202206327872978255 + >>> quad(lambda t: (1-m*sin(t)**2)**(-0.5), [0,z]) + 0.5287219202206327872978255 + + The arguments may be complex numbers:: + + >>> ellipf(3j, 0.5) + (0.0 + 1.713602407841590234804143j) + >>> ellipf(3+4j, 5-6j) + (1.269131241950351323305741 - 0.3561052815014558335412538j) + >>> z,m = 2+3j, 1.25 + >>> k = 1011 + >>> ellipf(z+pi*k,m); ellipf(z,m) + 2*k*ellipk(m) + (4086.184383622179764082821 - 3003.003538923749396546871j) + (4086.184383622179764082821 - 3003.003538923749396546871j) + + For `|\Re(z)| < \pi/2`, the function can be expressed as a + hypergeometric series of two variables + (see :func:`~mpmath.appellf1`):: + + >>> z,m = 0.5, 0.25 + >>> ellipf(z,m) + 0.5050887275786480788831083 + >>> sin(z)*appellf1(0.5,0.5,0.5,1.5,sin(z)**2,m*sin(z)**2) + 0.5050887275786480788831083 + + """ + z = phi + if not (ctx.isnormal(z) and ctx.isnormal(m)): + if m == 0: + return z + m + if z == 0: + return z * m + if m == ctx.inf or m == ctx.ninf: return z/m + raise ValueError + x = z.real + ctx.prec += max(0, ctx.mag(x)) + pi = +ctx.pi + away = abs(x) > pi/2 + if m == 1: + if away: + return ctx.inf + if away: + d = ctx.nint(x/pi) + z = z-pi*d + P = 2*d*ctx.ellipk(m) + else: + P = 0 + c, s = ctx.cos_sin(z) + return s * ctx.elliprf(c**2, 1-m*s**2, 1) + P + +@defun_wrapped +def ellipe(ctx, *args): + r""" + Called with a single 
argument `m`, evaluates the Legendre complete + elliptic integral of the second kind, `E(m)`, defined by + + .. math :: E(m) = \int_0^{\pi/2} \sqrt{1-m \sin^2 t} \, dt \,=\, + \frac{\pi}{2} + \,_2F_1\left(\frac{1}{2}, -\frac{1}{2}, 1, m\right). + + Called with two arguments `\phi, m`, evaluates the incomplete elliptic + integral of the second kind + + .. math :: + + E(\phi,m) = \int_0^{\phi} \sqrt{1-m \sin^2 t} \, dt = + \int_0^{\sin z} + \frac{\sqrt{1-mt^2}}{\sqrt{1-t^2}} \, dt. + + The incomplete integral reduces to a complete integral when + `\phi = \frac{\pi}{2}`; that is, + + .. math :: + + E\left(\frac{\pi}{2}, m\right) = E(m). + + In the defining integral, it is assumed that the principal branch + of the square root is taken and that the path of integration avoids + crossing any branch cuts. Outside `-\pi/2 \le \Re(z) \le \pi/2`, + the function extends quasi-periodically as + + .. math :: + + E(\phi + n \pi, m) = 2 n E(m) + E(\phi,m), n \in \mathbb{Z}. + + **Plots** + + .. literalinclude :: /plots/ellipe.py + .. 
image :: /plots/ellipe.png + + **Examples for the complete integral** + + Basic values and limits:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> ellipe(0) + 1.570796326794896619231322 + >>> ellipe(1) + 1.0 + >>> ellipe(-1) + 1.910098894513856008952381 + >>> ellipe(2) + (0.5990701173677961037199612 + 0.5990701173677961037199612j) + >>> ellipe(inf) + (0.0 + +infj) + >>> ellipe(-inf) + +inf + + Verifying the defining integral and hypergeometric + representation:: + + >>> ellipe(0.5) + 1.350643881047675502520175 + >>> quad(lambda t: sqrt(1-0.5*sin(t)**2), [0, pi/2]) + 1.350643881047675502520175 + >>> pi/2*hyp2f1(0.5,-0.5,1,0.5) + 1.350643881047675502520175 + + Evaluation is supported for arbitrary complex `m`:: + + >>> ellipe(0.5+0.25j) + (1.360868682163129682716687 - 0.1238733442561786843557315j) + >>> ellipe(3+4j) + (1.499553520933346954333612 - 1.577879007912758274533309j) + + A definite integral:: + + >>> quad(ellipe, [0,1]) + 1.333333333333333333333333 + + **Examples for the incomplete integral** + + Basic values and limits:: + + >>> ellipe(0,1) + 0.0 + >>> ellipe(0,0) + 0.0 + >>> ellipe(1,0) + 1.0 + >>> ellipe(2+3j,0) + (2.0 + 3.0j) + >>> ellipe(1,1); sin(1) + 0.8414709848078965066525023 + 0.8414709848078965066525023 + >>> ellipe(pi/2, -0.5); ellipe(-0.5) + 1.751771275694817862026502 + 1.751771275694817862026502 + >>> ellipe(pi/2, 1); ellipe(-pi/2, 1) + 1.0 + -1.0 + >>> ellipe(1.5, 1) + 0.9974949866040544309417234 + + Comparing with numerical integration:: + + >>> z,m = 0.5, 1.25 + >>> ellipe(z,m) + 0.4740152182652628394264449 + >>> quad(lambda t: sqrt(1-m*sin(t)**2), [0,z]) + 0.4740152182652628394264449 + + The arguments may be complex numbers:: + + >>> ellipe(3j, 0.5) + (0.0 + 7.551991234890371873502105j) + >>> ellipe(3+4j, 5-6j) + (24.15299022574220502424466 + 75.2503670480325997418156j) + >>> k = 35 + >>> z,m = 2+3j, 1.25 + >>> ellipe(z+pi*k,m); ellipe(z,m) + 2*k*ellipe(m) + (48.30138799412005235090766 + 17.47255216721987688224357j) 
+ (48.30138799412005235090766 + 17.47255216721987688224357j) + + For `|\Re(z)| < \pi/2`, the function can be expressed as a + hypergeometric series of two variables + (see :func:`~mpmath.appellf1`):: + + >>> z,m = 0.5, 0.25 + >>> ellipe(z,m) + 0.4950017030164151928870375 + >>> sin(z)*appellf1(0.5,0.5,-0.5,1.5,sin(z)**2,m*sin(z)**2) + 0.4950017030164151928870376 + + """ + if len(args) == 1: + return ctx._ellipe(args[0]) + else: + phi, m = args + z = phi + if not (ctx.isnormal(z) and ctx.isnormal(m)): + if m == 0: + return z + m + if z == 0: + return z * m + if m == ctx.inf or m == ctx.ninf: + return ctx.inf + raise ValueError + x = z.real + ctx.prec += max(0, ctx.mag(x)) + pi = +ctx.pi + away = abs(x) > pi/2 + if away: + d = ctx.nint(x/pi) + z = z-pi*d + P = 2*d*ctx.ellipe(m) + else: + P = 0 + def terms(): + c, s = ctx.cos_sin(z) + x = c**2 + y = 1-m*s**2 + RF = ctx.elliprf(x, y, 1) + RD = ctx.elliprd(x, y, 1) + return s*RF, -m*s**3*RD/3 + return ctx.sum_accurately(terms) + P + +@defun_wrapped +def ellippi(ctx, *args): + r""" + Called with three arguments `n, \phi, m`, evaluates the Legendre + incomplete elliptic integral of the third kind + + .. math :: + + \Pi(n; \phi, m) = \int_0^{\phi} + \frac{dt}{(1-n \sin^2 t) \sqrt{1-m \sin^2 t}} = + \int_0^{\sin \phi} + \frac{dt}{(1-nt^2) \sqrt{1-t^2} \sqrt{1-mt^2}}. + + Called with two arguments `n, m`, evaluates the complete + elliptic integral of the third kind + `\Pi(n,m) = \Pi(n; \frac{\pi}{2},m)`. + + In the defining integral, it is assumed that the principal branch + of the square root is taken and that the path of integration avoids + crossing any branch cuts. Outside `-\pi/2 \le \Re(\phi) \le \pi/2`, + the function extends quasi-periodically as + + .. math :: + + \Pi(n,\phi+k\pi,m) = 2k\Pi(n,m) + \Pi(n,\phi,m), k \in \mathbb{Z}. + + **Plots** + + .. literalinclude :: /plots/ellippi.py + .. 
image :: /plots/ellippi.png + + **Examples for the complete integral** + + Some basic values and limits:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> ellippi(0,-5); ellipk(-5) + 0.9555039270640439337379334 + 0.9555039270640439337379334 + >>> ellippi(inf,2) + 0.0 + >>> ellippi(2,inf) + 0.0 + >>> abs(ellippi(1,5)) + +inf + >>> abs(ellippi(0.25,1)) + +inf + + Evaluation in terms of simpler functions:: + + >>> ellippi(0.25,0.25); ellipe(0.25)/(1-0.25) + 1.956616279119236207279727 + 1.956616279119236207279727 + >>> ellippi(3,0); pi/(2*sqrt(-2)) + (0.0 - 1.11072073453959156175397j) + (0.0 - 1.11072073453959156175397j) + >>> ellippi(-3,0); pi/(2*sqrt(4)) + 0.7853981633974483096156609 + 0.7853981633974483096156609 + + **Examples for the incomplete integral** + + Basic values and limits:: + + >>> ellippi(0.25,-0.5); ellippi(0.25,pi/2,-0.5) + 1.622944760954741603710555 + 1.622944760954741603710555 + >>> ellippi(1,0,1) + 0.0 + >>> ellippi(inf,0,1) + 0.0 + >>> ellippi(0,0.25,0.5); ellipf(0.25,0.5) + 0.2513040086544925794134591 + 0.2513040086544925794134591 + >>> ellippi(1,1,1); (log(sec(1)+tan(1))+sec(1)*tan(1))/2 + 2.054332933256248668692452 + 2.054332933256248668692452 + >>> ellippi(0.25, 53*pi/2, 0.75); 53*ellippi(0.25,0.75) + 135.240868757890840755058 + 135.240868757890840755058 + >>> ellippi(0.5,pi/4,0.5); 2*ellipe(pi/4,0.5)-1/sqrt(3) + 0.9190227391656969903987269 + 0.9190227391656969903987269 + + Complex arguments are supported:: + + >>> ellippi(0.5, 5+6j-2*pi, -7-8j) + (-0.3612856620076747660410167 + 0.5217735339984807829755815j) + + Some degenerate cases:: + + >>> ellippi(1,1) + +inf + >>> ellippi(1,0) + +inf + >>> ellippi(1,2,0) + +inf + >>> ellippi(1,2,1) + +inf + >>> ellippi(1,0,1) + 0.0 + + """ + if len(args) == 2: + n, m = args + complete = True + z = phi = ctx.pi/2 + else: + n, phi, m = args + complete = False + z = phi + if not (ctx.isnormal(n) and ctx.isnormal(z) and ctx.isnormal(m)): + if ctx.isnan(n) or ctx.isnan(z) or ctx.isnan(m): 
+ raise ValueError + if complete: + if m == 0: + if n == 1: + return ctx.inf + return ctx.pi/(2*ctx.sqrt(1-n)) + if n == 0: return ctx.ellipk(m) + if ctx.isinf(n) or ctx.isinf(m): return ctx.zero + else: + if z == 0: return z + if ctx.isinf(n): return ctx.zero + if ctx.isinf(m): return ctx.zero + if ctx.isinf(n) or ctx.isinf(z) or ctx.isinf(m): + raise ValueError + if complete: + if m == 1: + if n == 1: + return ctx.inf + return -ctx.inf/ctx.sign(n-1) + away = False + else: + x = z.real + ctx.prec += max(0, ctx.mag(x)) + pi = +ctx.pi + away = abs(x) > pi/2 + if away: + d = ctx.nint(x/pi) + z = z-pi*d + P = 2*d*ctx.ellippi(n,m) + if ctx.isinf(P): + return ctx.inf + else: + P = 0 + def terms(): + if complete: + c, s = ctx.zero, ctx.one + else: + c, s = ctx.cos_sin(z) + x = c**2 + y = 1-m*s**2 + RF = ctx.elliprf(x, y, 1) + RJ = ctx.elliprj(x, y, 1, 1-n*s**2) + return s*RF, n*s**3*RJ/3 + return ctx.sum_accurately(terms) + P diff --git a/.venv/lib/python3.11/site-packages/mpmath/functions/expintegrals.py b/.venv/lib/python3.11/site-packages/mpmath/functions/expintegrals.py new file mode 100644 index 0000000000000000000000000000000000000000..0dee8356c0386819d8f0421fded476ee77229359 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/functions/expintegrals.py @@ -0,0 +1,425 @@ +from .functions import defun, defun_wrapped + +@defun_wrapped +def _erf_complex(ctx, z): + z2 = ctx.square_exp_arg(z, -1) + #z2 = -z**2 + v = (2/ctx.sqrt(ctx.pi))*z * ctx.hyp1f1((1,2),(3,2), z2) + if not ctx._re(z): + v = ctx._im(v)*ctx.j + return v + +@defun_wrapped +def _erfc_complex(ctx, z): + if ctx.re(z) > 2: + z2 = ctx.square_exp_arg(z) + nz2 = ctx.fneg(z2, exact=True) + v = ctx.exp(nz2)/ctx.sqrt(ctx.pi) * ctx.hyperu((1,2),(1,2), z2) + else: + v = 1 - ctx._erf_complex(z) + if not ctx._re(z): + v = 1+ctx._im(v)*ctx.j + return v + +@defun +def erf(ctx, z): + z = ctx.convert(z) + if ctx._is_real_type(z): + try: + return ctx._erf(z) + except NotImplementedError: + pass + if 
ctx._is_complex_type(z) and not z.imag: + try: + return type(z)(ctx._erf(z.real)) + except NotImplementedError: + pass + return ctx._erf_complex(z) + +@defun +def erfc(ctx, z): + z = ctx.convert(z) + if ctx._is_real_type(z): + try: + return ctx._erfc(z) + except NotImplementedError: + pass + if ctx._is_complex_type(z) and not z.imag: + try: + return type(z)(ctx._erfc(z.real)) + except NotImplementedError: + pass + return ctx._erfc_complex(z) + +@defun +def square_exp_arg(ctx, z, mult=1, reciprocal=False): + prec = ctx.prec*4+20 + if reciprocal: + z2 = ctx.fmul(z, z, prec=prec) + z2 = ctx.fdiv(ctx.one, z2, prec=prec) + else: + z2 = ctx.fmul(z, z, prec=prec) + if mult != 1: + z2 = ctx.fmul(z2, mult, exact=True) + return z2 + +@defun_wrapped +def erfi(ctx, z): + if not z: + return z + z2 = ctx.square_exp_arg(z) + v = (2/ctx.sqrt(ctx.pi)*z) * ctx.hyp1f1((1,2), (3,2), z2) + if not ctx._re(z): + v = ctx._im(v)*ctx.j + return v + +@defun_wrapped +def erfinv(ctx, x): + xre = ctx._re(x) + if (xre != x) or (xre < -1) or (xre > 1): + return ctx.bad_domain("erfinv(x) is defined only for -1 <= x <= 1") + x = xre + #if ctx.isnan(x): return x + if not x: return x + if x == 1: return ctx.inf + if x == -1: return ctx.ninf + if abs(x) < 0.9: + a = 0.53728*x**3 + 0.813198*x + else: + # An asymptotic formula + u = ctx.ln(2/ctx.pi/(abs(x)-1)**2) + a = ctx.sign(x) * ctx.sqrt(u - ctx.ln(u))/ctx.sqrt(2) + ctx.prec += 10 + return ctx.findroot(lambda t: ctx.erf(t)-x, a) + +@defun_wrapped +def npdf(ctx, x, mu=0, sigma=1): + sigma = ctx.convert(sigma) + return ctx.exp(-(x-mu)**2/(2*sigma**2)) / (sigma*ctx.sqrt(2*ctx.pi)) + +@defun_wrapped +def ncdf(ctx, x, mu=0, sigma=1): + a = (x-mu)/(sigma*ctx.sqrt(2)) + if a < 0: + return ctx.erfc(-a)/2 + else: + return (1+ctx.erf(a))/2 + +@defun_wrapped +def betainc(ctx, a, b, x1=0, x2=1, regularized=False): + if x1 == x2: + v = 0 + elif not x1: + if x1 == 0 and x2 == 1: + v = ctx.beta(a, b) + else: + v = x2**a * ctx.hyp2f1(a, 1-b, a+1, x2) / a + else: + 
m, d = ctx.nint_distance(a) + if m <= 0: + if d < -ctx.prec: + h = +ctx.eps + ctx.prec *= 2 + a += h + elif d < -4: + ctx.prec -= d + s1 = x2**a * ctx.hyp2f1(a,1-b,a+1,x2) + s2 = x1**a * ctx.hyp2f1(a,1-b,a+1,x1) + v = (s1 - s2) / a + if regularized: + v /= ctx.beta(a,b) + return v + +@defun +def gammainc(ctx, z, a=0, b=None, regularized=False): + regularized = bool(regularized) + z = ctx.convert(z) + if a is None: + a = ctx.zero + lower_modified = False + else: + a = ctx.convert(a) + lower_modified = a != ctx.zero + if b is None: + b = ctx.inf + upper_modified = False + else: + b = ctx.convert(b) + upper_modified = b != ctx.inf + # Complete gamma function + if not (upper_modified or lower_modified): + if regularized: + if ctx.re(z) < 0: + return ctx.inf + elif ctx.re(z) > 0: + return ctx.one + else: + return ctx.nan + return ctx.gamma(z) + if a == b: + return ctx.zero + # Standardize + if ctx.re(a) > ctx.re(b): + return -ctx.gammainc(z, b, a, regularized) + # Generalized gamma + if upper_modified and lower_modified: + return +ctx._gamma3(z, a, b, regularized) + # Upper gamma + elif lower_modified: + return ctx._upper_gamma(z, a, regularized) + # Lower gamma + elif upper_modified: + return ctx._lower_gamma(z, b, regularized) + +@defun +def _lower_gamma(ctx, z, b, regularized=False): + # Pole + if ctx.isnpint(z): + return type(z)(ctx.inf) + G = [z] * regularized + negb = ctx.fneg(b, exact=True) + def h(z): + T1 = [ctx.exp(negb), b, z], [1, z, -1], [], G, [1], [1+z], b + return (T1,) + return ctx.hypercomb(h, [z]) + +@defun +def _upper_gamma(ctx, z, a, regularized=False): + # Fast integer case, when available + if ctx.isint(z): + try: + if regularized: + # Gamma pole + if ctx.isnpint(z): + return type(z)(ctx.zero) + orig = ctx.prec + try: + ctx.prec += 10 + return ctx._gamma_upper_int(z, a) / ctx.gamma(z) + finally: + ctx.prec = orig + else: + return ctx._gamma_upper_int(z, a) + except NotImplementedError: + pass + # hypercomb is unable to detect the exact zeros, so 
handle them here + if z == 2 and a == -1: + return (z+a)*0 + if z == 3 and (a == -1-1j or a == -1+1j): + return (z+a)*0 + nega = ctx.fneg(a, exact=True) + G = [z] * regularized + # Use 2F0 series when possible; fall back to lower gamma representation + try: + def h(z): + r = z-1 + return [([ctx.exp(nega), a], [1, r], [], G, [1, -r], [], 1/nega)] + return ctx.hypercomb(h, [z], force_series=True) + except ctx.NoConvergence: + def h(z): + T1 = [], [1, z-1], [z], G, [], [], 0 + T2 = [-ctx.exp(nega), a, z], [1, z, -1], [], G, [1], [1+z], a + return T1, T2 + return ctx.hypercomb(h, [z]) + +@defun +def _gamma3(ctx, z, a, b, regularized=False): + pole = ctx.isnpint(z) + if regularized and pole: + return ctx.zero + try: + ctx.prec += 15 + # We don't know in advance whether it's better to write as a difference + # of lower or upper gamma functions, so try both + T1 = ctx.gammainc(z, a, regularized=regularized) + T2 = ctx.gammainc(z, b, regularized=regularized) + R = T1 - T2 + if ctx.mag(R) - max(ctx.mag(T1), ctx.mag(T2)) > -10: + return R + if not pole: + T1 = ctx.gammainc(z, 0, b, regularized=regularized) + T2 = ctx.gammainc(z, 0, a, regularized=regularized) + R = T1 - T2 + # May be ok, but should probably at least print a warning + # about possible cancellation + if 1: #ctx.mag(R) - max(ctx.mag(T1), ctx.mag(T2)) > -10: + return R + finally: + ctx.prec -= 15 + raise NotImplementedError + +@defun_wrapped +def expint(ctx, n, z): + if ctx.isint(n) and ctx._is_real_type(z): + try: + return ctx._expint_int(n, z) + except NotImplementedError: + pass + if ctx.isnan(n) or ctx.isnan(z): + return z*n + if z == ctx.inf: + return 1/z + if z == 0: + # integral from 1 to infinity of t^n + if ctx.re(n) <= 1: + # TODO: reasonable sign of infinity + return type(z)(ctx.inf) + else: + return ctx.one/(n-1) + if n == 0: + return ctx.exp(-z)/z + if n == -1: + return ctx.exp(-z)*(z+1)/z**2 + return z**(n-1) * ctx.gammainc(1-n, z) + +@defun_wrapped +def li(ctx, z, offset=False): + if offset: + if 
z == 2: + return ctx.zero + return ctx.ei(ctx.ln(z)) - ctx.ei(ctx.ln2) + if not z: + return z + if z == 1: + return ctx.ninf + return ctx.ei(ctx.ln(z)) + +@defun +def ei(ctx, z): + try: + return ctx._ei(z) + except NotImplementedError: + return ctx._ei_generic(z) + +@defun_wrapped +def _ei_generic(ctx, z): + # Note: the following is currently untested because mp and fp + # both use special-case ei code + if z == ctx.inf: + return z + if z == ctx.ninf: + return ctx.zero + if ctx.mag(z) > 1: + try: + r = ctx.one/z + v = ctx.exp(z)*ctx.hyper([1,1],[],r, + maxterms=ctx.prec, force_series=True)/z + im = ctx._im(z) + if im > 0: + v += ctx.pi*ctx.j + if im < 0: + v -= ctx.pi*ctx.j + return v + except ctx.NoConvergence: + pass + v = z*ctx.hyp2f2(1,1,2,2,z) + ctx.euler + if ctx._im(z): + v += 0.5*(ctx.log(z) - ctx.log(ctx.one/z)) + else: + v += ctx.log(abs(z)) + return v + +@defun +def e1(ctx, z): + try: + return ctx._e1(z) + except NotImplementedError: + return ctx.expint(1, z) + +@defun +def ci(ctx, z): + try: + return ctx._ci(z) + except NotImplementedError: + return ctx._ci_generic(z) + +@defun_wrapped +def _ci_generic(ctx, z): + if ctx.isinf(z): + if z == ctx.inf: return ctx.zero + if z == ctx.ninf: return ctx.pi*1j + jz = ctx.fmul(ctx.j,z,exact=True) + njz = ctx.fneg(jz,exact=True) + v = 0.5*(ctx.ei(jz) + ctx.ei(njz)) + zreal = ctx._re(z) + zimag = ctx._im(z) + if zreal == 0: + if zimag > 0: v += ctx.pi*0.5j + if zimag < 0: v -= ctx.pi*0.5j + if zreal < 0: + if zimag >= 0: v += ctx.pi*1j + if zimag < 0: v -= ctx.pi*1j + if ctx._is_real_type(z) and zreal > 0: + v = ctx._re(v) + return v + +@defun +def si(ctx, z): + try: + return ctx._si(z) + except NotImplementedError: + return ctx._si_generic(z) + +@defun_wrapped +def _si_generic(ctx, z): + if ctx.isinf(z): + if z == ctx.inf: return 0.5*ctx.pi + if z == ctx.ninf: return -0.5*ctx.pi + # Suffers from cancellation near 0 + if ctx.mag(z) >= -1: + jz = ctx.fmul(ctx.j,z,exact=True) + njz = ctx.fneg(jz,exact=True) + v = 
(-0.5j)*(ctx.ei(jz) - ctx.ei(njz)) + zreal = ctx._re(z) + if zreal > 0: + v -= 0.5*ctx.pi + if zreal < 0: + v += 0.5*ctx.pi + if ctx._is_real_type(z): + v = ctx._re(v) + return v + else: + return z*ctx.hyp1f2((1,2),(3,2),(3,2),-0.25*z*z) + +@defun_wrapped +def chi(ctx, z): + nz = ctx.fneg(z, exact=True) + v = 0.5*(ctx.ei(z) + ctx.ei(nz)) + zreal = ctx._re(z) + zimag = ctx._im(z) + if zimag > 0: + v += ctx.pi*0.5j + elif zimag < 0: + v -= ctx.pi*0.5j + elif zreal < 0: + v += ctx.pi*1j + return v + +@defun_wrapped +def shi(ctx, z): + # Suffers from cancellation near 0 + if ctx.mag(z) >= -1: + nz = ctx.fneg(z, exact=True) + v = 0.5*(ctx.ei(z) - ctx.ei(nz)) + zimag = ctx._im(z) + if zimag > 0: v -= 0.5j*ctx.pi + if zimag < 0: v += 0.5j*ctx.pi + return v + else: + return z * ctx.hyp1f2((1,2),(3,2),(3,2),0.25*z*z) + +@defun_wrapped +def fresnels(ctx, z): + if z == ctx.inf: + return ctx.mpf(0.5) + if z == ctx.ninf: + return ctx.mpf(-0.5) + return ctx.pi*z**3/6*ctx.hyp1f2((3,4),(3,2),(7,4),-ctx.pi**2*z**4/16) + +@defun_wrapped +def fresnelc(ctx, z): + if z == ctx.inf: + return ctx.mpf(0.5) + if z == ctx.ninf: + return ctx.mpf(-0.5) + return z*ctx.hyp1f2((1,4),(1,2),(5,4),-ctx.pi**2*z**4/16) diff --git a/.venv/lib/python3.11/site-packages/mpmath/functions/factorials.py b/.venv/lib/python3.11/site-packages/mpmath/functions/factorials.py new file mode 100644 index 0000000000000000000000000000000000000000..9259e40b95bf1c908a7ad98b59bbb33528606b07 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/functions/factorials.py @@ -0,0 +1,187 @@ +from ..libmp.backend import xrange +from .functions import defun, defun_wrapped + +@defun +def gammaprod(ctx, a, b, _infsign=False): + a = [ctx.convert(x) for x in a] + b = [ctx.convert(x) for x in b] + poles_num = [] + poles_den = [] + regular_num = [] + regular_den = [] + for x in a: [regular_num, poles_num][ctx.isnpint(x)].append(x) + for x in b: [regular_den, poles_den][ctx.isnpint(x)].append(x) + # One more pole in numerator 
or denominator gives 0 or inf + if len(poles_num) < len(poles_den): return ctx.zero + if len(poles_num) > len(poles_den): + # Get correct sign of infinity for x+h, h -> 0 from above + # XXX: hack, this should be done properly + if _infsign: + a = [x and x*(1+ctx.eps) or x+ctx.eps for x in poles_num] + b = [x and x*(1+ctx.eps) or x+ctx.eps for x in poles_den] + return ctx.sign(ctx.gammaprod(a+regular_num,b+regular_den)) * ctx.inf + else: + return ctx.inf + # All poles cancel + # lim G(i)/G(j) = (-1)**(i+j) * gamma(1-j) / gamma(1-i) + p = ctx.one + orig = ctx.prec + try: + ctx.prec = orig + 15 + while poles_num: + i = poles_num.pop() + j = poles_den.pop() + p *= (-1)**(i+j) * ctx.gamma(1-j) / ctx.gamma(1-i) + for x in regular_num: p *= ctx.gamma(x) + for x in regular_den: p /= ctx.gamma(x) + finally: + ctx.prec = orig + return +p + +@defun +def beta(ctx, x, y): + x = ctx.convert(x) + y = ctx.convert(y) + if ctx.isinf(y): + x, y = y, x + if ctx.isinf(x): + if x == ctx.inf and not ctx._im(y): + if y == ctx.ninf: + return ctx.nan + if y > 0: + return ctx.zero + if ctx.isint(y): + return ctx.nan + if y < 0: + return ctx.sign(ctx.gamma(y)) * ctx.inf + return ctx.nan + xy = ctx.fadd(x, y, prec=2*ctx.prec) + return ctx.gammaprod([x, y], [xy]) + +@defun +def binomial(ctx, n, k): + n1 = ctx.fadd(n, 1, prec=2*ctx.prec) + k1 = ctx.fadd(k, 1, prec=2*ctx.prec) + nk1 = ctx.fsub(n1, k, prec=2*ctx.prec) + return ctx.gammaprod([n1], [k1, nk1]) + +@defun +def rf(ctx, x, n): + xn = ctx.fadd(x, n, prec=2*ctx.prec) + return ctx.gammaprod([xn], [x]) + +@defun +def ff(ctx, x, n): + x1 = ctx.fadd(x, 1, prec=2*ctx.prec) + xn1 = ctx.fadd(ctx.fsub(x, n, prec=2*ctx.prec), 1, prec=2*ctx.prec) + return ctx.gammaprod([x1], [xn1]) + +@defun_wrapped +def fac2(ctx, x): + if ctx.isinf(x): + if x == ctx.inf: + return x + return ctx.nan + return 2**(x/2)*(ctx.pi/2)**((ctx.cospi(x)-1)/4)*ctx.gamma(x/2+1) + +@defun_wrapped +def barnesg(ctx, z): + if ctx.isinf(z): + if z == ctx.inf: + return z + return 
ctx.nan
+    if ctx.isnan(z):
+        return z
+    # Barnes G has zeros at the non-positive integers; return a zero of
+    # the same type as z.
+    if (not ctx._im(z)) and ctx._re(z) <= 0 and ctx.isint(ctx._re(z)):
+        return z*0
+    # Account for size (would not be needed if computing log(G))
+    if abs(z) > 5:
+        ctx.dps += 2*ctx.log(abs(z),2)
+    # Reflection formula
+    if ctx.re(z) < -ctx.dps:
+        w = 1-z
+        pi2 = 2*ctx.pi
+        u = ctx.expjpi(2*w)
+        v = ctx.j*ctx.pi/12 - ctx.j*ctx.pi*w**2/2 + w*ctx.ln(1-u) - \
+            ctx.j*ctx.polylog(2, u)/pi2
+        v = ctx.barnesg(2-z)*ctx.exp(v)/pi2**w
+        # For real input any imaginary residue is rounding noise; drop it.
+        if ctx._is_real_type(z):
+            v = ctx._re(v)
+        return v
+    # Estimate terms for asymptotic expansion
+    # TODO: fixme, obviously
+    N = ctx.dps // 2 + 5
+    G = 1
+    # Shift z upward via the recurrence G(z+1) = gamma(z)*G(z) until the
+    # asymptotic series below is applicable; the gamma factors accumulate
+    # (inverted) in G.
+    while abs(z) < N or ctx.re(z) < 1:
+        G /= ctx.gamma(z)
+        z += 1
+    z -= 1
+    # Asymptotic expansion of log G(z+1) in terms of the
+    # Glaisher-Kinkelin constant plus a Bernoulli-number tail.
+    s = ctx.mpf(1)/12
+    s -= ctx.log(ctx.glaisher)
+    s += z*ctx.log(2*ctx.pi)/2
+    s += (z**2/2-ctx.mpf(1)/12)*ctx.log(z)
+    s -= 3*z**2/4
+    z2k = z2 = z**2
+    for k in xrange(1, N+1):
+        t = ctx.bernoulli(2*k+2) / (4*k*(k+1)*z2k)
+        if abs(t) < ctx.eps:
+            #print k, N # check how many terms were needed
+            break
+        z2k *= z2
+        s += t
+    #if k == N:
+    #    print "warning: series for barnesg failed to converge", ctx.dps
+    return G*ctx.exp(s)
+
+@defun
+def superfac(ctx, z):
+    # Superfactorial sf(n) = 1!*2!*...*n!, expressed via the Barnes
+    # G-function as G(z+2).
+    return ctx.barnesg(z+2)
+
+@defun_wrapped
+def hyperfac(ctx, z):
+    # Hyperfactorial H(n) = 1^1*2^2*...*n^n, computed below as
+    # gamma(z+1)**z / G(z+1).
+    # XXX: estimate needed extra bits accurately
+    if z == ctx.inf:
+        return z
+    if abs(z) > 5:
+        extra = 4*int(ctx.log(abs(z),2))
+    else:
+        extra = 0
+    ctx.prec += extra
+    # Negative integer input: reduce to hyperfac(-n-1) with a sign fixed
+    # by the parity of (n+1)//2, as encoded below.
+    if not ctx._im(z) and ctx._re(z) < 0 and ctx.isint(ctx._re(z)):
+        n = int(ctx.re(z))
+        h = ctx.hyperfac(-n-1)
+        if ((n+1)//2) & 1:
+            h = -h
+        if ctx._is_complex_type(z):
+            return h + 0j
+        return h
+    zp1 = z+1
+    # Wrong branch cut
+    #v = ctx.gamma(zp1)**z
+    #ctx.prec -= extra
+    #return v / ctx.barnesg(zp1)
+    # exp(z*loggamma(z+1)) selects the branch that gamma(zp1)**z would not.
+    v = ctx.exp(z*ctx.loggamma(zp1))
+    ctx.prec -= extra
+    return v / ctx.barnesg(zp1)
+
+'''
+@defun
+def psi0(ctx, z):
+    """Shortcut for psi(0,z) (the digamma function)"""
+    return ctx.psi(0, z)
+
+@defun
+def psi1(ctx, z):
+    """Shortcut for psi(1,z) (the trigamma function)"""
+    return
ctx.psi(1, z) + +@defun +def psi2(ctx, z): + """Shortcut for psi(2,z) (the tetragamma function)""" + return ctx.psi(2, z) + +@defun +def psi3(ctx, z): + """Shortcut for psi(3,z) (the pentagamma function)""" + return ctx.psi(3, z) +''' diff --git a/.venv/lib/python3.11/site-packages/mpmath/functions/functions.py b/.venv/lib/python3.11/site-packages/mpmath/functions/functions.py new file mode 100644 index 0000000000000000000000000000000000000000..4cdf5dd921418db10847ea75b32f8e6dfacdba64 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/functions/functions.py @@ -0,0 +1,645 @@ +from ..libmp.backend import xrange + +class SpecialFunctions(object): + """ + This class implements special functions using high-level code. + + Elementary and some other functions (e.g. gamma function, basecase + hypergeometric series) are assumed to be predefined by the context as + "builtins" or "low-level" functions. + """ + defined_functions = {} + + # The series for the Jacobi theta functions converge for |q| < 1; + # in the current implementation they throw a ValueError for + # abs(q) > THETA_Q_LIM + THETA_Q_LIM = 1 - 10**-7 + + def __init__(self): + cls = self.__class__ + for name in cls.defined_functions: + f, wrap = cls.defined_functions[name] + cls._wrap_specfun(name, f, wrap) + + self.mpq_1 = self._mpq((1,1)) + self.mpq_0 = self._mpq((0,1)) + self.mpq_1_2 = self._mpq((1,2)) + self.mpq_3_2 = self._mpq((3,2)) + self.mpq_1_4 = self._mpq((1,4)) + self.mpq_1_16 = self._mpq((1,16)) + self.mpq_3_16 = self._mpq((3,16)) + self.mpq_5_2 = self._mpq((5,2)) + self.mpq_3_4 = self._mpq((3,4)) + self.mpq_7_4 = self._mpq((7,4)) + self.mpq_5_4 = self._mpq((5,4)) + self.mpq_1_3 = self._mpq((1,3)) + self.mpq_2_3 = self._mpq((2,3)) + self.mpq_4_3 = self._mpq((4,3)) + self.mpq_1_6 = self._mpq((1,6)) + self.mpq_5_6 = self._mpq((5,6)) + self.mpq_5_3 = self._mpq((5,3)) + + self._misc_const_cache = {} + + self._aliases.update({ + 'phase' : 'arg', + 'conjugate' : 'conj', + 'nthroot' : 'root', + 
'polygamma' : 'psi', + 'hurwitz' : 'zeta', + #'digamma' : 'psi0', + #'trigamma' : 'psi1', + #'tetragamma' : 'psi2', + #'pentagamma' : 'psi3', + 'fibonacci' : 'fib', + 'factorial' : 'fac', + }) + + self.zetazero_memoized = self.memoize(self.zetazero) + + # Default -- do nothing + @classmethod + def _wrap_specfun(cls, name, f, wrap): + setattr(cls, name, f) + + # Optional fast versions of common functions in common cases. + # If not overridden, default (generic hypergeometric series) + # implementations will be used + def _besselj(ctx, n, z): raise NotImplementedError + def _erf(ctx, z): raise NotImplementedError + def _erfc(ctx, z): raise NotImplementedError + def _gamma_upper_int(ctx, z, a): raise NotImplementedError + def _expint_int(ctx, n, z): raise NotImplementedError + def _zeta(ctx, s): raise NotImplementedError + def _zetasum_fast(ctx, s, a, n, derivatives, reflect): raise NotImplementedError + def _ei(ctx, z): raise NotImplementedError + def _e1(ctx, z): raise NotImplementedError + def _ci(ctx, z): raise NotImplementedError + def _si(ctx, z): raise NotImplementedError + def _altzeta(ctx, s): raise NotImplementedError + +def defun_wrapped(f): + SpecialFunctions.defined_functions[f.__name__] = f, True + return f + +def defun(f): + SpecialFunctions.defined_functions[f.__name__] = f, False + return f + +def defun_static(f): + setattr(SpecialFunctions, f.__name__, f) + return f + +@defun_wrapped +def cot(ctx, z): return ctx.one / ctx.tan(z) + +@defun_wrapped +def sec(ctx, z): return ctx.one / ctx.cos(z) + +@defun_wrapped +def csc(ctx, z): return ctx.one / ctx.sin(z) + +@defun_wrapped +def coth(ctx, z): return ctx.one / ctx.tanh(z) + +@defun_wrapped +def sech(ctx, z): return ctx.one / ctx.cosh(z) + +@defun_wrapped +def csch(ctx, z): return ctx.one / ctx.sinh(z) + +@defun_wrapped +def acot(ctx, z): + if not z: + return ctx.pi * 0.5 + else: + return ctx.atan(ctx.one / z) + +@defun_wrapped +def asec(ctx, z): return ctx.acos(ctx.one / z) + +@defun_wrapped +def 
acsc(ctx, z): return ctx.asin(ctx.one / z)
+
+@defun_wrapped
+def acoth(ctx, z):
+    # Inverse hyperbolic cotangent: atanh(1/z), with acoth(0) = i*pi/2.
+    if not z:
+        return ctx.pi * 0.5j
+    else:
+        return ctx.atanh(ctx.one / z)
+
+
+@defun_wrapped
+def asech(ctx, z): return ctx.acosh(ctx.one / z)
+
+@defun_wrapped
+def acsch(ctx, z): return ctx.asinh(ctx.one / z)
+
+@defun
+def sign(ctx, x):
+    # Sign of x: 0 and nan pass through unchanged, real x maps to +-1,
+    # and other (complex) x maps to x/|x| on the unit circle.
+    x = ctx.convert(x)
+    if not x or ctx.isnan(x):
+        return x
+    if ctx._is_real_type(x):
+        if x > 0:
+            return ctx.one
+        else:
+            return -ctx.one
+    return x / abs(x)
+
+@defun
+def agm(ctx, a, b=1):
+    # Arithmetic-geometric mean of a and b; the single-argument case
+    # agm(a) = agm(a, 1) dispatches to the specialized agm1.
+    if b == 1:
+        return ctx.agm1(a)
+    a = ctx.convert(a)
+    b = ctx.convert(b)
+    return ctx._agm(a, b)
+
+@defun_wrapped
+def sinc(ctx, x):
+    # Unnormalized sinc: sin(x)/x with the removable singularity filled in
+    # (x+1 returns 1 while preserving the type of x) and sinc(+-inf) = 0.
+    if ctx.isinf(x):
+        return 1/x
+    if not x:
+        return x+1
+    return ctx.sin(x)/x
+
+@defun_wrapped
+def sincpi(ctx, x):
+    # Normalized sinc: sin(pi*x)/(pi*x), with sincpi(0) = 1.
+    if ctx.isinf(x):
+        return 1/x
+    if not x:
+        return x+1
+    return ctx.sinpi(x)/(ctx.pi*x)
+
+# TODO: tests; improve implementation
+@defun_wrapped
+def expm1(ctx, x):
+    # exp(x) - 1, accurate for small x where direct subtraction cancels.
+    if not x:
+        return ctx.zero
+    # exp(x) - 1 ~ x
+    if ctx.mag(x) < -ctx.prec:
+        # Two-term Taylor series suffices below working precision.
+        return x + 0.5*x**2
+    # TODO: accurately eval the smaller of the real/imag parts
+    return ctx.sum_accurately(lambda: iter([ctx.exp(x),-1]),1)
+
+@defun_wrapped
+def log1p(ctx, x):
+    # log(1+x), performing the addition at doubled precision so small x
+    # does not lose accuracy to cancellation.
+    if not x:
+        return ctx.zero
+    if ctx.mag(x) < -ctx.prec:
+        return x - 0.5*x**2
+    return ctx.log(ctx.fadd(1, x, prec=2*ctx.prec))
+
+@defun_wrapped
+def powm1(ctx, x, y):
+    # x**y - 1, computed accurately when x**y is close to 1.
+    mag = ctx.mag
+    one = ctx.one
+    w = x**y - one
+    M = mag(w)
+    # Only moderate cancellation
+    if M > -8:
+        return w
+    # Check for the only possible exact cases
+    if not w:
+        if (not y) or (x in (1, -1, 1j, -1j) and ctx.isint(y)):
+            return w
+    x1 = x - one
+    # NOTE(review): x1 appears unused in the remainder of this function --
+    # confirm it is vestigial before removing.
+    magy = mag(y)
+    lnx = ctx.ln(x)
+    # Small y: x^y - 1 ~ log(x)*y + O(log(x)^2 * y^2)
+    if magy + mag(lnx) < -ctx.prec:
+        return lnx*y + (lnx*y)**2/2
+    # TODO: accurately eval the smaller of the real/imag part
+    return ctx.sum_accurately(lambda: iter([x**y, -1]), 1)
+
+@defun
+def _rootof1(ctx, k, n):
+    # k-th n-th root of unity, exp(2*pi*i*k/n); the special angles
+    # (k/n = 0, 1/2, 1/4, 3/4) are returned exactly below.
+    k = int(k)
+    n = int(n)
+    k %= n
+    if not k:
+        return ctx.one
+    
elif 2*k == n: + return -ctx.one + elif 4*k == n: + return ctx.j + elif 4*k == 3*n: + return -ctx.j + return ctx.expjpi(2*ctx.mpf(k)/n) + +@defun +def root(ctx, x, n, k=0): + n = int(n) + x = ctx.convert(x) + if k: + # Special case: there is an exact real root + if (n & 1 and 2*k == n-1) and (not ctx.im(x)) and (ctx.re(x) < 0): + return -ctx.root(-x, n) + # Multiply by root of unity + prec = ctx.prec + try: + ctx.prec += 10 + v = ctx.root(x, n, 0) * ctx._rootof1(k, n) + finally: + ctx.prec = prec + return +v + return ctx._nthroot(x, n) + +@defun +def unitroots(ctx, n, primitive=False): + gcd = ctx._gcd + prec = ctx.prec + try: + ctx.prec += 10 + if primitive: + v = [ctx._rootof1(k,n) for k in range(n) if gcd(k,n) == 1] + else: + # TODO: this can be done *much* faster + v = [ctx._rootof1(k,n) for k in range(n)] + finally: + ctx.prec = prec + return [+x for x in v] + +@defun +def arg(ctx, x): + x = ctx.convert(x) + re = ctx._re(x) + im = ctx._im(x) + return ctx.atan2(im, re) + +@defun +def fabs(ctx, x): + return abs(ctx.convert(x)) + +@defun +def re(ctx, x): + x = ctx.convert(x) + if hasattr(x, "real"): # py2.5 doesn't have .real/.imag for all numbers + return x.real + return x + +@defun +def im(ctx, x): + x = ctx.convert(x) + if hasattr(x, "imag"): # py2.5 doesn't have .real/.imag for all numbers + return x.imag + return ctx.zero + +@defun +def conj(ctx, x): + x = ctx.convert(x) + try: + return x.conjugate() + except AttributeError: + return x + +@defun +def polar(ctx, z): + return (ctx.fabs(z), ctx.arg(z)) + +@defun_wrapped +def rect(ctx, r, phi): + return r * ctx.mpc(*ctx.cos_sin(phi)) + +@defun +def log(ctx, x, b=None): + if b is None: + return ctx.ln(x) + wp = ctx.prec + 20 + return ctx.ln(x, prec=wp) / ctx.ln(b, prec=wp) + +@defun +def log10(ctx, x): + return ctx.log(x, 10) + +@defun +def fmod(ctx, x, y): + return ctx.convert(x) % ctx.convert(y) + +@defun +def degrees(ctx, x): + return x / ctx.degree + +@defun +def radians(ctx, x): + return x * ctx.degree + 
+def _lambertw_special(ctx, z, k): + # W(0,0) = 0; all other branches are singular + if not z: + if not k: + return z + return ctx.ninf + z + if z == ctx.inf: + if k == 0: + return z + else: + return z + 2*k*ctx.pi*ctx.j + if z == ctx.ninf: + return (-z) + (2*k+1)*ctx.pi*ctx.j + # Some kind of nan or complex inf/nan? + return ctx.ln(z) + +import math +import cmath + +def _lambertw_approx_hybrid(z, k): + imag_sign = 0 + if hasattr(z, "imag"): + x = float(z.real) + y = z.imag + if y: + imag_sign = (-1) ** (y < 0) + y = float(y) + else: + x = float(z) + y = 0.0 + imag_sign = 0 + # hack to work regardless of whether Python supports -0.0 + if not y: + y = 0.0 + z = complex(x,y) + if k == 0: + if -4.0 < y < 4.0 and -1.0 < x < 2.5: + if imag_sign: + # Taylor series in upper/lower half-plane + if y > 1.00: return (0.876+0.645j) + (0.118-0.174j)*(z-(0.75+2.5j)) + if y > 0.25: return (0.505+0.204j) + (0.375-0.132j)*(z-(0.75+0.5j)) + if y < -1.00: return (0.876-0.645j) + (0.118+0.174j)*(z-(0.75-2.5j)) + if y < -0.25: return (0.505-0.204j) + (0.375+0.132j)*(z-(0.75-0.5j)) + # Taylor series near -1 + if x < -0.5: + if imag_sign >= 0: + return (-0.318+1.34j) + (-0.697-0.593j)*(z+1) + else: + return (-0.318-1.34j) + (-0.697+0.593j)*(z+1) + # return real type + r = -0.367879441171442 + if (not imag_sign) and x > r: + z = x + # Singularity near -1/e + if x < -0.2: + return -1 + 2.33164398159712*(z-r)**0.5 - 1.81218788563936*(z-r) + # Taylor series near 0 + if x < 0.5: return z + # Simple linear approximation + return 0.2 + 0.3*z + if (not imag_sign) and x > 0.0: + L1 = math.log(x); L2 = math.log(L1) + else: + L1 = cmath.log(z); L2 = cmath.log(L1) + elif k == -1: + # return real type + r = -0.367879441171442 + if (not imag_sign) and r < x < 0.0: + z = x + if (imag_sign >= 0) and y < 0.1 and -0.6 < x < -0.2: + return -1 - 2.33164398159712*(z-r)**0.5 - 1.81218788563936*(z-r) + if (not imag_sign) and -0.2 <= x < 0.0: + L1 = math.log(-x) + return L1 - math.log(-L1) + else: + if 
imag_sign == -1 and (not y) and x < 0.0: + L1 = cmath.log(z) - 3.1415926535897932j + else: + L1 = cmath.log(z) - 6.2831853071795865j + L2 = cmath.log(L1) + return L1 - L2 + L2/L1 + L2*(L2-2)/(2*L1**2) + +def _lambertw_series(ctx, z, k, tol): + """ + Return rough approximation for W_k(z) from an asymptotic series, + sufficiently accurate for the Halley iteration to converge to + the correct value. + """ + magz = ctx.mag(z) + if (-10 < magz < 900) and (-1000 < k < 1000): + # Near the branch point at -1/e + if magz < 1 and abs(z+0.36787944117144) < 0.05: + if k == 0 or (k == -1 and ctx._im(z) >= 0) or \ + (k == 1 and ctx._im(z) < 0): + delta = ctx.sum_accurately(lambda: [z, ctx.exp(-1)]) + cancellation = -ctx.mag(delta) + ctx.prec += cancellation + # Use series given in Corless et al. + p = ctx.sqrt(2*(ctx.e*z+1)) + ctx.prec -= cancellation + u = {0:ctx.mpf(-1), 1:ctx.mpf(1)} + a = {0:ctx.mpf(2), 1:ctx.mpf(-1)} + if k != 0: + p = -p + s = ctx.zero + # The series converges, so we could use it directly, but unless + # *extremely* close, it is better to just use the first few + # terms to get a good approximation for the iteration + for l in xrange(max(2,cancellation)): + if l not in u: + a[l] = ctx.fsum(u[j]*u[l+1-j] for j in xrange(2,l)) + u[l] = (l-1)*(u[l-2]/2+a[l-2]/4)/(l+1)-a[l]/2-u[l-1]/(l+1) + term = u[l] * p**l + s += term + if ctx.mag(term) < -tol: + return s, True + l += 1 + ctx.prec += cancellation//2 + return s, False + if k == 0 or k == -1: + return _lambertw_approx_hybrid(z, k), False + if k == 0: + if magz < -1: + return z*(1-z), False + L1 = ctx.ln(z) + L2 = ctx.ln(L1) + elif k == -1 and (not ctx._im(z)) and (-0.36787944117144 < ctx._re(z) < 0): + L1 = ctx.ln(-z) + return L1 - ctx.ln(-L1), False + else: + # This holds both as z -> 0 and z -> inf. + # Relative error is O(1/log(z)). 
+ L1 = ctx.ln(z) + 2j*ctx.pi*k + L2 = ctx.ln(L1) + return L1 - L2 + L2/L1 + L2*(L2-2)/(2*L1**2), False + +@defun +def lambertw(ctx, z, k=0): + z = ctx.convert(z) + k = int(k) + if not ctx.isnormal(z): + return _lambertw_special(ctx, z, k) + prec = ctx.prec + ctx.prec += 20 + ctx.mag(k or 1) + wp = ctx.prec + tol = wp - 5 + w, done = _lambertw_series(ctx, z, k, tol) + if not done: + # Use Halley iteration to solve w*exp(w) = z + two = ctx.mpf(2) + for i in xrange(100): + ew = ctx.exp(w) + wew = w*ew + wewz = wew-z + wn = w - wewz/(wew+ew-(w+two)*wewz/(two*w+two)) + if ctx.mag(wn-w) <= ctx.mag(wn) - tol: + w = wn + break + else: + w = wn + if i == 100: + ctx.warn("Lambert W iteration failed to converge for z = %s" % z) + ctx.prec = prec + return +w + +@defun_wrapped +def bell(ctx, n, x=1): + x = ctx.convert(x) + if not n: + if ctx.isnan(x): + return x + return type(x)(1) + if ctx.isinf(x) or ctx.isinf(n) or ctx.isnan(x) or ctx.isnan(n): + return x**n + if n == 1: return x + if n == 2: return x*(x+1) + if x == 0: return ctx.sincpi(n) + return _polyexp(ctx, n, x, True) / ctx.exp(x) + +def _polyexp(ctx, n, x, extra=False): + def _terms(): + if extra: + yield ctx.sincpi(n) + t = x + k = 1 + while 1: + yield k**n * t + k += 1 + t = t*x/k + return ctx.sum_accurately(_terms, check_step=4) + +@defun_wrapped +def polyexp(ctx, s, z): + if ctx.isinf(z) or ctx.isinf(s) or ctx.isnan(z) or ctx.isnan(s): + return z**s + if z == 0: return z*s + if s == 0: return ctx.expm1(z) + if s == 1: return ctx.exp(z)*z + if s == 2: return ctx.exp(z)*z*(z+1) + return _polyexp(ctx, s, z) + +@defun_wrapped +def cyclotomic(ctx, n, z): + n = int(n) + if n < 0: + raise ValueError("n cannot be negative") + p = ctx.one + if n == 0: + return p + if n == 1: + return z - p + if n == 2: + return z + p + # Use divisor product representation. Unfortunately, this sometimes + # includes singularities for roots of unity, which we have to cancel out. 
+ # Matching zeros/poles pairwise, we have (1-z^a)/(1-z^b) ~ a/b + O(z-1). + a_prod = 1 + b_prod = 1 + num_zeros = 0 + num_poles = 0 + for d in range(1,n+1): + if not n % d: + w = ctx.moebius(n//d) + # Use powm1 because it is important that we get 0 only + # if it really is exactly 0 + b = -ctx.powm1(z, d) + if b: + p *= b**w + else: + if w == 1: + a_prod *= d + num_zeros += 1 + elif w == -1: + b_prod *= d + num_poles += 1 + #print n, num_zeros, num_poles + if num_zeros: + if num_zeros > num_poles: + p *= 0 + else: + p *= a_prod + p /= b_prod + return p + +@defun +def mangoldt(ctx, n): + r""" + Evaluates the von Mangoldt function `\Lambda(n) = \log p` + if `n = p^k` a power of a prime, and `\Lambda(n) = 0` otherwise. + + **Examples** + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> [mangoldt(n) for n in range(-2,3)] + [0.0, 0.0, 0.0, 0.0, 0.6931471805599453094172321] + >>> mangoldt(6) + 0.0 + >>> mangoldt(7) + 1.945910149055313305105353 + >>> mangoldt(8) + 0.6931471805599453094172321 + >>> fsum(mangoldt(n) for n in range(101)) + 94.04531122935739224600493 + >>> fsum(mangoldt(n) for n in range(10001)) + 10013.39669326311478372032 + + """ + n = int(n) + if n < 2: + return ctx.zero + if n % 2 == 0: + # Must be a power of two + if n & (n-1) == 0: + return +ctx.ln2 + else: + return ctx.zero + # TODO: the following could be generalized into a perfect + # power testing function + # --- + # Look for a small factor + for p in (3,5,7,11,13,17,19,23,29,31): + if not n % p: + q, r = n // p, 0 + while q > 1: + q, r = divmod(q, p) + if r: + return ctx.zero + return ctx.ln(p) + if ctx.isprime(n): + return ctx.ln(n) + # Obviously, we could use arbitrary-precision arithmetic for this... 
+ if n > 10**30: + raise NotImplementedError + k = 2 + while 1: + p = int(n**(1./k) + 0.5) + if p < 2: + return ctx.zero + if p ** k == n: + if ctx.isprime(p): + return ctx.ln(p) + k += 1 + +@defun +def stirling1(ctx, n, k, exact=False): + v = ctx._stirling1(int(n), int(k)) + if exact: + return int(v) + else: + return ctx.mpf(v) + +@defun +def stirling2(ctx, n, k, exact=False): + v = ctx._stirling2(int(n), int(k)) + if exact: + return int(v) + else: + return ctx.mpf(v) diff --git a/.venv/lib/python3.11/site-packages/mpmath/functions/hypergeometric.py b/.venv/lib/python3.11/site-packages/mpmath/functions/hypergeometric.py new file mode 100644 index 0000000000000000000000000000000000000000..ddb50cbf3ea6daa5982678d3c26157a67a7d7945 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/functions/hypergeometric.py @@ -0,0 +1,1413 @@ +from ..libmp.backend import xrange +from .functions import defun, defun_wrapped + +def _check_need_perturb(ctx, terms, prec, discard_known_zeros): + perturb = recompute = False + extraprec = 0 + discard = [] + for term_index, term in enumerate(terms): + w_s, c_s, alpha_s, beta_s, a_s, b_s, z = term + have_singular_nongamma_weight = False + # Avoid division by zero in leading factors (TODO: + # also check for near division by zero?) 
+ for k, w in enumerate(w_s): + if not w: + if ctx.re(c_s[k]) <= 0 and c_s[k]: + perturb = recompute = True + have_singular_nongamma_weight = True + pole_count = [0, 0, 0] + # Check for gamma and series poles and near-poles + for data_index, data in enumerate([alpha_s, beta_s, b_s]): + for i, x in enumerate(data): + n, d = ctx.nint_distance(x) + # Poles + if n > 0: + continue + if d == ctx.ninf: + # OK if we have a polynomial + # ------------------------------ + ok = False + if data_index == 2: + for u in a_s: + if ctx.isnpint(u) and u >= int(n): + ok = True + break + if ok: + continue + pole_count[data_index] += 1 + # ------------------------------ + #perturb = recompute = True + #return perturb, recompute, extraprec + elif d < -4: + extraprec += -d + recompute = True + if discard_known_zeros and pole_count[1] > pole_count[0] + pole_count[2] \ + and not have_singular_nongamma_weight: + discard.append(term_index) + elif sum(pole_count): + perturb = recompute = True + return perturb, recompute, extraprec, discard + +_hypercomb_msg = """ +hypercomb() failed to converge to the requested %i bits of accuracy +using a working precision of %i bits. The function value may be zero or +infinite; try passing zeroprec=N or infprec=M to bound finite values between +2^(-N) and 2^M. Otherwise try a higher maxprec or maxterms. 
+""" + +@defun +def hypercomb(ctx, function, params=[], discard_known_zeros=True, **kwargs): + orig = ctx.prec + sumvalue = ctx.zero + dist = ctx.nint_distance + ninf = ctx.ninf + orig_params = params[:] + verbose = kwargs.get('verbose', False) + maxprec = kwargs.get('maxprec', ctx._default_hyper_maxprec(orig)) + kwargs['maxprec'] = maxprec # For calls to hypsum + zeroprec = kwargs.get('zeroprec') + infprec = kwargs.get('infprec') + perturbed_reference_value = None + hextra = 0 + try: + while 1: + ctx.prec += 10 + if ctx.prec > maxprec: + raise ValueError(_hypercomb_msg % (orig, ctx.prec)) + orig2 = ctx.prec + params = orig_params[:] + terms = function(*params) + if verbose: + print() + print("ENTERING hypercomb main loop") + print("prec =", ctx.prec) + print("hextra", hextra) + perturb, recompute, extraprec, discard = \ + _check_need_perturb(ctx, terms, orig, discard_known_zeros) + ctx.prec += extraprec + if perturb: + if "hmag" in kwargs: + hmag = kwargs["hmag"] + elif ctx._fixed_precision: + hmag = int(ctx.prec*0.3) + else: + hmag = orig + 10 + hextra + h = ctx.ldexp(ctx.one, -hmag) + ctx.prec = orig2 + 10 + hmag + 10 + for k in range(len(params)): + params[k] += h + # Heuristically ensure that the perturbations + # are "independent" so that two perturbations + # don't accidentally cancel each other out + # in a subtraction. 
+ h += h/(k+1) + if recompute: + terms = function(*params) + if discard_known_zeros: + terms = [term for (i, term) in enumerate(terms) if i not in discard] + if not terms: + return ctx.zero + evaluated_terms = [] + for term_index, term_data in enumerate(terms): + w_s, c_s, alpha_s, beta_s, a_s, b_s, z = term_data + if verbose: + print() + print(" Evaluating term %i/%i : %iF%i" % \ + (term_index+1, len(terms), len(a_s), len(b_s))) + print(" powers", ctx.nstr(w_s), ctx.nstr(c_s)) + print(" gamma", ctx.nstr(alpha_s), ctx.nstr(beta_s)) + print(" hyper", ctx.nstr(a_s), ctx.nstr(b_s)) + print(" z", ctx.nstr(z)) + #v = ctx.hyper(a_s, b_s, z, **kwargs) + #for a in alpha_s: v *= ctx.gamma(a) + #for b in beta_s: v *= ctx.rgamma(b) + #for w, c in zip(w_s, c_s): v *= ctx.power(w, c) + v = ctx.fprod([ctx.hyper(a_s, b_s, z, **kwargs)] + \ + [ctx.gamma(a) for a in alpha_s] + \ + [ctx.rgamma(b) for b in beta_s] + \ + [ctx.power(w,c) for (w,c) in zip(w_s,c_s)]) + if verbose: + print(" Value:", v) + evaluated_terms.append(v) + + if len(terms) == 1 and (not perturb): + sumvalue = evaluated_terms[0] + break + + if ctx._fixed_precision: + sumvalue = ctx.fsum(evaluated_terms) + break + + sumvalue = ctx.fsum(evaluated_terms) + term_magnitudes = [ctx.mag(x) for x in evaluated_terms] + max_magnitude = max(term_magnitudes) + sum_magnitude = ctx.mag(sumvalue) + cancellation = max_magnitude - sum_magnitude + if verbose: + print() + print(" Cancellation:", cancellation, "bits") + print(" Increased precision:", ctx.prec - orig, "bits") + + precision_ok = cancellation < ctx.prec - orig + + if zeroprec is None: + zero_ok = False + else: + zero_ok = max_magnitude - ctx.prec < -zeroprec + if infprec is None: + inf_ok = False + else: + inf_ok = max_magnitude > infprec + + if precision_ok and (not perturb) or ctx.isnan(cancellation): + break + elif precision_ok: + if perturbed_reference_value is None: + hextra += 20 + perturbed_reference_value = sumvalue + continue + elif ctx.mag(sumvalue - 
perturbed_reference_value) <= \ + ctx.mag(sumvalue) - orig: + break + elif zero_ok: + sumvalue = ctx.zero + break + elif inf_ok: + sumvalue = ctx.inf + break + elif 'hmag' in kwargs: + break + else: + hextra *= 2 + perturbed_reference_value = sumvalue + # Increase precision + else: + increment = min(max(cancellation, orig//2), max(extraprec,orig)) + ctx.prec += increment + if verbose: + print(" Must start over with increased precision") + continue + finally: + ctx.prec = orig + return +sumvalue + +@defun +def hyper(ctx, a_s, b_s, z, **kwargs): + """ + Hypergeometric function, general case. + """ + z = ctx.convert(z) + p = len(a_s) + q = len(b_s) + a_s = [ctx._convert_param(a) for a in a_s] + b_s = [ctx._convert_param(b) for b in b_s] + # Reduce degree by eliminating common parameters + if kwargs.get('eliminate', True): + elim_nonpositive = kwargs.get('eliminate_all', False) + i = 0 + while i < q and a_s: + b = b_s[i] + if b in a_s and (elim_nonpositive or not ctx.isnpint(b[0])): + a_s.remove(b) + b_s.remove(b) + p -= 1 + q -= 1 + else: + i += 1 + # Handle special cases + if p == 0: + if q == 1: return ctx._hyp0f1(b_s, z, **kwargs) + elif q == 0: return ctx.exp(z) + elif p == 1: + if q == 1: return ctx._hyp1f1(a_s, b_s, z, **kwargs) + elif q == 2: return ctx._hyp1f2(a_s, b_s, z, **kwargs) + elif q == 0: return ctx._hyp1f0(a_s[0][0], z) + elif p == 2: + if q == 1: return ctx._hyp2f1(a_s, b_s, z, **kwargs) + elif q == 2: return ctx._hyp2f2(a_s, b_s, z, **kwargs) + elif q == 3: return ctx._hyp2f3(a_s, b_s, z, **kwargs) + elif q == 0: return ctx._hyp2f0(a_s, b_s, z, **kwargs) + elif p == q+1: + return ctx._hypq1fq(p, q, a_s, b_s, z, **kwargs) + elif p > q+1 and not kwargs.get('force_series'): + return ctx._hyp_borel(p, q, a_s, b_s, z, **kwargs) + coeffs, types = zip(*(a_s+b_s)) + return ctx.hypsum(p, q, types, coeffs, z, **kwargs) + +@defun +def hyp0f1(ctx,b,z,**kwargs): + return ctx.hyper([],[b],z,**kwargs) + +@defun +def hyp1f1(ctx,a,b,z,**kwargs): + return 
ctx.hyper([a],[b],z,**kwargs) + +@defun +def hyp1f2(ctx,a1,b1,b2,z,**kwargs): + return ctx.hyper([a1],[b1,b2],z,**kwargs) + +@defun +def hyp2f1(ctx,a,b,c,z,**kwargs): + return ctx.hyper([a,b],[c],z,**kwargs) + +@defun +def hyp2f2(ctx,a1,a2,b1,b2,z,**kwargs): + return ctx.hyper([a1,a2],[b1,b2],z,**kwargs) + +@defun +def hyp2f3(ctx,a1,a2,b1,b2,b3,z,**kwargs): + return ctx.hyper([a1,a2],[b1,b2,b3],z,**kwargs) + +@defun +def hyp2f0(ctx,a,b,z,**kwargs): + return ctx.hyper([a,b],[],z,**kwargs) + +@defun +def hyp3f2(ctx,a1,a2,a3,b1,b2,z,**kwargs): + return ctx.hyper([a1,a2,a3],[b1,b2],z,**kwargs) + +@defun_wrapped +def _hyp1f0(ctx, a, z): + return (1-z) ** (-a) + +@defun +def _hyp0f1(ctx, b_s, z, **kwargs): + (b, btype), = b_s + if z: + magz = ctx.mag(z) + else: + magz = 0 + if magz >= 8 and not kwargs.get('force_series'): + try: + # http://functions.wolfram.com/HypergeometricFunctions/ + # Hypergeometric0F1/06/02/03/0004/ + # TODO: handle the all-real case more efficiently! + # TODO: figure out how much precision is needed (exponential growth) + orig = ctx.prec + try: + ctx.prec += 12 + magz//2 + def h(): + w = ctx.sqrt(-z) + jw = ctx.j*w + u = 1/(4*jw) + c = ctx.mpq_1_2 - b + E = ctx.exp(2*jw) + T1 = ([-jw,E], [c,-1], [], [], [b-ctx.mpq_1_2, ctx.mpq_3_2-b], [], -u) + T2 = ([jw,E], [c,1], [], [], [b-ctx.mpq_1_2, ctx.mpq_3_2-b], [], u) + return T1, T2 + v = ctx.hypercomb(h, [], force_series=True) + v = ctx.gamma(b)/(2*ctx.sqrt(ctx.pi))*v + finally: + ctx.prec = orig + if ctx._is_real_type(b) and ctx._is_real_type(z): + v = ctx._re(v) + return +v + except ctx.NoConvergence: + pass + return ctx.hypsum(0, 1, (btype,), [b], z, **kwargs) + +@defun +def _hyp1f1(ctx, a_s, b_s, z, **kwargs): + (a, atype), = a_s + (b, btype), = b_s + if not z: + return ctx.one+z + magz = ctx.mag(z) + if magz >= 7 and not (ctx.isint(a) and ctx.re(a) <= 0): + if ctx.isinf(z): + if ctx.sign(a) == ctx.sign(b) == ctx.sign(z) == 1: + return ctx.inf + return ctx.nan * z + try: + try: + ctx.prec += magz + 
sector = ctx._im(z) < 0 + def h(a,b): + if sector: + E = ctx.expjpi(ctx.fneg(a, exact=True)) + else: + E = ctx.expjpi(a) + rz = 1/z + T1 = ([E,z], [1,-a], [b], [b-a], [a, 1+a-b], [], -rz) + T2 = ([ctx.exp(z),z], [1,a-b], [b], [a], [b-a, 1-a], [], rz) + return T1, T2 + v = ctx.hypercomb(h, [a,b], force_series=True) + if ctx._is_real_type(a) and ctx._is_real_type(b) and ctx._is_real_type(z): + v = ctx._re(v) + return +v + except ctx.NoConvergence: + pass + finally: + ctx.prec -= magz + v = ctx.hypsum(1, 1, (atype, btype), [a, b], z, **kwargs) + return v + +def _hyp2f1_gosper(ctx,a,b,c,z,**kwargs): + # Use Gosper's recurrence + # See http://www.math.utexas.edu/pipermail/maxima/2006/000126.html + _a,_b,_c,_z = a, b, c, z + orig = ctx.prec + maxprec = kwargs.get('maxprec', 100*orig) + extra = 10 + while 1: + ctx.prec = orig + extra + #a = ctx.convert(_a) + #b = ctx.convert(_b) + #c = ctx.convert(_c) + z = ctx.convert(_z) + d = ctx.mpf(0) + e = ctx.mpf(1) + f = ctx.mpf(0) + k = 0 + # Common subexpression elimination, unfortunately making + # things a bit unreadable. The formula is quite messy to begin + # with, though... 
+ abz = a*b*z + ch = c * ctx.mpq_1_2 + c1h = (c+1) * ctx.mpq_1_2 + nz = 1-z + g = z/nz + abg = a*b*g + cba = c-b-a + z2 = z-2 + tol = -ctx.prec - 10 + nstr = ctx.nstr + nprint = ctx.nprint + mag = ctx.mag + maxmag = ctx.ninf + while 1: + kch = k+ch + kakbz = (k+a)*(k+b)*z / (4*(k+1)*kch*(k+c1h)) + d1 = kakbz*(e-(k+cba)*d*g) + e1 = kakbz*(d*abg+(k+c)*e) + ft = d*(k*(cba*z+k*z2-c)-abz)/(2*kch*nz) + f1 = f + e - ft + maxmag = max(maxmag, mag(f1)) + if mag(f1-f) < tol: + break + d, e, f = d1, e1, f1 + k += 1 + cancellation = maxmag - mag(f1) + if cancellation < extra: + break + else: + extra += cancellation + if extra > maxprec: + raise ctx.NoConvergence + return f1 + +@defun +def _hyp2f1(ctx, a_s, b_s, z, **kwargs): + (a, atype), (b, btype) = a_s + (c, ctype), = b_s + if z == 1: + # TODO: the following logic can be simplified + convergent = ctx.re(c-a-b) > 0 + finite = (ctx.isint(a) and a <= 0) or (ctx.isint(b) and b <= 0) + zerodiv = ctx.isint(c) and c <= 0 and not \ + ((ctx.isint(a) and c <= a <= 0) or (ctx.isint(b) and c <= b <= 0)) + #print "bz", a, b, c, z, convergent, finite, zerodiv + # Gauss's theorem gives the value if convergent + if (convergent or finite) and not zerodiv: + return ctx.gammaprod([c, c-a-b], [c-a, c-b], _infsign=True) + # Otherwise, there is a pole and we take the + # sign to be that when approaching from below + # XXX: this evaluation is not necessarily correct in all cases + return ctx.hyp2f1(a,b,c,1-ctx.eps*2) * ctx.inf + + # Equal to 1 (first term), unless there is a subsequent + # division by zero + if not z: + # Division by zero but power of z is higher than + # first order so cancels + if c or a == 0 or b == 0: + return 1+z + # Indeterminate + return ctx.nan + + # Hit zero denominator unless numerator goes to 0 first + if ctx.isint(c) and c <= 0: + if (ctx.isint(a) and c <= a <= 0) or \ + (ctx.isint(b) and c <= b <= 0): + pass + else: + # Pole in series + return ctx.inf + + absz = abs(z) + + # Fast case: standard series converges 
rapidly, + # possibly in finitely many terms + if absz <= 0.8 or (ctx.isint(a) and a <= 0 and a >= -1000) or \ + (ctx.isint(b) and b <= 0 and b >= -1000): + return ctx.hypsum(2, 1, (atype, btype, ctype), [a, b, c], z, **kwargs) + + orig = ctx.prec + try: + ctx.prec += 10 + + # Use 1/z transformation + if absz >= 1.3: + def h(a,b): + t = ctx.mpq_1-c; ab = a-b; rz = 1/z + T1 = ([-z],[-a], [c,-ab],[b,c-a], [a,t+a],[ctx.mpq_1+ab], rz) + T2 = ([-z],[-b], [c,ab],[a,c-b], [b,t+b],[ctx.mpq_1-ab], rz) + return T1, T2 + v = ctx.hypercomb(h, [a,b], **kwargs) + + # Use 1-z transformation + elif abs(1-z) <= 0.75: + def h(a,b): + t = c-a-b; ca = c-a; cb = c-b; rz = 1-z + T1 = [], [], [c,t], [ca,cb], [a,b], [1-t], rz + T2 = [rz], [t], [c,a+b-c], [a,b], [ca,cb], [1+t], rz + return T1, T2 + v = ctx.hypercomb(h, [a,b], **kwargs) + + # Use z/(z-1) transformation + elif abs(z/(z-1)) <= 0.75: + v = ctx.hyp2f1(a, c-b, c, z/(z-1)) / (1-z)**a + + # Remaining part of unit circle + else: + v = _hyp2f1_gosper(ctx,a,b,c,z,**kwargs) + + finally: + ctx.prec = orig + return +v + +@defun +def _hypq1fq(ctx, p, q, a_s, b_s, z, **kwargs): + r""" + Evaluates 3F2, 4F3, 5F4, ... + """ + a_s, a_types = zip(*a_s) + b_s, b_types = zip(*b_s) + a_s = list(a_s) + b_s = list(b_s) + absz = abs(z) + ispoly = False + for a in a_s: + if ctx.isint(a) and a <= 0: + ispoly = True + break + # Direct summation + if absz < 1 or ispoly: + try: + return ctx.hypsum(p, q, a_types+b_types, a_s+b_s, z, **kwargs) + except ctx.NoConvergence: + if absz > 1.1 or ispoly: + raise + # Use expansion at |z-1| -> 0. + # Reference: Wolfgang Buhring, "Generalized Hypergeometric Functions at + # Unit Argument", Proc. Amer. Math. Soc., Vol. 114, No. 1 (Jan. 1992), + # pp.145-153 + # The current implementation has several problems: + # 1. We only implement it for 3F2. The expansion coefficients are + # given by extremely messy nested sums in the higher degree cases + # (see reference). 
Is efficient sequential generation of the coefficients + # possible in the > 3F2 case? + # 2. Although the series converges, it may do so slowly, so we need + # convergence acceleration. The acceleration implemented by + # nsum does not always help, so results returned are sometimes + # inaccurate! Can we do better? + # 3. We should check conditions for convergence, and possibly + # do a better job of cancelling out gamma poles if possible. + if z == 1: + # XXX: should also check for division by zero in the + # denominator of the series (cf. hyp2f1) + S = ctx.re(sum(b_s)-sum(a_s)) + if S <= 0: + #return ctx.hyper(a_s, b_s, 1-ctx.eps*2, **kwargs) * ctx.inf + return ctx.hyper(a_s, b_s, 0.9, **kwargs) * ctx.inf + if (p,q) == (3,2) and abs(z-1) < 0.05: # and kwargs.get('sum1') + #print "Using alternate summation (experimental)" + a1,a2,a3 = a_s + b1,b2 = b_s + u = b1+b2-a3 + initial = ctx.gammaprod([b2-a3,b1-a3,a1,a2],[b2-a3,b1-a3,1,u]) + def term(k, _cache={0:initial}): + u = b1+b2-a3+k + if k in _cache: + t = _cache[k] + else: + t = _cache[k-1] + t *= (b1+k-a3-1)*(b2+k-a3-1) + t /= k*(u-1) + _cache[k] = t + return t * ctx.hyp2f1(a1,a2,u,z) + try: + S = ctx.nsum(term, [0,ctx.inf], verbose=kwargs.get('verbose'), + strict=kwargs.get('strict', True)) + return S * ctx.gammaprod([b1,b2],[a1,a2,a3]) + except ctx.NoConvergence: + pass + # Try to use convergence acceleration on and close to the unit circle. + # Problem: the convergence acceleration degenerates as |z-1| -> 0, + # except for special cases. Everywhere else, the Shanks transformation + # is very efficient. 
+ if absz < 1.1 and ctx._re(z) <= 1: + + def term(kk, _cache={0:ctx.one}): + k = int(kk) + if k != kk: + t = z ** ctx.mpf(kk) / ctx.fac(kk) + for a in a_s: t *= ctx.rf(a,kk) + for b in b_s: t /= ctx.rf(b,kk) + return t + if k in _cache: + return _cache[k] + t = term(k-1) + m = k-1 + for j in xrange(p): t *= (a_s[j]+m) + for j in xrange(q): t /= (b_s[j]+m) + t *= z + t /= k + _cache[k] = t + return t + + sum_method = kwargs.get('sum_method', 'r+s+e') + + try: + return ctx.nsum(term, [0,ctx.inf], verbose=kwargs.get('verbose'), + strict=kwargs.get('strict', True), + method=sum_method.replace('e','')) + except ctx.NoConvergence: + if 'e' not in sum_method: + raise + pass + + if kwargs.get('verbose'): + print("Attempting Euler-Maclaurin summation") + + + """ + Somewhat slower version (one diffs_exp for each factor). + However, this would be faster with fast direct derivatives + of the gamma function. + + def power_diffs(k0): + r = 0 + l = ctx.log(z) + while 1: + yield z**ctx.mpf(k0) * l**r + r += 1 + + def loggamma_diffs(x, reciprocal=False): + sign = (-1) ** reciprocal + yield sign * ctx.loggamma(x) + i = 0 + while 1: + yield sign * ctx.psi(i,x) + i += 1 + + def hyper_diffs(k0): + b2 = b_s + [1] + A = [ctx.diffs_exp(loggamma_diffs(a+k0)) for a in a_s] + B = [ctx.diffs_exp(loggamma_diffs(b+k0,True)) for b in b2] + Z = [power_diffs(k0)] + C = ctx.gammaprod([b for b in b2], [a for a in a_s]) + for d in ctx.diffs_prod(A + B + Z): + v = C * d + yield v + """ + + def log_diffs(k0): + b2 = b_s + [1] + yield sum(ctx.loggamma(a+k0) for a in a_s) - \ + sum(ctx.loggamma(b+k0) for b in b2) + k0*ctx.log(z) + i = 0 + while 1: + v = sum(ctx.psi(i,a+k0) for a in a_s) - \ + sum(ctx.psi(i,b+k0) for b in b2) + if i == 0: + v += ctx.log(z) + yield v + i += 1 + + def hyper_diffs(k0): + C = ctx.gammaprod([b for b in b_s], [a for a in a_s]) + for d in ctx.diffs_exp(log_diffs(k0)): + v = C * d + yield v + + tol = ctx.eps / 1024 + prec = ctx.prec + try: + trunc = 50 * ctx.dps + ctx.prec += 20 
+ for i in xrange(5): + head = ctx.fsum(term(k) for k in xrange(trunc)) + tail, err = ctx.sumem(term, [trunc, ctx.inf], tol=tol, + adiffs=hyper_diffs(trunc), + verbose=kwargs.get('verbose'), + error=True, + _fast_abort=True) + if err < tol: + v = head + tail + break + trunc *= 2 + # Need to increase precision because calculation of + # derivatives may be inaccurate + ctx.prec += ctx.prec//2 + if i == 4: + raise ctx.NoConvergence(\ + "Euler-Maclaurin summation did not converge") + finally: + ctx.prec = prec + return +v + + # Use 1/z transformation + # http://functions.wolfram.com/HypergeometricFunctions/ + # HypergeometricPFQ/06/01/05/02/0004/ + def h(*args): + a_s = list(args[:p]) + b_s = list(args[p:]) + Ts = [] + recz = ctx.one/z + negz = ctx.fneg(z, exact=True) + for k in range(q+1): + ak = a_s[k] + C = [negz] + Cp = [-ak] + Gn = b_s + [ak] + [a_s[j]-ak for j in range(q+1) if j != k] + Gd = a_s + [b_s[j]-ak for j in range(q)] + Fn = [ak] + [ak-b_s[j]+1 for j in range(q)] + Fd = [1-a_s[j]+ak for j in range(q+1) if j != k] + Ts.append((C, Cp, Gn, Gd, Fn, Fd, recz)) + return Ts + return ctx.hypercomb(h, a_s+b_s, **kwargs) + +@defun +def _hyp_borel(ctx, p, q, a_s, b_s, z, **kwargs): + if a_s: + a_s, a_types = zip(*a_s) + a_s = list(a_s) + else: + a_s, a_types = [], () + if b_s: + b_s, b_types = zip(*b_s) + b_s = list(b_s) + else: + b_s, b_types = [], () + kwargs['maxterms'] = kwargs.get('maxterms', ctx.prec) + try: + return ctx.hypsum(p, q, a_types+b_types, a_s+b_s, z, **kwargs) + except ctx.NoConvergence: + pass + prec = ctx.prec + try: + tol = kwargs.get('asymp_tol', ctx.eps/4) + ctx.prec += 10 + # hypsum is has a conservative tolerance. 
So we try again: + def term(k, cache={0:ctx.one}): + if k in cache: + return cache[k] + t = term(k-1) + for a in a_s: t *= (a+(k-1)) + for b in b_s: t /= (b+(k-1)) + t *= z + t /= k + cache[k] = t + return t + s = ctx.one + for k in xrange(1, ctx.prec): + t = term(k) + s += t + if abs(t) <= tol: + return s + finally: + ctx.prec = prec + if p <= q+3: + contour = kwargs.get('contour') + if not contour: + if ctx.arg(z) < 0.25: + u = z / max(1, abs(z)) + if ctx.arg(z) >= 0: + contour = [0, 2j, (2j+2)/u, 2/u, ctx.inf] + else: + contour = [0, -2j, (-2j+2)/u, 2/u, ctx.inf] + #contour = [0, 2j/z, 2/z, ctx.inf] + #contour = [0, 2j, 2/z, ctx.inf] + #contour = [0, 2j, ctx.inf] + else: + contour = [0, ctx.inf] + quad_kwargs = kwargs.get('quad_kwargs', {}) + def g(t): + return ctx.exp(-t)*ctx.hyper(a_s, b_s+[1], t*z) + I, err = ctx.quad(g, contour, error=True, **quad_kwargs) + if err <= abs(I)*ctx.eps*8: + return I + raise ctx.NoConvergence + + +@defun +def _hyp2f2(ctx, a_s, b_s, z, **kwargs): + (a1, a1type), (a2, a2type) = a_s + (b1, b1type), (b2, b2type) = b_s + + absz = abs(z) + magz = ctx.mag(z) + orig = ctx.prec + + # Asymptotic expansion is ~ exp(z) + asymp_extraprec = magz + + # Asymptotic series is in terms of 3F1 + can_use_asymptotic = (not kwargs.get('force_series')) and \ + (ctx.mag(absz) > 3) + + # TODO: much of the following could be shared with 2F3 instead of + # copypasted + if can_use_asymptotic: + #print "using asymp" + try: + try: + ctx.prec += asymp_extraprec + # http://functions.wolfram.com/HypergeometricFunctions/ + # Hypergeometric2F2/06/02/02/0002/ + def h(a1,a2,b1,b2): + X = a1+a2-b1-b2 + A2 = a1+a2 + B2 = b1+b2 + c = {} + c[0] = ctx.one + c[1] = (A2-1)*X+b1*b2-a1*a2 + s1 = 0 + k = 0 + tprev = 0 + while 1: + if k not in c: + uu1 = 1-B2+2*a1+a1**2+2*a2+a2**2-A2*B2+a1*a2+b1*b2+(2*B2-3*(A2+1))*k+2*k**2 + uu2 = (k-A2+b1-1)*(k-A2+b2-1)*(k-X-2) + c[k] = ctx.one/k * (uu1*c[k-1]-uu2*c[k-2]) + t1 = c[k] * z**(-k) + if abs(t1) < 0.1*ctx.eps: + #print "Convergence 
:)" + break + # Quit if the series doesn't converge quickly enough + if k > 5 and abs(tprev) / abs(t1) < 1.5: + #print "No convergence :(" + raise ctx.NoConvergence + s1 += t1 + tprev = t1 + k += 1 + S = ctx.exp(z)*s1 + T1 = [z,S], [X,1], [b1,b2],[a1,a2],[],[],0 + T2 = [-z],[-a1],[b1,b2,a2-a1],[a2,b1-a1,b2-a1],[a1,a1-b1+1,a1-b2+1],[a1-a2+1],-1/z + T3 = [-z],[-a2],[b1,b2,a1-a2],[a1,b1-a2,b2-a2],[a2,a2-b1+1,a2-b2+1],[-a1+a2+1],-1/z + return T1, T2, T3 + v = ctx.hypercomb(h, [a1,a2,b1,b2], force_series=True, maxterms=4*ctx.prec) + if sum(ctx._is_real_type(u) for u in [a1,a2,b1,b2,z]) == 5: + v = ctx.re(v) + return v + except ctx.NoConvergence: + pass + finally: + ctx.prec = orig + + return ctx.hypsum(2, 2, (a1type, a2type, b1type, b2type), [a1, a2, b1, b2], z, **kwargs) + + + +@defun +def _hyp1f2(ctx, a_s, b_s, z, **kwargs): + (a1, a1type), = a_s + (b1, b1type), (b2, b2type) = b_s + + absz = abs(z) + magz = ctx.mag(z) + orig = ctx.prec + + # Asymptotic expansion is ~ exp(sqrt(z)) + asymp_extraprec = z and magz//2 + + # Asymptotic series is in terms of 3F0 + can_use_asymptotic = (not kwargs.get('force_series')) and \ + (ctx.mag(absz) > 19) and \ + (ctx.sqrt(absz) > 1.5*orig) # and \ + # ctx._hyp_check_convergence([a1, a1-b1+1, a1-b2+1], [], + # 1/absz, orig+40+asymp_extraprec) + + # TODO: much of the following could be shared with 2F3 instead of + # copypasted + if can_use_asymptotic: + #print "using asymp" + try: + try: + ctx.prec += asymp_extraprec + # http://functions.wolfram.com/HypergeometricFunctions/ + # Hypergeometric1F2/06/02/03/ + def h(a1,b1,b2): + X = ctx.mpq_1_2*(a1-b1-b2+ctx.mpq_1_2) + c = {} + c[0] = ctx.one + c[1] = 2*(ctx.mpq_1_4*(3*a1+b1+b2-2)*(a1-b1-b2)+b1*b2-ctx.mpq_3_16) + c[2] = 2*(b1*b2+ctx.mpq_1_4*(a1-b1-b2)*(3*a1+b1+b2-2)-ctx.mpq_3_16)**2+\ + ctx.mpq_1_16*(-16*(2*a1-3)*b1*b2 + \ + 4*(a1-b1-b2)*(-8*a1**2+11*a1+b1+b2-2)-3) + s1 = 0 + s2 = 0 + k = 0 + tprev = 0 + while 1: + if k not in c: + uu1 = (3*k**2+(-6*a1+2*b1+2*b2-4)*k + 3*a1**2 - \ + 
(b1-b2)**2 - 2*a1*(b1+b2-2) + ctx.mpq_1_4) + uu2 = (k-a1+b1-b2-ctx.mpq_1_2)*(k-a1-b1+b2-ctx.mpq_1_2)*\ + (k-a1+b1+b2-ctx.mpq_5_2) + c[k] = ctx.one/(2*k)*(uu1*c[k-1]-uu2*c[k-2]) + w = c[k] * (-z)**(-0.5*k) + t1 = (-ctx.j)**k * ctx.mpf(2)**(-k) * w + t2 = ctx.j**k * ctx.mpf(2)**(-k) * w + if abs(t1) < 0.1*ctx.eps: + #print "Convergence :)" + break + # Quit if the series doesn't converge quickly enough + if k > 5 and abs(tprev) / abs(t1) < 1.5: + #print "No convergence :(" + raise ctx.NoConvergence + s1 += t1 + s2 += t2 + tprev = t1 + k += 1 + S = ctx.expj(ctx.pi*X+2*ctx.sqrt(-z))*s1 + \ + ctx.expj(-(ctx.pi*X+2*ctx.sqrt(-z)))*s2 + T1 = [0.5*S, ctx.pi, -z], [1, -0.5, X], [b1, b2], [a1],\ + [], [], 0 + T2 = [-z], [-a1], [b1,b2],[b1-a1,b2-a1], \ + [a1,a1-b1+1,a1-b2+1], [], 1/z + return T1, T2 + v = ctx.hypercomb(h, [a1,b1,b2], force_series=True, maxterms=4*ctx.prec) + if sum(ctx._is_real_type(u) for u in [a1,b1,b2,z]) == 4: + v = ctx.re(v) + return v + except ctx.NoConvergence: + pass + finally: + ctx.prec = orig + + #print "not using asymp" + return ctx.hypsum(1, 2, (a1type, b1type, b2type), [a1, b1, b2], z, **kwargs) + + + +@defun +def _hyp2f3(ctx, a_s, b_s, z, **kwargs): + (a1, a1type), (a2, a2type) = a_s + (b1, b1type), (b2, b2type), (b3, b3type) = b_s + + absz = abs(z) + magz = ctx.mag(z) + + # Asymptotic expansion is ~ exp(sqrt(z)) + asymp_extraprec = z and magz//2 + orig = ctx.prec + + # Asymptotic series is in terms of 4F1 + # The square root below empirically provides a plausible criterion + # for the leading series to converge + can_use_asymptotic = (not kwargs.get('force_series')) and \ + (ctx.mag(absz) > 19) and (ctx.sqrt(absz) > 1.5*orig) + + if can_use_asymptotic: + #print "using asymp" + try: + try: + ctx.prec += asymp_extraprec + # http://functions.wolfram.com/HypergeometricFunctions/ + # Hypergeometric2F3/06/02/03/01/0002/ + def h(a1,a2,b1,b2,b3): + X = ctx.mpq_1_2*(a1+a2-b1-b2-b3+ctx.mpq_1_2) + A2 = a1+a2 + B3 = b1+b2+b3 + A = a1*a2 + B = 
b1*b2+b3*b2+b1*b3 + R = b1*b2*b3 + c = {} + c[0] = ctx.one + c[1] = 2*(B - A + ctx.mpq_1_4*(3*A2+B3-2)*(A2-B3) - ctx.mpq_3_16) + c[2] = ctx.mpq_1_2*c[1]**2 + ctx.mpq_1_16*(-16*(2*A2-3)*(B-A) + 32*R +\ + 4*(-8*A2**2 + 11*A2 + 8*A + B3 - 2)*(A2-B3)-3) + s1 = 0 + s2 = 0 + k = 0 + tprev = 0 + while 1: + if k not in c: + uu1 = (k-2*X-3)*(k-2*X-2*b1-1)*(k-2*X-2*b2-1)*\ + (k-2*X-2*b3-1) + uu2 = (4*(k-1)**3 - 6*(4*X+B3)*(k-1)**2 + \ + 2*(24*X**2+12*B3*X+4*B+B3-1)*(k-1) - 32*X**3 - \ + 24*B3*X**2 - 4*B - 8*R - 4*(4*B+B3-1)*X + 2*B3-1) + uu3 = (5*(k-1)**2+2*(-10*X+A2-3*B3+3)*(k-1)+2*c[1]) + c[k] = ctx.one/(2*k)*(uu1*c[k-3]-uu2*c[k-2]+uu3*c[k-1]) + w = c[k] * ctx.power(-z, -0.5*k) + t1 = (-ctx.j)**k * ctx.mpf(2)**(-k) * w + t2 = ctx.j**k * ctx.mpf(2)**(-k) * w + if abs(t1) < 0.1*ctx.eps: + break + # Quit if the series doesn't converge quickly enough + if k > 5 and abs(tprev) / abs(t1) < 1.5: + raise ctx.NoConvergence + s1 += t1 + s2 += t2 + tprev = t1 + k += 1 + S = ctx.expj(ctx.pi*X+2*ctx.sqrt(-z))*s1 + \ + ctx.expj(-(ctx.pi*X+2*ctx.sqrt(-z)))*s2 + T1 = [0.5*S, ctx.pi, -z], [1, -0.5, X], [b1, b2, b3], [a1, a2],\ + [], [], 0 + T2 = [-z], [-a1], [b1,b2,b3,a2-a1],[a2,b1-a1,b2-a1,b3-a1], \ + [a1,a1-b1+1,a1-b2+1,a1-b3+1], [a1-a2+1], 1/z + T3 = [-z], [-a2], [b1,b2,b3,a1-a2],[a1,b1-a2,b2-a2,b3-a2], \ + [a2,a2-b1+1,a2-b2+1,a2-b3+1],[-a1+a2+1], 1/z + return T1, T2, T3 + v = ctx.hypercomb(h, [a1,a2,b1,b2,b3], force_series=True, maxterms=4*ctx.prec) + if sum(ctx._is_real_type(u) for u in [a1,a2,b1,b2,b3,z]) == 6: + v = ctx.re(v) + return v + except ctx.NoConvergence: + pass + finally: + ctx.prec = orig + + return ctx.hypsum(2, 3, (a1type, a2type, b1type, b2type, b3type), [a1, a2, b1, b2, b3], z, **kwargs) + +@defun +def _hyp2f0(ctx, a_s, b_s, z, **kwargs): + (a, atype), (b, btype) = a_s + # We want to try aggressively to use the asymptotic expansion, + # and fall back only when absolutely necessary + try: + kwargsb = kwargs.copy() + kwargsb['maxterms'] = kwargsb.get('maxterms', 
ctx.prec) + return ctx.hypsum(2, 0, (atype,btype), [a,b], z, **kwargsb) + except ctx.NoConvergence: + if kwargs.get('force_series'): + raise + pass + def h(a, b): + w = ctx.sinpi(b) + rz = -1/z + T1 = ([ctx.pi,w,rz],[1,-1,a],[],[a-b+1,b],[a],[b],rz) + T2 = ([-ctx.pi,w,rz],[1,-1,1+a-b],[],[a,2-b],[a-b+1],[2-b],rz) + return T1, T2 + return ctx.hypercomb(h, [a, 1+a-b], **kwargs) + +@defun +def meijerg(ctx, a_s, b_s, z, r=1, series=None, **kwargs): + an, ap = a_s + bm, bq = b_s + n = len(an) + p = n + len(ap) + m = len(bm) + q = m + len(bq) + a = an+ap + b = bm+bq + a = [ctx.convert(_) for _ in a] + b = [ctx.convert(_) for _ in b] + z = ctx.convert(z) + if series is None: + if p < q: series = 1 + if p > q: series = 2 + if p == q: + if m+n == p and abs(z) > 1: + series = 2 + else: + series = 1 + if kwargs.get('verbose'): + print("Meijer G m,n,p,q,series =", m,n,p,q,series) + if series == 1: + def h(*args): + a = args[:p] + b = args[p:] + terms = [] + for k in range(m): + bases = [z] + expts = [b[k]/r] + gn = [b[j]-b[k] for j in range(m) if j != k] + gn += [1-a[j]+b[k] for j in range(n)] + gd = [a[j]-b[k] for j in range(n,p)] + gd += [1-b[j]+b[k] for j in range(m,q)] + hn = [1-a[j]+b[k] for j in range(p)] + hd = [1-b[j]+b[k] for j in range(q) if j != k] + hz = (-ctx.one)**(p-m-n) * z**(ctx.one/r) + terms.append((bases, expts, gn, gd, hn, hd, hz)) + return terms + else: + def h(*args): + a = args[:p] + b = args[p:] + terms = [] + for k in range(n): + bases = [z] + if r == 1: + expts = [a[k]-1] + else: + expts = [(a[k]-1)/ctx.convert(r)] + gn = [a[k]-a[j] for j in range(n) if j != k] + gn += [1-a[k]+b[j] for j in range(m)] + gd = [a[k]-b[j] for j in range(m,q)] + gd += [1-a[k]+a[j] for j in range(n,p)] + hn = [1-a[k]+b[j] for j in range(q)] + hd = [1+a[j]-a[k] for j in range(p) if j != k] + hz = (-ctx.one)**(q-m-n) / z**(ctx.one/r) + terms.append((bases, expts, gn, gd, hn, hd, hz)) + return terms + return ctx.hypercomb(h, a+b, **kwargs) + +@defun_wrapped +def 
appellf1(ctx,a,b1,b2,c,x,y,**kwargs): + # Assume x smaller + # We will use x for the outer loop + if abs(x) > abs(y): + x, y = y, x + b1, b2 = b2, b1 + def ok(x): + return abs(x) < 0.99 + # Finite cases + if ctx.isnpint(a): + pass + elif ctx.isnpint(b1): + pass + elif ctx.isnpint(b2): + x, y, b1, b2 = y, x, b2, b1 + else: + #print x, y + # Note: ok if |y| > 1, because + # 2F1 implements analytic continuation + if not ok(x): + u1 = (x-y)/(x-1) + if not ok(u1): + raise ValueError("Analytic continuation not implemented") + #print "Using analytic continuation" + return (1-x)**(-b1)*(1-y)**(c-a-b2)*\ + ctx.appellf1(c-a,b1,c-b1-b2,c,u1,y,**kwargs) + return ctx.hyper2d({'m+n':[a],'m':[b1],'n':[b2]}, {'m+n':[c]}, x,y, **kwargs) + +@defun +def appellf2(ctx,a,b1,b2,c1,c2,x,y,**kwargs): + # TODO: continuation + return ctx.hyper2d({'m+n':[a],'m':[b1],'n':[b2]}, + {'m':[c1],'n':[c2]}, x,y, **kwargs) + +@defun +def appellf3(ctx,a1,a2,b1,b2,c,x,y,**kwargs): + outer_polynomial = ctx.isnpint(a1) or ctx.isnpint(b1) + inner_polynomial = ctx.isnpint(a2) or ctx.isnpint(b2) + if not outer_polynomial: + if inner_polynomial or abs(x) > abs(y): + x, y = y, x + a1,a2,b1,b2 = a2,a1,b2,b1 + return ctx.hyper2d({'m':[a1,b1],'n':[a2,b2]}, {'m+n':[c]},x,y,**kwargs) + +@defun +def appellf4(ctx,a,b,c1,c2,x,y,**kwargs): + # TODO: continuation + return ctx.hyper2d({'m+n':[a,b]}, {'m':[c1],'n':[c2]},x,y,**kwargs) + +@defun +def hyper2d(ctx, a, b, x, y, **kwargs): + r""" + Sums the generalized 2D hypergeometric series + + .. math :: + + \sum_{m=0}^{\infty} \sum_{n=0}^{\infty} + \frac{P((a),m,n)}{Q((b),m,n)} + \frac{x^m y^n} {m! n!} + + where `(a) = (a_1,\ldots,a_r)`, `(b) = (b_1,\ldots,b_s)` and where + `P` and `Q` are products of rising factorials such as `(a_j)_n` or + `(a_j)_{m+n}`. `P` and `Q` are specified in the form of dicts, with + the `m` and `n` dependence as keys and parameter lists as values. 
+ The supported rising factorials are given in the following table + (note that only a few are supported in `Q`): + + +------------+-------------------+--------+ + | Key | Rising factorial | `Q` | + +============+===================+========+ + | ``'m'`` | `(a_j)_m` | Yes | + +------------+-------------------+--------+ + | ``'n'`` | `(a_j)_n` | Yes | + +------------+-------------------+--------+ + | ``'m+n'`` | `(a_j)_{m+n}` | Yes | + +------------+-------------------+--------+ + | ``'m-n'`` | `(a_j)_{m-n}` | No | + +------------+-------------------+--------+ + | ``'n-m'`` | `(a_j)_{n-m}` | No | + +------------+-------------------+--------+ + | ``'2m+n'`` | `(a_j)_{2m+n}` | No | + +------------+-------------------+--------+ + | ``'2m-n'`` | `(a_j)_{2m-n}` | No | + +------------+-------------------+--------+ + | ``'2n-m'`` | `(a_j)_{2n-m}` | No | + +------------+-------------------+--------+ + + For example, the Appell F1 and F4 functions + + .. math :: + + F_1 = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty} + \frac{(a)_{m+n} (b)_m (c)_n}{(d)_{m+n}} + \frac{x^m y^n}{m! n!} + + F_4 = \sum_{m=0}^{\infty} \sum_{n=0}^{\infty} + \frac{(a)_{m+n} (b)_{m+n}}{(c)_m (d)_{n}} + \frac{x^m y^n}{m! n!} + + can be represented respectively as + + ``hyper2d({'m+n':[a], 'm':[b], 'n':[c]}, {'m+n':[d]}, x, y)`` + + ``hyper2d({'m+n':[a,b]}, {'m':[c], 'n':[d]}, x, y)`` + + More generally, :func:`~mpmath.hyper2d` can evaluate any of the 34 distinct + convergent second-order (generalized Gaussian) hypergeometric + series enumerated by Horn, as well as the Kampe de Feriet + function. + + The series is computed by rewriting it so that the inner + series (i.e. the series containing `n` and `y`) has the form of an + ordinary generalized hypergeometric series and thereby can be + evaluated efficiently using :func:`~mpmath.hyper`. If possible, + manually swapping `x` and `y` and the corresponding parameters + can sometimes give better results. 
+ + **Examples** + + Two separable cases: a product of two geometric series, and a + product of two Gaussian hypergeometric functions:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> x, y = mpf(0.25), mpf(0.5) + >>> hyper2d({'m':1,'n':1}, {}, x,y) + 2.666666666666666666666667 + >>> 1/(1-x)/(1-y) + 2.666666666666666666666667 + >>> hyper2d({'m':[1,2],'n':[3,4]}, {'m':[5],'n':[6]}, x,y) + 4.164358531238938319669856 + >>> hyp2f1(1,2,5,x)*hyp2f1(3,4,6,y) + 4.164358531238938319669856 + + Some more series that can be done in closed form:: + + >>> hyper2d({'m':1,'n':1},{'m+n':1},x,y) + 2.013417124712514809623881 + >>> (exp(x)*x-exp(y)*y)/(x-y) + 2.013417124712514809623881 + + Six of the 34 Horn functions, G1-G3 and H1-H3:: + + >>> from mpmath import * + >>> mp.dps = 10; mp.pretty = True + >>> x, y = 0.0625, 0.125 + >>> a1,a2,b1,b2,c1,c2,d = 1.1,-1.2,-1.3,-1.4,1.5,-1.6,1.7 + >>> hyper2d({'m+n':a1,'n-m':b1,'m-n':b2},{},x,y) # G1 + 1.139090746 + >>> nsum(lambda m,n: rf(a1,m+n)*rf(b1,n-m)*rf(b2,m-n)*\ + ... x**m*y**n/fac(m)/fac(n), [0,inf], [0,inf]) + 1.139090746 + >>> hyper2d({'m':a1,'n':a2,'n-m':b1,'m-n':b2},{},x,y) # G2 + 0.9503682696 + >>> nsum(lambda m,n: rf(a1,m)*rf(a2,n)*rf(b1,n-m)*rf(b2,m-n)*\ + ... x**m*y**n/fac(m)/fac(n), [0,inf], [0,inf]) + 0.9503682696 + >>> hyper2d({'2n-m':a1,'2m-n':a2},{},x,y) # G3 + 1.029372029 + >>> nsum(lambda m,n: rf(a1,2*n-m)*rf(a2,2*m-n)*\ + ... x**m*y**n/fac(m)/fac(n), [0,inf], [0,inf]) + 1.029372029 + >>> hyper2d({'m-n':a1,'m+n':b1,'n':c1},{'m':d},x,y) # H1 + -1.605331256 + >>> nsum(lambda m,n: rf(a1,m-n)*rf(b1,m+n)*rf(c1,n)/rf(d,m)*\ + ... x**m*y**n/fac(m)/fac(n), [0,inf], [0,inf]) + -1.605331256 + >>> hyper2d({'m-n':a1,'m':b1,'n':[c1,c2]},{'m':d},x,y) # H2 + -2.35405404 + >>> nsum(lambda m,n: rf(a1,m-n)*rf(b1,m)*rf(c1,n)*rf(c2,n)/rf(d,m)*\ + ... 
x**m*y**n/fac(m)/fac(n), [0,inf], [0,inf]) + -2.35405404 + >>> hyper2d({'2m+n':a1,'n':b1},{'m+n':c1},x,y) # H3 + 0.974479074 + >>> nsum(lambda m,n: rf(a1,2*m+n)*rf(b1,n)/rf(c1,m+n)*\ + ... x**m*y**n/fac(m)/fac(n), [0,inf], [0,inf]) + 0.974479074 + + **References** + + 1. [SrivastavaKarlsson]_ + 2. [Weisstein]_ http://mathworld.wolfram.com/HornFunction.html + 3. [Weisstein]_ http://mathworld.wolfram.com/AppellHypergeometricFunction.html + + """ + x = ctx.convert(x) + y = ctx.convert(y) + def parse(dct, key): + args = dct.pop(key, []) + try: + args = list(args) + except TypeError: + args = [args] + return [ctx.convert(arg) for arg in args] + a_s = dict(a) + b_s = dict(b) + a_m = parse(a, 'm') + a_n = parse(a, 'n') + a_m_add_n = parse(a, 'm+n') + a_m_sub_n = parse(a, 'm-n') + a_n_sub_m = parse(a, 'n-m') + a_2m_add_n = parse(a, '2m+n') + a_2m_sub_n = parse(a, '2m-n') + a_2n_sub_m = parse(a, '2n-m') + b_m = parse(b, 'm') + b_n = parse(b, 'n') + b_m_add_n = parse(b, 'm+n') + if a: raise ValueError("unsupported key: %r" % a.keys()[0]) + if b: raise ValueError("unsupported key: %r" % b.keys()[0]) + s = 0 + outer = ctx.one + m = ctx.mpf(0) + ok_count = 0 + prec = ctx.prec + maxterms = kwargs.get('maxterms', 20*prec) + try: + ctx.prec += 10 + tol = +ctx.eps + while 1: + inner_sign = 1 + outer_sign = 1 + inner_a = list(a_n) + inner_b = list(b_n) + outer_a = [a+m for a in a_m] + outer_b = [b+m for b in b_m] + # (a)_{m+n} = (a)_m (a+m)_n + for a in a_m_add_n: + a = a+m + inner_a.append(a) + outer_a.append(a) + # (b)_{m+n} = (b)_m (b+m)_n + for b in b_m_add_n: + b = b+m + inner_b.append(b) + outer_b.append(b) + # (a)_{n-m} = (a-m)_n / (a-m)_m + for a in a_n_sub_m: + inner_a.append(a-m) + outer_b.append(a-m-1) + # (a)_{m-n} = (-1)^(m+n) (1-a-m)_m / (1-a-m)_n + for a in a_m_sub_n: + inner_sign *= (-1) + outer_sign *= (-1)**(m) + inner_b.append(1-a-m) + outer_a.append(-a-m) + # (a)_{2m+n} = (a)_{2m} (a+2m)_n + for a in a_2m_add_n: + inner_a.append(a+2*m) + 
outer_a.append((a+2*m)*(1+a+2*m)) + # (a)_{2m-n} = (-1)^(2m+n) (1-a-2m)_{2m} / (1-a-2m)_n + for a in a_2m_sub_n: + inner_sign *= (-1) + inner_b.append(1-a-2*m) + outer_a.append((a+2*m)*(1+a+2*m)) + # (a)_{2n-m} = 4^n ((a-m)/2)_n ((a-m+1)/2)_n / (a-m)_m + for a in a_2n_sub_m: + inner_sign *= 4 + inner_a.append(0.5*(a-m)) + inner_a.append(0.5*(a-m+1)) + outer_b.append(a-m-1) + inner = ctx.hyper(inner_a, inner_b, inner_sign*y, + zeroprec=ctx.prec, **kwargs) + term = outer * inner * outer_sign + if abs(term) < tol: + ok_count += 1 + else: + ok_count = 0 + if ok_count >= 3 or not outer: + break + s += term + for a in outer_a: outer *= a + for b in outer_b: outer /= b + m += 1 + outer = outer * x / m + if m > maxterms: + raise ctx.NoConvergence("maxterms exceeded in hyper2d") + finally: + ctx.prec = prec + return +s + +""" +@defun +def kampe_de_feriet(ctx,a,b,c,d,e,f,x,y,**kwargs): + return ctx.hyper2d({'m+n':a,'m':b,'n':c}, + {'m+n':d,'m':e,'n':f}, x,y, **kwargs) +""" + +@defun +def bihyper(ctx, a_s, b_s, z, **kwargs): + r""" + Evaluates the bilateral hypergeometric series + + .. math :: + + \,_AH_B(a_1, \ldots, a_k; b_1, \ldots, b_B; z) = + \sum_{n=-\infty}^{\infty} + \frac{(a_1)_n \ldots (a_A)_n} + {(b_1)_n \ldots (b_B)_n} \, z^n + + where, for direct convergence, `A = B` and `|z| = 1`, although a + regularized sum exists more generally by considering the + bilateral series as a sum of two ordinary hypergeometric + functions. In order for the series to make sense, none of the + parameters may be integers. 
+ + **Examples** + + The value of `\,_2H_2` at `z = 1` is given by Dougall's formula:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> a,b,c,d = 0.5, 1.5, 2.25, 3.25 + >>> bihyper([a,b],[c,d],1) + -14.49118026212345786148847 + >>> gammaprod([c,d,1-a,1-b,c+d-a-b-1],[c-a,d-a,c-b,d-b]) + -14.49118026212345786148847 + + The regularized function `\,_1H_0` can be expressed as the + sum of one `\,_2F_0` function and one `\,_1F_1` function:: + + >>> a = mpf(0.25) + >>> z = mpf(0.75) + >>> bihyper([a], [], z) + (0.2454393389657273841385582 + 0.2454393389657273841385582j) + >>> hyper([a,1],[],z) + (hyper([1],[1-a],-1/z)-1) + (0.2454393389657273841385582 + 0.2454393389657273841385582j) + >>> hyper([a,1],[],z) + hyper([1],[2-a],-1/z)/z/(a-1) + (0.2454393389657273841385582 + 0.2454393389657273841385582j) + + **References** + + 1. [Slater]_ (chapter 6: "Bilateral Series", pp. 180-189) + 2. [Wikipedia]_ http://en.wikipedia.org/wiki/Bilateral_hypergeometric_series + + """ + z = ctx.convert(z) + c_s = a_s + b_s + p = len(a_s) + q = len(b_s) + if (p, q) == (0,0) or (p, q) == (1,1): + return ctx.zero * z + neg = (p-q) % 2 + def h(*c_s): + a_s = list(c_s[:p]) + b_s = list(c_s[p:]) + aa_s = [2-b for b in b_s] + bb_s = [2-a for a in a_s] + rp = [(-1)**neg * z] + [1-b for b in b_s] + [1-a for a in a_s] + rc = [-1] + [1]*len(b_s) + [-1]*len(a_s) + T1 = [], [], [], [], a_s + [1], b_s, z + T2 = rp, rc, [], [], aa_s + [1], bb_s, (-1)**neg / z + return T1, T2 + return ctx.hypercomb(h, c_s, **kwargs) diff --git a/.venv/lib/python3.11/site-packages/mpmath/functions/orthogonal.py b/.venv/lib/python3.11/site-packages/mpmath/functions/orthogonal.py new file mode 100644 index 0000000000000000000000000000000000000000..aa33d8bd78290f55a970e78dab7a317d5f652dee --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/functions/orthogonal.py @@ -0,0 +1,493 @@ +from .functions import defun, defun_wrapped + +def _hermite_param(ctx, n, z, parabolic_cylinder): + """ + Combined 
calculation of the Hermite polynomial H_n(z) (and its + generalization to complex n) and the parabolic cylinder + function D. + """ + n, ntyp = ctx._convert_param(n) + z = ctx.convert(z) + q = -ctx.mpq_1_2 + # For re(z) > 0, 2F0 -- http://functions.wolfram.com/ + # HypergeometricFunctions/HermiteHGeneral/06/02/0009/ + # Otherwise, there is a reflection formula + # 2F0 + http://functions.wolfram.com/HypergeometricFunctions/ + # HermiteHGeneral/16/01/01/0006/ + # + # TODO: + # An alternative would be to use + # http://functions.wolfram.com/HypergeometricFunctions/ + # HermiteHGeneral/06/02/0006/ + # + # Also, the 1F1 expansion + # http://functions.wolfram.com/HypergeometricFunctions/ + # HermiteHGeneral/26/01/02/0001/ + # should probably be used for tiny z + if not z: + T1 = [2, ctx.pi], [n, 0.5], [], [q*(n-1)], [], [], 0 + if parabolic_cylinder: + T1[1][0] += q*n + return T1, + can_use_2f0 = ctx.isnpint(-n) or ctx.re(z) > 0 or \ + (ctx.re(z) == 0 and ctx.im(z) > 0) + expprec = ctx.prec*4 + 20 + if parabolic_cylinder: + u = ctx.fmul(ctx.fmul(z,z,prec=expprec), -0.25, exact=True) + w = ctx.fmul(z, ctx.sqrt(0.5,prec=expprec), prec=expprec) + else: + w = z + w2 = ctx.fmul(w, w, prec=expprec) + rw2 = ctx.fdiv(1, w2, prec=expprec) + nrw2 = ctx.fneg(rw2, exact=True) + nw = ctx.fneg(w, exact=True) + if can_use_2f0: + T1 = [2, w], [n, n], [], [], [q*n, q*(n-1)], [], nrw2 + terms = [T1] + else: + T1 = [2, nw], [n, n], [], [], [q*n, q*(n-1)], [], nrw2 + T2 = [2, ctx.pi, nw], [n+2, 0.5, 1], [], [q*n], [q*(n-1)], [1-q], w2 + terms = [T1,T2] + # Multiply by prefactor for D_n + if parabolic_cylinder: + expu = ctx.exp(u) + for i in range(len(terms)): + terms[i][1][0] += q*n + terms[i][0].append(expu) + terms[i][1].append(1) + return tuple(terms) + +@defun +def hermite(ctx, n, z, **kwargs): + return ctx.hypercomb(lambda: _hermite_param(ctx, n, z, 0), [], **kwargs) + +@defun +def pcfd(ctx, n, z, **kwargs): + r""" + Gives the parabolic cylinder function in Whittaker's notation + 
`D_n(z) = U(-n-1/2, z)` (see :func:`~mpmath.pcfu`). + It solves the differential equation + + .. math :: + + y'' + \left(n + \frac{1}{2} - \frac{1}{4} z^2\right) y = 0. + + and can be represented in terms of Hermite polynomials + (see :func:`~mpmath.hermite`) as + + .. math :: + + D_n(z) = 2^{-n/2} e^{-z^2/4} H_n\left(\frac{z}{\sqrt{2}}\right). + + **Plots** + + .. literalinclude :: /plots/pcfd.py + .. image :: /plots/pcfd.png + + **Examples** + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> pcfd(0,0); pcfd(1,0); pcfd(2,0); pcfd(3,0) + 1.0 + 0.0 + -1.0 + 0.0 + >>> pcfd(4,0); pcfd(-3,0) + 3.0 + 0.6266570686577501256039413 + >>> pcfd('1/2', 2+3j) + (-5.363331161232920734849056 - 3.858877821790010714163487j) + >>> pcfd(2, -10) + 1.374906442631438038871515e-9 + + Verifying the differential equation:: + + >>> n = mpf(2.5) + >>> y = lambda z: pcfd(n,z) + >>> z = 1.75 + >>> chop(diff(y,z,2) + (n+0.5-0.25*z**2)*y(z)) + 0.0 + + Rational Taylor series expansion when `n` is an integer:: + + >>> taylor(lambda z: pcfd(5,z), 0, 7) + [0.0, 15.0, 0.0, -13.75, 0.0, 3.96875, 0.0, -0.6015625] + + """ + return ctx.hypercomb(lambda: _hermite_param(ctx, n, z, 1), [], **kwargs) + +@defun +def pcfu(ctx, a, z, **kwargs): + r""" + Gives the parabolic cylinder function `U(a,z)`, which may be + defined for `\Re(z) > 0` in terms of the confluent + U-function (see :func:`~mpmath.hyperu`) by + + .. math :: + + U(a,z) = 2^{-\frac{1}{4}-\frac{a}{2}} e^{-\frac{1}{4} z^2} + U\left(\frac{a}{2}+\frac{1}{4}, + \frac{1}{2}, \frac{1}{2}z^2\right) + + or, for arbitrary `z`, + + .. math :: + + e^{-\frac{1}{4}z^2} U(a,z) = + U(a,0) \,_1F_1\left(-\tfrac{a}{2}+\tfrac{1}{4}; + \tfrac{1}{2}; -\tfrac{1}{2}z^2\right) + + U'(a,0) z \,_1F_1\left(-\tfrac{a}{2}+\tfrac{3}{4}; + \tfrac{3}{2}; -\tfrac{1}{2}z^2\right). 
+ + **Examples** + + Connection to other functions:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> z = mpf(3) + >>> pcfu(0.5,z) + 0.03210358129311151450551963 + >>> sqrt(pi/2)*exp(z**2/4)*erfc(z/sqrt(2)) + 0.03210358129311151450551963 + >>> pcfu(0.5,-z) + 23.75012332835297233711255 + >>> sqrt(pi/2)*exp(z**2/4)*erfc(-z/sqrt(2)) + 23.75012332835297233711255 + >>> pcfu(0.5,-z) + 23.75012332835297233711255 + >>> sqrt(pi/2)*exp(z**2/4)*erfc(-z/sqrt(2)) + 23.75012332835297233711255 + + """ + n, _ = ctx._convert_param(a) + return ctx.pcfd(-n-ctx.mpq_1_2, z) + +@defun +def pcfv(ctx, a, z, **kwargs): + r""" + Gives the parabolic cylinder function `V(a,z)`, which can be + represented in terms of :func:`~mpmath.pcfu` as + + .. math :: + + V(a,z) = \frac{\Gamma(a+\tfrac{1}{2}) (U(a,-z)-\sin(\pi a) U(a,z)}{\pi}. + + **Examples** + + Wronskian relation between `U` and `V`:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> a, z = 2, 3 + >>> pcfu(a,z)*diff(pcfv,(a,z),(0,1))-diff(pcfu,(a,z),(0,1))*pcfv(a,z) + 0.7978845608028653558798921 + >>> sqrt(2/pi) + 0.7978845608028653558798921 + >>> a, z = 2.5, 3 + >>> pcfu(a,z)*diff(pcfv,(a,z),(0,1))-diff(pcfu,(a,z),(0,1))*pcfv(a,z) + 0.7978845608028653558798921 + >>> a, z = 0.25, -1 + >>> pcfu(a,z)*diff(pcfv,(a,z),(0,1))-diff(pcfu,(a,z),(0,1))*pcfv(a,z) + 0.7978845608028653558798921 + >>> a, z = 2+1j, 2+3j + >>> chop(pcfu(a,z)*diff(pcfv,(a,z),(0,1))-diff(pcfu,(a,z),(0,1))*pcfv(a,z)) + 0.7978845608028653558798921 + + """ + n, ntype = ctx._convert_param(a) + z = ctx.convert(z) + q = ctx.mpq_1_2 + r = ctx.mpq_1_4 + if ntype == 'Q' and ctx.isint(n*2): + # Faster for half-integers + def h(): + jz = ctx.fmul(z, -1j, exact=True) + T1terms = _hermite_param(ctx, -n-q, z, 1) + T2terms = _hermite_param(ctx, n-q, jz, 1) + for T in T1terms: + T[0].append(1j) + T[1].append(1) + T[3].append(q-n) + u = ctx.expjpi((q*n-r)) * ctx.sqrt(2/ctx.pi) + for T in T2terms: + T[0].append(u) + T[1].append(1) + return 
T1terms + T2terms + v = ctx.hypercomb(h, [], **kwargs) + if ctx._is_real_type(n) and ctx._is_real_type(z): + v = ctx._re(v) + return v + else: + def h(n): + w = ctx.square_exp_arg(z, -0.25) + u = ctx.square_exp_arg(z, 0.5) + e = ctx.exp(w) + l = [ctx.pi, q, ctx.exp(w)] + Y1 = l, [-q, n*q+r, 1], [r-q*n], [], [q*n+r], [q], u + Y2 = l + [z], [-q, n*q-r, 1, 1], [1-r-q*n], [], [q*n+1-r], [1+q], u + c, s = ctx.cospi_sinpi(r+q*n) + Y1[0].append(s) + Y2[0].append(c) + for Y in (Y1, Y2): + Y[1].append(1) + Y[3].append(q-n) + return Y1, Y2 + return ctx.hypercomb(h, [n], **kwargs) + + +@defun +def pcfw(ctx, a, z, **kwargs): + r""" + Gives the parabolic cylinder function `W(a,z)` defined in (DLMF 12.14). + + **Examples** + + Value at the origin:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> a = mpf(0.25) + >>> pcfw(a,0) + 0.9722833245718180765617104 + >>> power(2,-0.75)*sqrt(abs(gamma(0.25+0.5j*a)/gamma(0.75+0.5j*a))) + 0.9722833245718180765617104 + >>> diff(pcfw,(a,0),(0,1)) + -0.5142533944210078966003624 + >>> -power(2,-0.25)*sqrt(abs(gamma(0.75+0.5j*a)/gamma(0.25+0.5j*a))) + -0.5142533944210078966003624 + + """ + n, _ = ctx._convert_param(a) + z = ctx.convert(z) + def terms(): + phi2 = ctx.arg(ctx.gamma(0.5 + ctx.j*n)) + phi2 = (ctx.loggamma(0.5+ctx.j*n) - ctx.loggamma(0.5-ctx.j*n))/2j + rho = ctx.pi/8 + 0.5*phi2 + # XXX: cancellation computing k + k = ctx.sqrt(1 + ctx.exp(2*ctx.pi*n)) - ctx.exp(ctx.pi*n) + C = ctx.sqrt(k/2) * ctx.exp(0.25*ctx.pi*n) + yield C * ctx.expj(rho) * ctx.pcfu(ctx.j*n, z*ctx.expjpi(-0.25)) + yield C * ctx.expj(-rho) * ctx.pcfu(-ctx.j*n, z*ctx.expjpi(0.25)) + v = ctx.sum_accurately(terms) + if ctx._is_real_type(n) and ctx._is_real_type(z): + v = ctx._re(v) + return v + +""" +Even/odd PCFs. Useful? 
+ +@defun +def pcfy1(ctx, a, z, **kwargs): + a, _ = ctx._convert_param(n) + z = ctx.convert(z) + def h(): + w = ctx.square_exp_arg(z) + w1 = ctx.fmul(w, -0.25, exact=True) + w2 = ctx.fmul(w, 0.5, exact=True) + e = ctx.exp(w1) + return [e], [1], [], [], [ctx.mpq_1_2*a+ctx.mpq_1_4], [ctx.mpq_1_2], w2 + return ctx.hypercomb(h, [], **kwargs) + +@defun +def pcfy2(ctx, a, z, **kwargs): + a, _ = ctx._convert_param(n) + z = ctx.convert(z) + def h(): + w = ctx.square_exp_arg(z) + w1 = ctx.fmul(w, -0.25, exact=True) + w2 = ctx.fmul(w, 0.5, exact=True) + e = ctx.exp(w1) + return [e, z], [1, 1], [], [], [ctx.mpq_1_2*a+ctx.mpq_3_4], \ + [ctx.mpq_3_2], w2 + return ctx.hypercomb(h, [], **kwargs) +""" + +@defun_wrapped +def gegenbauer(ctx, n, a, z, **kwargs): + # Special cases: a+0.5, a*2 poles + if ctx.isnpint(a): + return 0*(z+n) + if ctx.isnpint(a+0.5): + # TODO: something else is required here + # E.g.: gegenbauer(-2, -0.5, 3) == -12 + if ctx.isnpint(n+1): + raise NotImplementedError("Gegenbauer function with two limits") + def h(a): + a2 = 2*a + T = [], [], [n+a2], [n+1, a2], [-n, n+a2], [a+0.5], 0.5*(1-z) + return [T] + return ctx.hypercomb(h, [a], **kwargs) + def h(n): + a2 = 2*a + T = [], [], [n+a2], [n+1, a2], [-n, n+a2], [a+0.5], 0.5*(1-z) + return [T] + return ctx.hypercomb(h, [n], **kwargs) + +@defun_wrapped +def jacobi(ctx, n, a, b, x, **kwargs): + if not ctx.isnpint(a): + def h(n): + return (([], [], [a+n+1], [n+1, a+1], [-n, a+b+n+1], [a+1], (1-x)*0.5),) + return ctx.hypercomb(h, [n], **kwargs) + if not ctx.isint(b): + def h(n, a): + return (([], [], [-b], [n+1, -b-n], [-n, a+b+n+1], [b+1], (x+1)*0.5),) + return ctx.hypercomb(h, [n, a], **kwargs) + # XXX: determine appropriate limit + return ctx.binomial(n+a,n) * ctx.hyp2f1(-n,1+n+a+b,a+1,(1-x)/2, **kwargs) + +@defun_wrapped +def laguerre(ctx, n, a, z, **kwargs): + # XXX: limits, poles + #if ctx.isnpint(n): + # return 0*(a+z) + def h(a): + return (([], [], [a+n+1], [a+1, n+1], [-n], [a+1], z),) + return 
ctx.hypercomb(h, [a], **kwargs) + +@defun_wrapped +def legendre(ctx, n, x, **kwargs): + if ctx.isint(n): + n = int(n) + # Accuracy near zeros + if (n + (n < 0)) & 1: + if not x: + return x + mag = ctx.mag(x) + if mag < -2*ctx.prec-10: + return x + if mag < -5: + ctx.prec += -mag + return ctx.hyp2f1(-n,n+1,1,(1-x)/2, **kwargs) + +@defun +def legenp(ctx, n, m, z, type=2, **kwargs): + # Legendre function, 1st kind + n = ctx.convert(n) + m = ctx.convert(m) + # Faster + if not m: + return ctx.legendre(n, z, **kwargs) + # TODO: correct evaluation at singularities + if type == 2: + def h(n,m): + g = m*0.5 + T = [1+z, 1-z], [g, -g], [], [1-m], [-n, n+1], [1-m], 0.5*(1-z) + return (T,) + return ctx.hypercomb(h, [n,m], **kwargs) + if type == 3: + def h(n,m): + g = m*0.5 + T = [z+1, z-1], [g, -g], [], [1-m], [-n, n+1], [1-m], 0.5*(1-z) + return (T,) + return ctx.hypercomb(h, [n,m], **kwargs) + raise ValueError("requires type=2 or type=3") + +@defun +def legenq(ctx, n, m, z, type=2, **kwargs): + # Legendre function, 2nd kind + n = ctx.convert(n) + m = ctx.convert(m) + z = ctx.convert(z) + if z in (1, -1): + #if ctx.isint(m): + # return ctx.nan + #return ctx.inf # unsigned + return ctx.nan + if type == 2: + def h(n, m): + cos, sin = ctx.cospi_sinpi(m) + s = 2 * sin / ctx.pi + c = cos + a = 1+z + b = 1-z + u = m/2 + w = (1-z)/2 + T1 = [s, c, a, b], [-1, 1, u, -u], [], [1-m], \ + [-n, n+1], [1-m], w + T2 = [-s, a, b], [-1, -u, u], [n+m+1], [n-m+1, m+1], \ + [-n, n+1], [m+1], w + return T1, T2 + return ctx.hypercomb(h, [n, m], **kwargs) + if type == 3: + # The following is faster when there only is a single series + # Note: not valid for -1 < z < 0 (?) + if abs(z) > 1: + def h(n, m): + T1 = [ctx.expjpi(m), 2, ctx.pi, z, z-1, z+1], \ + [1, -n-1, 0.5, -n-m-1, 0.5*m, 0.5*m], \ + [n+m+1], [n+1.5], \ + [0.5*(2+n+m), 0.5*(1+n+m)], [n+1.5], z**(-2) + return [T1] + return ctx.hypercomb(h, [n, m], **kwargs) + else: + # not valid for 1 < z < inf ? 
+ def h(n, m): + s = 2 * ctx.sinpi(m) / ctx.pi + c = ctx.expjpi(m) + a = 1+z + b = z-1 + u = m/2 + w = (1-z)/2 + T1 = [s, c, a, b], [-1, 1, u, -u], [], [1-m], \ + [-n, n+1], [1-m], w + T2 = [-s, c, a, b], [-1, 1, -u, u], [n+m+1], [n-m+1, m+1], \ + [-n, n+1], [m+1], w + return T1, T2 + return ctx.hypercomb(h, [n, m], **kwargs) + raise ValueError("requires type=2 or type=3") + +@defun_wrapped +def chebyt(ctx, n, x, **kwargs): + if (not x) and ctx.isint(n) and int(ctx._re(n)) % 2 == 1: + return x * 0 + return ctx.hyp2f1(-n,n,(1,2),(1-x)/2, **kwargs) + +@defun_wrapped +def chebyu(ctx, n, x, **kwargs): + if (not x) and ctx.isint(n) and int(ctx._re(n)) % 2 == 1: + return x * 0 + return (n+1) * ctx.hyp2f1(-n, n+2, (3,2), (1-x)/2, **kwargs) + +@defun +def spherharm(ctx, l, m, theta, phi, **kwargs): + l = ctx.convert(l) + m = ctx.convert(m) + theta = ctx.convert(theta) + phi = ctx.convert(phi) + l_isint = ctx.isint(l) + l_natural = l_isint and l >= 0 + m_isint = ctx.isint(m) + if l_isint and l < 0 and m_isint: + return ctx.spherharm(-(l+1), m, theta, phi, **kwargs) + if theta == 0 and m_isint and m < 0: + return ctx.zero * 1j + if l_natural and m_isint: + if abs(m) > l: + return ctx.zero * 1j + # http://functions.wolfram.com/Polynomials/ + # SphericalHarmonicY/26/01/02/0004/ + def h(l,m): + absm = abs(m) + C = [-1, ctx.expj(m*phi), + (2*l+1)*ctx.fac(l+absm)/ctx.pi/ctx.fac(l-absm), + ctx.sin(theta)**2, + ctx.fac(absm), 2] + P = [0.5*m*(ctx.sign(m)+1), 1, 0.5, 0.5*absm, -1, -absm-1] + return ((C, P, [], [], [absm-l, l+absm+1], [absm+1], + ctx.sin(0.5*theta)**2),) + else: + # http://functions.wolfram.com/HypergeometricFunctions/ + # SphericalHarmonicYGeneral/26/01/02/0001/ + def h(l,m): + if ctx.isnpint(l-m+1) or ctx.isnpint(l+m+1) or ctx.isnpint(1-m): + return (([0], [-1], [], [], [], [], 0),) + cos, sin = ctx.cos_sin(0.5*theta) + C = [0.5*ctx.expj(m*phi), (2*l+1)/ctx.pi, + ctx.gamma(l-m+1), ctx.gamma(l+m+1), + cos**2, sin**2] + P = [1, 0.5, 0.5, -0.5, 0.5*m, -0.5*m] + return 
((C, P, [], [1-m], [-l,l+1], [1-m], sin**2),) + return ctx.hypercomb(h, [l,m], **kwargs) diff --git a/.venv/lib/python3.11/site-packages/mpmath/functions/qfunctions.py b/.venv/lib/python3.11/site-packages/mpmath/functions/qfunctions.py new file mode 100644 index 0000000000000000000000000000000000000000..5a20e53a8b6fa0d8fbc9ad098614d2694998f49a --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/functions/qfunctions.py @@ -0,0 +1,280 @@ +from .functions import defun, defun_wrapped + +@defun +def qp(ctx, a, q=None, n=None, **kwargs): + r""" + Evaluates the q-Pochhammer symbol (or q-rising factorial) + + .. math :: + + (a; q)_n = \prod_{k=0}^{n-1} (1-a q^k) + + where `n = \infty` is permitted if `|q| < 1`. Called with two arguments, + ``qp(a,q)`` computes `(a;q)_{\infty}`; with a single argument, ``qp(q)`` + computes `(q;q)_{\infty}`. The special case + + .. math :: + + \phi(q) = (q; q)_{\infty} = \prod_{k=1}^{\infty} (1-q^k) = + \sum_{k=-\infty}^{\infty} (-1)^k q^{(3k^2-k)/2} + + is also known as the Euler function, or (up to a factor `q^{-1/24}`) + the Dedekind eta function. + + **Examples** + + If `n` is a positive integer, the function amounts to a finite product:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> qp(2,3,5) + -725305.0 + >>> fprod(1-2*3**k for k in range(5)) + -725305.0 + >>> qp(2,3,0) + 1.0 + + Complex arguments are allowed:: + + >>> qp(2-1j, 0.75j) + (0.4628842231660149089976379 + 4.481821753552703090628793j) + + The regular Pochhammer symbol `(a)_n` is obtained in the + following limit as `q \to 1`:: + + >>> a, n = 4, 7 + >>> limit(lambda q: qp(q**a,q,n) / (1-q)**n, 1) + 604800.0 + >>> rf(a,n) + 604800.0 + + The Taylor series of the reciprocal Euler function gives + the partition function `P(n)`, i.e. 
the number of ways of writing + `n` as a sum of positive integers:: + + >>> taylor(lambda q: 1/qp(q), 0, 10) + [1.0, 1.0, 2.0, 3.0, 5.0, 7.0, 11.0, 15.0, 22.0, 30.0, 42.0] + + Special values include:: + + >>> qp(0) + 1.0 + >>> findroot(diffun(qp), -0.4) # location of maximum + -0.4112484791779547734440257 + >>> qp(_) + 1.228348867038575112586878 + + The q-Pochhammer symbol is related to the Jacobi theta functions. + For example, the following identity holds:: + + >>> q = mpf(0.5) # arbitrary + >>> qp(q) + 0.2887880950866024212788997 + >>> root(3,-2)*root(q,-24)*jtheta(2,pi/6,root(q,6)) + 0.2887880950866024212788997 + + """ + a = ctx.convert(a) + if n is None: + n = ctx.inf + else: + n = ctx.convert(n) + if n < 0: + raise ValueError("n cannot be negative") + if q is None: + q = a + else: + q = ctx.convert(q) + if n == 0: + return ctx.one + 0*(a+q) + infinite = (n == ctx.inf) + same = (a == q) + if infinite: + if abs(q) >= 1: + if same and (q == -1 or q == 1): + return ctx.zero * q + raise ValueError("q-function only defined for |q| < 1") + elif q == 0: + return ctx.one - a + maxterms = kwargs.get('maxterms', 50*ctx.prec) + if infinite and same: + # Euler's pentagonal theorem + def terms(): + t = 1 + yield t + k = 1 + x1 = q + x2 = q**2 + while 1: + yield (-1)**k * x1 + yield (-1)**k * x2 + x1 *= q**(3*k+1) + x2 *= q**(3*k+2) + k += 1 + if k > maxterms: + raise ctx.NoConvergence + return ctx.sum_accurately(terms) + # return ctx.nprod(lambda k: 1-a*q**k, [0,n-1]) + def factors(): + k = 0 + r = ctx.one + while 1: + yield 1 - a*r + r *= q + k += 1 + if k >= n: + return + if k > maxterms: + raise ctx.NoConvergence + return ctx.mul_accurately(factors) + +@defun_wrapped +def qgamma(ctx, z, q, **kwargs): + r""" + Evaluates the q-gamma function + + .. math :: + + \Gamma_q(z) = \frac{(q; q)_{\infty}}{(q^z; q)_{\infty}} (1-q)^{1-z}. 
+ + + **Examples** + + Evaluation for real and complex arguments:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> qgamma(4,0.75) + 4.046875 + >>> qgamma(6,6) + 121226245.0 + >>> qgamma(3+4j, 0.5j) + (0.1663082382255199834630088 + 0.01952474576025952984418217j) + + The q-gamma function satisfies a functional equation similar + to that of the ordinary gamma function:: + + >>> q = mpf(0.25) + >>> z = mpf(2.5) + >>> qgamma(z+1,q) + 1.428277424823760954685912 + >>> (1-q**z)/(1-q)*qgamma(z,q) + 1.428277424823760954685912 + + """ + if abs(q) > 1: + return ctx.qgamma(z,1/q)*q**((z-2)*(z-1)*0.5) + return ctx.qp(q, q, None, **kwargs) / \ + ctx.qp(q**z, q, None, **kwargs) * (1-q)**(1-z) + +@defun_wrapped +def qfac(ctx, z, q, **kwargs): + r""" + Evaluates the q-factorial, + + .. math :: + + [n]_q! = (1+q)(1+q+q^2)\cdots(1+q+\cdots+q^{n-1}) + + or more generally + + .. math :: + + [z]_q! = \frac{(q;q)_z}{(1-q)^z}. + + **Examples** + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> qfac(0,0) + 1.0 + >>> qfac(4,3) + 2080.0 + >>> qfac(5,6) + 121226245.0 + >>> qfac(1+1j, 2+1j) + (0.4370556551322672478613695 + 0.2609739839216039203708921j) + + """ + if ctx.isint(z) and ctx._re(z) > 0: + n = int(ctx._re(z)) + return ctx.qp(q, q, n, **kwargs) / (1-q)**n + return ctx.qgamma(z+1, q, **kwargs) + +@defun +def qhyper(ctx, a_s, b_s, q, z, **kwargs): + r""" + Evaluates the basic hypergeometric series or hypergeometric q-series + + .. math :: + + \,_r\phi_s \left[\begin{matrix} + a_1 & a_2 & \ldots & a_r \\ + b_1 & b_2 & \ldots & b_s + \end{matrix} ; q,z \right] = + \sum_{n=0}^\infty + \frac{(a_1;q)_n, \ldots, (a_r;q)_n} + {(b_1;q)_n, \ldots, (b_s;q)_n} + \left((-1)^n q^{n\choose 2}\right)^{1+s-r} + \frac{z^n}{(q;q)_n} + + where `(a;q)_n` denotes the q-Pochhammer symbol (see :func:`~mpmath.qp`). 
+ + **Examples** + + Evaluation works for real and complex arguments:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> qhyper([0.5], [2.25], 0.25, 4) + -0.1975849091263356009534385 + >>> qhyper([0.5], [2.25], 0.25-0.25j, 4) + (2.806330244925716649839237 + 3.568997623337943121769938j) + >>> qhyper([1+j], [2,3+0.5j], 0.25, 3+4j) + (9.112885171773400017270226 - 1.272756997166375050700388j) + + Comparing with a summation of the defining series, using + :func:`~mpmath.nsum`:: + + >>> b, q, z = 3, 0.25, 0.5 + >>> qhyper([], [b], q, z) + 0.6221136748254495583228324 + >>> nsum(lambda n: z**n / qp(q,q,n)/qp(b,q,n) * q**(n*(n-1)), [0,inf]) + 0.6221136748254495583228324 + + """ + #a_s = [ctx._convert_param(a)[0] for a in a_s] + #b_s = [ctx._convert_param(b)[0] for b in b_s] + #q = ctx._convert_param(q)[0] + a_s = [ctx.convert(a) for a in a_s] + b_s = [ctx.convert(b) for b in b_s] + q = ctx.convert(q) + z = ctx.convert(z) + r = len(a_s) + s = len(b_s) + d = 1+s-r + maxterms = kwargs.get('maxterms', 50*ctx.prec) + def terms(): + t = ctx.one + yield t + qk = 1 + k = 0 + x = 1 + while 1: + for a in a_s: + p = 1 - a*qk + t *= p + for b in b_s: + p = 1 - b*qk + if not p: + raise ValueError + t /= p + t *= z + x *= (-1)**d * qk ** d + qk *= q + t /= (1 - qk) + k += 1 + yield t * x + if k > maxterms: + raise ctx.NoConvergence + return ctx.sum_accurately(terms) diff --git a/.venv/lib/python3.11/site-packages/mpmath/functions/rszeta.py b/.venv/lib/python3.11/site-packages/mpmath/functions/rszeta.py new file mode 100644 index 0000000000000000000000000000000000000000..19e2c9a251b81bafe8cf77a2b0180636b1078ee4 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/functions/rszeta.py @@ -0,0 +1,1403 @@ +""" +--------------------------------------------------------------------- +.. 
sectionauthor:: Juan Arias de Reyna

This module implements zeta-related functions using the Riemann-Siegel
expansion: zeta_offline(s,k=0)

* coef(J, eps): needed in the computation of Rzeta(s,k)

* Rzeta_simul(s, der=0) computes Rzeta^(k)(s) and Rzeta^(k)(1-s) simultaneously
  for 0 <= k <= der. Used by zeta_offline and z_offline

* Rzeta_set(s, derivatives) computes Rzeta^(k)(s) for given derivatives, used by
  z_half(t,k) and zeta_half

* z_offline(w,k): Z(w) and its derivatives of order k <= 4
* z_half(t,k): Z(t) (Riemann-Siegel function) and its derivatives of order k <= 4
* zeta_offline(s): zeta(s) and its derivatives of order k <= 4
* zeta_half(1/2+it,k): zeta(s) and its derivatives of order k <= 4

* rs_zeta(s,k=0) computes zeta^(k)(s); unifies zeta_half and zeta_offline
* rs_z(w,k=0) computes Z^(k)(w); unifies z_offline and z_half
----------------------------------------------------------------------

This program uses the Riemann-Siegel expansion to compute
zeta(s) at points s = sigma + i t with sigma arbitrary, not
necessarily equal to 1/2.

It is based on a new derivation of the formula, with rigorous
and sharp bounds for the terms and the remainder of this expansion.

More information in the papers:

 J. Arias de Reyna, High Precision Computation of Riemann's
 Zeta Function by the Riemann-Siegel Formula I, II

 We refer to them as I, II.

 They contain a detailed explanation of the whole procedure.

The program uses the Riemann-Siegel expansion.
This is useful when t is large (say, t > 10000).
The precision is limited: roughly, it can compute zeta(sigma+it)
with an error less than exp(-c t) for some constant c depending
on sigma. The program raises an error when the Riemann-Siegel
formula cannot reach the wanted precision.
"""

import math

class RSCache(object):
    # Mixin providing the per-context cache used by coef() below.
    # Note: mpmath contexts use `ctx` in place of `self`.
    # Layout: [J, eps, c, pipower] — the parameters of the last
    # coefficient computation and its results.
    def __init__(ctx):
        ctx._rs_cache = [0, 10, {}, {}]

from .functions import defun

#-------------------------------------------------------------------------------#
#                                                                               #
#      coef(ctx, J, eps, _cache=[0, 10, {} ] )                                  #
#                                                                               #
#-------------------------------------------------------------------------------#

# This function computes the coefficients c[n] defined in (I, equation (47)),
# but see also (II, section 3.14).
#
# Since these coefficients are very difficult to compute, we save the values
# in a cache. So if we compute several values of the function Rzeta(s) for
# nearby values of s, we do not recompute these coefficients.
#
# c[n] are the Taylor coefficients of the function:
#
# F(z):= (exp(pi*j*(z*z/2+3/8))-j* sqrt(2) cos(pi*z/2))/(2*cos(pi *z))
#

def _coef(ctx, J, eps):
    r"""
    Computes the coefficients `c_n` for `0\le n\le 2J` with error less than eps.

    Returns the list ``[newJ, neweps6, c, pipower]`` where ``c`` maps
    ``n -> c_n`` and ``pipower`` maps ``n -> pi**n``.

    **Definition**

    The coefficients c_n are defined by

    .. math ::

        \begin{equation}
        F(z)=\frac{e^{\pi i
        \bigl(\frac{z^2}{2}+\frac38\bigr)}-i\sqrt{2}\cos\frac{\pi}{2}z}{2\cos\pi
        z}=\sum_{n=0}^\infty c_{2n} z^{2n}
        \end{equation}

    they are computed applying the relation

    .. math ::

        \begin{multline}
        c_{2n}=-\frac{i}{\sqrt{2}}\Bigl(\frac{\pi}{2}\Bigr)^{2n}
        \sum_{k=0}^n\frac{(-1)^k}{(2k)!}
        2^{2n-2k}\frac{(-1)^{n-k}E_{2n-2k}}{(2n-2k)!}+\\
        +e^{3\pi i/8}\sum_{j=0}^n(-1)^j\frac{
        E_{2j}}{(2j)!}\frac{i^{n-j}\pi^{n+j}}{(n-j)!2^{n-j+1}}.
        \end{multline}
    """

    newJ = J+2 # compute more coefficients than are needed
    neweps6 = eps/2. # compute with slightly more precision than is needed

    # PREPARATION FOR THE COMPUTATION OF V(N) AND W(N)
    # See II Section 3.16
    #
    # Computing the exponent wpvw of the error II equation (81)
    wpvw = max(ctx.mag(10*(newJ+3)), 4*newJ+5-ctx.mag(neweps6))

    # Preparation of Euler numbers (we need up to index 2*newJ)
    E = ctx._eulernum(2*newJ)

    # Now we have in the cache all the needed Euler numbers.
    #
    # Computing the powers of pi
    #
    # We need to compute the powers pi**n for 1<= n <= 2*J
    # with relative error less than 2**(-wpvw);
    # it is easy to show that this is obtained
    # taking wppi as the least d with
    # 2**d>40*J and 2**d> 4.24 *newJ + 2**wpvw
    # In II Section 3.9 we need also that
    # wppi > wptcoef[0], and that the powers
    # here computed for 0<= k <= 2*newJ are more
    # than those needed there, which are 2*L-2.
    # So we need J >= L; this will be checked
    # before computing tcoef[]
    wppi = max(ctx.mag(40*newJ), ctx.mag(newJ)+3 +wpvw)
    ctx.prec = wppi
    pipower = {}
    pipower[0] = ctx.one
    pipower[1] = ctx.pi
    for n in range(2,2*newJ+1):
        pipower[n] = pipower[n-1]*ctx.pi

    # COMPUTING THE COEFFICIENTS v(n) AND w(n)
    # see II equation (61) and equations (81) and (82)
    # v[n] = (-1)^n E_{2n} pi^{2n} / (2n)!,  w[n] = (pi/2)^n / n!
    ctx.prec = wpvw+2
    v={}
    w={}
    for n in range(0,newJ+1):
        va = (-1)**n * ctx._eulernum(2*n)
        va = ctx.mpf(va)/ctx.fac(2*n)
        v[n]=va*pipower[2*n]
    for n in range(0,2*newJ+1):
        wa = ctx.one/ctx.fac(n)
        wa=wa/(2**n)
        w[n]=wa*pipower[n]

    # COMPUTATION OF THE CONVOLUTIONS RS_P1 AND RS_P2
    # See II Section 3.16
    # Each P1[n], P2[n] is computed at its own working precision wpp1/wpp2.
    ctx.prec = 15
    wpp1a = 9 - ctx.mag(neweps6)
    P1 = {}
    for n in range(0,newJ+1):
        ctx.prec = 15
        wpp1 = max(ctx.mag(10*(n+4)),4*n+wpp1a)
        ctx.prec = wpp1
        sump = 0
        for k in range(0,n+1):
            sump += ((-1)**k) * v[k]*w[2*n-2*k]
        P1[n]=((-1)**(n+1))*ctx.j*sump
    P2={}
    for n in range(0,newJ+1):
        ctx.prec = 15
        wpp2 = max(ctx.mag(10*(n+4)),4*n+wpp1a)
        ctx.prec = wpp2
        sump = 0
        for k in range(0,n+1):
            sump += (ctx.j**(n-k)) * v[k]*w[n-k]
        P2[n]=sump
    # COMPUTING THE COEFFICIENTS c[2n]
    # See II Section 3.14
    # c[2n] = mu*P1[n] + nu*P2[n]; all odd coefficients vanish since
    # F is even.
    ctx.prec = 15
    wpc0 = 5 - ctx.mag(neweps6)   # base working precision for the c[2n]
    wpc = max(6,4*newJ+wpc0)
    ctx.prec = wpc
    mu = ctx.sqrt(ctx.mpf('2'))/2   # = 1/sqrt(2)
    nu = ctx.expjpi(3./8)/2         # = exp(3*pi*i/8)/2
    c={}
    for n in range(0,newJ):
        # each coefficient gets its own (smaller) working precision
        ctx.prec = 15
        wpc = max(6,4*n+wpc0)
        ctx.prec = wpc
        c[2*n] = mu*P1[n]+nu*P2[n]
    for n in range(1,2*newJ,2):
        c[n] = 0
    return [newJ, neweps6, c, pipower]

def coef(ctx, J, eps):
    """
    Cached front-end for :func:`_coef`.

    Returns the pair ``(c, pipower)`` where ``c`` maps ``n -> c_n``
    (Taylor coefficients of F, see ``_coef``) and ``pipower`` maps
    ``n -> pi**n``.  Results are stored in
    ``ctx._rs_cache = [J, eps, c, pipower]`` so repeated evaluations of
    Rzeta at nearby points do not recompute these expensive coefficients.
    """
    _cache = ctx._rs_cache
    # Cache hit: a previous call already computed at least J terms
    # with error at most eps.
    if J <= _cache[0] and eps >= _cache[1]:
        return _cache[2], _cache[3]
    orig = ctx._mp.prec
    try:
        # The heavy computation is always carried out in the mp context.
        data = _coef(ctx._mp, J, eps)
    finally:
        # Restore the caller's precision even if _coef raises.
        ctx._mp.prec = orig
    if ctx is not ctx._mp:
        # Convert the cached values into this context's number type.
        data[2] = dict((k,ctx.convert(v)) for (k,v) in data[2].items())
        data[3] = dict((k,ctx.convert(v)) for (k,v) in data[3].items())
    ctx._rs_cache[:] = data
    return ctx._rs_cache[2], ctx._rs_cache[3]

#-------------------------------------------------------------------------------#
#                                                                               #
#      Rzeta_simul(s,k=0)                                                       #
#                                                                               #
#-------------------------------------------------------------------------------#
# This function returns a list with the values:
# Rzeta(sigma+it), conj(Rzeta(1-sigma+it)), Rzeta'(sigma+it), conj(Rzeta'(1-sigma+it)),
# .... , Rzeta^{(k)}(sigma+it), conj(Rzeta^{(k)}(1-sigma+it))
#
# Useful to compute the function zeta(s) and Z(w) or its derivatives.
#

def aux_M_Fp(ctx, xA, xeps4, a, xB1, xL):
    # COMPUTING M, THE NUMBER OF DERIVATIVES Fp[m] TO COMPUTE
    # See II Section 3.11 equations (47) and (48)
    # Start from the largest candidate m = 3*L-3 and decrease it
    # while the error bound still holds.
    aux1 = 126.0657606*xA/xeps4   # 126.06.. = 316/sqrt(2*pi)
    aux1 = ctx.ln(aux1)
    aux2 = (2*ctx.ln(ctx.pi)+ctx.ln(xB1)+ctx.ln(a))/3 -ctx.ln(2*ctx.pi)/2
    m = 3*xL-3
    aux3= (ctx.loggamma(m+1)-ctx.loggamma(m/3.0+2))/2 -ctx.loggamma((m+1)/2.)
    while((aux1 < m*aux2+ aux3)and (m>1)):
        m = m - 1
        aux3 = (ctx.loggamma(m+1)-ctx.loggamma(m/3.0+2))/2 -ctx.loggamma((m+1)/2.)
    xM = m
    return xM

def aux_J_needed(ctx, xA, xeps4, a, xB1, xM):
    # DETERMINATION OF J, THE NUMBER OF TERMS NEEDED
    # IN THE TAYLOR SERIES OF F.
    # See II Section 3.11 equation (49)
    # Returns the bound h3 = min(h1, h2); the caller increases J until
    # the tail of the Taylor series of F drops below h3.
    h1 = xeps4/(632*xA)
    h2 = xB1*a * 126.31337419529260248 # = pi^2*e^2*sqrt(3)
    h2 = h1 * ctx.power((h2/xM**2),(xM-1)/3) / xM
    h3 = min(h1,h2)
    return h3

def Rzeta_simul(ctx, s, der=0):
    # Computes the lists of values Rzeta^(k)(s) and conj(Rzeta^(k)(1-s))
    # for 0 <= k <= der simultaneously, sharing the Riemann-Siegel
    # parameters (a, N, L, J, M, ...) between the two points.
    # First we take the value of ctx.prec
    wpinitial = ctx.prec

    # INITIALIZATION
    # Take the real and imaginary part of s
    # (x* quantities refer to sigma, y* quantities to 1-sigma)
    t = ctx._im(s)
    xsigma = ctx._re(s)
    ysigma = 1 - xsigma

    # Now compute several parameters that appear in the program
    ctx.prec = 15
    a = ctx.sqrt(t/(2*ctx.pi))
    xasigma = a ** xsigma
    yasigma = a ** ysigma

    # We need a simple bound A1 < asigma (see II Section 3.1 and 3.3)
    xA1=ctx.power(2, ctx.mag(xasigma)-1)
    yA1=ctx.power(2, ctx.mag(yasigma)-1)

    # We compute various epsilon's (see II end of Section 3.1)
    eps = ctx.power(2, -wpinitial)
    eps1 = eps/6.
    xeps2 = eps * xA1/3.
    yeps2 = eps * yA1/3.

    # COMPUTING SOME COEFFICIENTS THAT DEPEND
    # ON sigma
    # constant b and c (see I Theorem 2 formula (26) )
    # coefficients A and B1 (see I Section 6.1 equation (50))
    #
    # here we do not need high precision
    ctx.prec = 15
    if xsigma > 0:
        xb = 2.
        xc = math.pow(9,xsigma)/4.44288
        # 4.44288 = (math.sqrt(2)*math.pi)
        xA = math.pow(9,xsigma)
        xB1 = 1
    else:
        xb = 2.25158 # math.sqrt( (3-2* math.log(2))*math.pi )
        xc = math.pow(2,-xsigma)/4.44288
        xA = math.pow(2,-xsigma)
        xB1 = 1.10789 # = 2*sqrt(1-log(2))

    if(ysigma > 0):
        yb = 2.
+ yc = math.pow(9,ysigma)/4.44288 + # 4.44288 =(math.sqrt(2)*math.pi) + yA = math.pow(9,ysigma) + yB1 = 1 + else: + yb = 2.25158 # math.sqrt( (3-2* math.log(2))*math.pi ) + yc = math.pow(2,-ysigma)/4.44288 + yA = math.pow(2,-ysigma) + yB1 = 1.10789 # = 2*sqrt(1-log(2)) + + # COMPUTING L THE NUMBER OF TERMS NEEDED IN THE RIEMANN-SIEGEL + # CORRECTION + # See II Section 3.2 + ctx.prec = 15 + xL = 1 + while 3*xc*ctx.gamma(xL*0.5) * ctx.power(xb*a,-xL) >= xeps2: + xL = xL+1 + xL = max(2,xL) + yL = 1 + while 3*yc*ctx.gamma(yL*0.5) * ctx.power(yb*a,-yL) >= yeps2: + yL = yL+1 + yL = max(2,yL) + + # The number L has to satify some conditions. + # If not RS can not compute Rzeta(s) with the prescribed precision + # (see II, Section 3.2 condition (20) ) and + # (II, Section 3.3 condition (22) ). Also we have added + # an additional technical condition in Section 3.17 Proposition 17 + if ((3*xL >= 2*a*a/25.) or (3*xL+2+xsigma<0) or (abs(xsigma) > a/2.) or \ + (3*yL >= 2*a*a/25.) or (3*yL+2+ysigma<0) or (abs(ysigma) > a/2.)): + ctx.prec = wpinitial + raise NotImplementedError("Riemann-Siegel can not compute with such precision") + + # We take the maximum of the two values + L = max(xL, yL) + + # INITIALIZATION (CONTINUATION) + # + # eps3 is the constant defined on (II, Section 3.5 equation (27) ) + # each term of the RS correction must be computed with error <= eps3 + xeps3 = xeps2/(4*xL) + yeps3 = yeps2/(4*yL) + + # eps4 is defined on (II Section 3.6 equation (30) ) + # each component of the formula (II Section 3.6 equation (29) ) + # must be computed with error <= eps4 + xeps4 = xeps3/(3*xL) + yeps4 = yeps3/(3*yL) + + # COMPUTING M NUMBER OF DERIVATIVES Fp[m] TO COMPUTE + xM = aux_M_Fp(ctx, xA, xeps4, a, xB1, xL) + yM = aux_M_Fp(ctx, yA, yeps4, a, yB1, yL) + M = max(xM, yM) + + # COMPUTING NUMBER OF TERMS J NEEDED + h3 = aux_J_needed(ctx, xA, xeps4, a, xB1, xM) + h4 = aux_J_needed(ctx, yA, yeps4, a, yB1, yM) + h3 = min(h3,h4) + J = 12 + jvalue = (2*ctx.pi)**J / 
ctx.gamma(J+1) + while jvalue > h3: + J = J+1 + jvalue = (2*ctx.pi)*jvalue/J + + # COMPUTING eps5[m] for 1 <= m <= 21 + # See II Section 10 equation (43) + # We choose the minimum of the two possibilities + eps5={} + xforeps5 = math.pi*math.pi*xB1*a + yforeps5 = math.pi*math.pi*yB1*a + for m in range(0,22): + xaux1 = math.pow(xforeps5, m/3)/(316.*xA) + yaux1 = math.pow(yforeps5, m/3)/(316.*yA) + aux1 = min(xaux1, yaux1) + aux2 = ctx.gamma(m+1)/ctx.gamma(m/3.0+0.5) + aux2 = math.sqrt(aux2) + eps5[m] = (aux1*aux2*min(xeps4,yeps4)) + + # COMPUTING wpfp + # See II Section 3.13 equation (59) + twenty = min(3*L-3, 21)+1 + aux = 6812*J + wpfp = ctx.mag(44*J) + for m in range(0,twenty): + wpfp = max(wpfp, ctx.mag(aux*ctx.gamma(m+1)/eps5[m])) + + # COMPUTING N AND p + # See II Section + ctx.prec = wpfp + ctx.mag(t)+20 + a = ctx.sqrt(t/(2*ctx.pi)) + N = ctx.floor(a) + p = 1-2*(a-N) + + # now we get a rounded version of p + # to the precision wpfp + # this possibly is not necessary + num=ctx.floor(p*(ctx.mpf('2')**wpfp)) + difference = p * (ctx.mpf('2')**wpfp)-num + if (difference < 0.5): + num = num + else: + num = num+1 + p = ctx.convert(num * (ctx.mpf('2')**(-wpfp))) + + # COMPUTING THE COEFFICIENTS c[n] = cc[n] + # We shall use the notation cc[n], since there is + # a constant that is called c + # See II Section 3.14 + # We compute the coefficients and also save then in a + # cache. The bulk of the computation is passed to + # the function coef() + # + # eps6 is defined in II Section 3.13 equation (58) + eps6 = ctx.power(ctx.convert(2*ctx.pi), J)/(ctx.gamma(J+1)*3*J) + + # Now we compute the coefficients + cc = {} + cont = {} + cont, pipowers = coef(ctx, J, eps6) + cc=cont.copy() # we need a copy since we have to change his values. 
+ Fp={} # this is the adequate locus of this + for n in range(M, 3*L-2): + Fp[n] = 0 + Fp={} + ctx.prec = wpfp + for m in range(0,M+1): + sumP = 0 + for k in range(2*J-m-1,-1,-1): + sumP = (sumP * p)+ cc[k] + Fp[m] = sumP + # preparation of the new coefficients + for k in range(0,2*J-m-1): + cc[k] = (k+1)* cc[k+1] + + # COMPUTING THE NUMBERS xd[u,n,k], yd[u,n,k] + # See II Section 3.17 + # + # First we compute the working precisions xwpd[k] + # Se II equation (92) + xwpd={} + d1 = max(6,ctx.mag(40*L*L)) + xd2 = 13+ctx.mag((1+abs(xsigma))*xA)-ctx.mag(xeps4)-1 + xconst = ctx.ln(8/(ctx.pi*ctx.pi*a*a*xB1*xB1)) /2 + for n in range(0,L): + xd3 = ctx.mag(ctx.sqrt(ctx.gamma(n-0.5)))-ctx.floor(n*xconst)+xd2 + xwpd[n]=max(xd3,d1) + + # procedure of II Section 3.17 + ctx.prec = xwpd[1]+10 + xpsigma = 1-(2*xsigma) + xd = {} + xd[0,0,-2]=0; xd[0,0,-1]=0; xd[0,0,0]=1; xd[0,0,1]=0 + xd[0,-1,-2]=0; xd[0,-1,-1]=0; xd[0,-1,0]=1; xd[0,-1,1]=0 + for n in range(1,L): + ctx.prec = xwpd[n]+10 + for k in range(0,3*n//2+1): + m = 3*n-2*k + if(m!=0): + m1 = ctx.one/m + c1= m1/4 + c2=(xpsigma*m1)/2 + c3=-(m+1) + xd[0,n,k]=c3*xd[0,n-1,k-2]+c1*xd[0,n-1,k]+c2*xd[0,n-1,k-1] + else: + xd[0,n,k]=0 + for r in range(0,k): + add=xd[0,n,r]*(ctx.mpf('1.0')*ctx.fac(2*k-2*r)/ctx.fac(k-r)) + xd[0,n,k] -= ((-1)**(k-r))*add + xd[0,n,-2]=0; xd[0,n,-1]=0; xd[0,n,3*n//2+1]=0 + for mu in range(-2,der+1): + for n in range(-2,L): + for k in range(-3,max(1,3*n//2+2)): + if( (mu<0)or (n<0) or(k<0)or (k>3*n//2)): + xd[mu,n,k] = 0 + for mu in range(1,der+1): + for n in range(0,L): + ctx.prec = xwpd[n]+10 + for k in range(0,3*n//2+1): + aux=(2*mu-2)*xd[mu-2,n-2,k-3]+2*(xsigma+n-2)*xd[mu-1,n-2,k-3] + xd[mu,n,k] = aux - xd[mu-1,n-1,k-1] + + # Now we compute the working precisions ywpd[k] + # Se II equation (92) + ywpd={} + d1 = max(6,ctx.mag(40*L*L)) + yd2 = 13+ctx.mag((1+abs(ysigma))*yA)-ctx.mag(yeps4)-1 + yconst = ctx.ln(8/(ctx.pi*ctx.pi*a*a*yB1*yB1)) /2 + for n in range(0,L): + yd3 = 
ctx.mag(ctx.sqrt(ctx.gamma(n-0.5)))-ctx.floor(n*yconst)+yd2 + ywpd[n]=max(yd3,d1) + + # procedure of II Section 3.17 + ctx.prec = ywpd[1]+10 + ypsigma = 1-(2*ysigma) + yd = {} + yd[0,0,-2]=0; yd[0,0,-1]=0; yd[0,0,0]=1; yd[0,0,1]=0 + yd[0,-1,-2]=0; yd[0,-1,-1]=0; yd[0,-1,0]=1; yd[0,-1,1]=0 + for n in range(1,L): + ctx.prec = ywpd[n]+10 + for k in range(0,3*n//2+1): + m = 3*n-2*k + if(m!=0): + m1 = ctx.one/m + c1= m1/4 + c2=(ypsigma*m1)/2 + c3=-(m+1) + yd[0,n,k]=c3*yd[0,n-1,k-2]+c1*yd[0,n-1,k]+c2*yd[0,n-1,k-1] + else: + yd[0,n,k]=0 + for r in range(0,k): + add=yd[0,n,r]*(ctx.mpf('1.0')*ctx.fac(2*k-2*r)/ctx.fac(k-r)) + yd[0,n,k] -= ((-1)**(k-r))*add + yd[0,n,-2]=0; yd[0,n,-1]=0; yd[0,n,3*n//2+1]=0 + + for mu in range(-2,der+1): + for n in range(-2,L): + for k in range(-3,max(1,3*n//2+2)): + if( (mu<0)or (n<0) or(k<0)or (k>3*n//2)): + yd[mu,n,k] = 0 + for mu in range(1,der+1): + for n in range(0,L): + ctx.prec = ywpd[n]+10 + for k in range(0,3*n//2+1): + aux=(2*mu-2)*yd[mu-2,n-2,k-3]+2*(ysigma+n-2)*yd[mu-1,n-2,k-3] + yd[mu,n,k] = aux - yd[mu-1,n-1,k-1] + + # COMPUTING THE COEFFICIENTS xtcoef[k,l] + # See II Section 3.9 + # + # computing the needed wp + xwptcoef={} + xwpterm={} + ctx.prec = 15 + c1 = ctx.mag(40*(L+2)) + xc2 = ctx.mag(68*(L+2)*xA) + xc4 = ctx.mag(xB1*a*math.sqrt(ctx.pi))-1 + for k in range(0,L): + xc3 = xc2 - k*xc4+ctx.mag(ctx.fac(k+0.5))/2. + xwptcoef[k] = (max(c1,xc3-ctx.mag(xeps4)+1)+1 +20)*1.5 + xwpterm[k] = (max(c1,ctx.mag(L+2)+xc3-ctx.mag(xeps3)+1)+1 +20) + ywptcoef={} + ywpterm={} + ctx.prec = 15 + c1 = ctx.mag(40*(L+2)) + yc2 = ctx.mag(68*(L+2)*yA) + yc4 = ctx.mag(yB1*a*math.sqrt(ctx.pi))-1 + for k in range(0,L): + yc3 = yc2 - k*yc4+ctx.mag(ctx.fac(k+0.5))/2. 
+ ywptcoef[k] = ((max(c1,yc3-ctx.mag(yeps4)+1))+10)*1.5 + ywpterm[k] = (max(c1,ctx.mag(L+2)+yc3-ctx.mag(yeps3)+1)+1)+10 + + # check of power of pi + # computing the fortcoef[mu,k,ell] + xfortcoef={} + for mu in range(0,der+1): + for k in range(0,L): + for ell in range(-2,3*k//2+1): + xfortcoef[mu,k,ell]=0 + for mu in range(0,der+1): + for k in range(0,L): + ctx.prec = xwptcoef[k] + for ell in range(0,3*k//2+1): + xfortcoef[mu,k,ell]=xd[mu,k,ell]*Fp[3*k-2*ell]/pipowers[2*k-ell] + xfortcoef[mu,k,ell]=xfortcoef[mu,k,ell]/((2*ctx.j)**ell) + + def trunc_a(t): + wp = ctx.prec + ctx.prec = wp + 2 + aa = ctx.sqrt(t/(2*ctx.pi)) + ctx.prec = wp + return aa + + # computing the tcoef[k,ell] + xtcoef={} + for mu in range(0,der+1): + for k in range(0,L): + for ell in range(-2,3*k//2+1): + xtcoef[mu,k,ell]=0 + ctx.prec = max(xwptcoef[0],ywptcoef[0])+3 + aa= trunc_a(t) + la = -ctx.ln(aa) + + for chi in range(0,der+1): + for k in range(0,L): + ctx.prec = xwptcoef[k] + for ell in range(0,3*k//2+1): + xtcoef[chi,k,ell] =0 + for mu in range(0, chi+1): + tcoefter=ctx.binomial(chi,mu)*ctx.power(la,mu)*xfortcoef[chi-mu,k,ell] + xtcoef[chi,k,ell] += tcoefter + + # COMPUTING THE COEFFICIENTS ytcoef[k,l] + # See II Section 3.9 + # + # computing the needed wp + # check of power of pi + # computing the fortcoef[mu,k,ell] + yfortcoef={} + for mu in range(0,der+1): + for k in range(0,L): + for ell in range(-2,3*k//2+1): + yfortcoef[mu,k,ell]=0 + for mu in range(0,der+1): + for k in range(0,L): + ctx.prec = ywptcoef[k] + for ell in range(0,3*k//2+1): + yfortcoef[mu,k,ell]=yd[mu,k,ell]*Fp[3*k-2*ell]/pipowers[2*k-ell] + yfortcoef[mu,k,ell]=yfortcoef[mu,k,ell]/((2*ctx.j)**ell) + # computing the tcoef[k,ell] + ytcoef={} + for chi in range(0,der+1): + for k in range(0,L): + for ell in range(-2,3*k//2+1): + ytcoef[chi,k,ell]=0 + for chi in range(0,der+1): + for k in range(0,L): + ctx.prec = ywptcoef[k] + for ell in range(0,3*k//2+1): + ytcoef[chi,k,ell] =0 + for mu in range(0, chi+1): + 
tcoefter=ctx.binomial(chi,mu)*ctx.power(la,mu)*yfortcoef[chi-mu,k,ell] + ytcoef[chi,k,ell] += tcoefter + + # COMPUTING tv[k,ell] + # See II Section 3.8 + # + # a has a good value + ctx.prec = max(xwptcoef[0], ywptcoef[0])+2 + av = {} + av[0] = 1 + av[1] = av[0]/a + + ctx.prec = max(xwptcoef[0],ywptcoef[0]) + for k in range(2,L): + av[k] = av[k-1] * av[1] + + # Computing the quotients + xtv = {} + for chi in range(0,der+1): + for k in range(0,L): + ctx.prec = xwptcoef[k] + for ell in range(0,3*k//2+1): + xtv[chi,k,ell] = xtcoef[chi,k,ell]* av[k] + # Computing the quotients + ytv = {} + for chi in range(0,der+1): + for k in range(0,L): + ctx.prec = ywptcoef[k] + for ell in range(0,3*k//2+1): + ytv[chi,k,ell] = ytcoef[chi,k,ell]* av[k] + + # COMPUTING THE TERMS xterm[k] + # See II Section 3.6 + xterm = {} + for chi in range(0,der+1): + for n in range(0,L): + ctx.prec = xwpterm[n] + te = 0 + for k in range(0, 3*n//2+1): + te += xtv[chi,n,k] + xterm[chi,n] = te + + # COMPUTING THE TERMS yterm[k] + # See II Section 3.6 + yterm = {} + for chi in range(0,der+1): + for n in range(0,L): + ctx.prec = ywpterm[n] + te = 0 + for k in range(0, 3*n//2+1): + te += ytv[chi,n,k] + yterm[chi,n] = te + + # COMPUTING rssum + # See II Section 3.5 + xrssum={} + ctx.prec=15 + xrsbound = math.sqrt(ctx.pi) * xc /(xb*a) + ctx.prec=15 + xwprssum = ctx.mag(4.4*((L+3)**2)*xrsbound / xeps2) + xwprssum = max(xwprssum, ctx.mag(10*(L+1))) + ctx.prec = xwprssum + for chi in range(0,der+1): + xrssum[chi] = 0 + for k in range(1,L+1): + xrssum[chi] += xterm[chi,L-k] + yrssum={} + ctx.prec=15 + yrsbound = math.sqrt(ctx.pi) * yc /(yb*a) + ctx.prec=15 + ywprssum = ctx.mag(4.4*((L+3)**2)*yrsbound / yeps2) + ywprssum = max(ywprssum, ctx.mag(10*(L+1))) + ctx.prec = ywprssum + for chi in range(0,der+1): + yrssum[chi] = 0 + for k in range(1,L+1): + yrssum[chi] += yterm[chi,L-k] + + # COMPUTING S3 + # See II Section 3.19 + ctx.prec = 15 + A2 = 2**(max(ctx.mag(abs(xrssum[0])), ctx.mag(abs(yrssum[0])))) + eps8 = 
eps/(3*A2) + T = t *ctx.ln(t/(2*ctx.pi)) + xwps3 = 5 + ctx.mag((1+(2/eps8)*ctx.power(a,-xsigma))*T) + ywps3 = 5 + ctx.mag((1+(2/eps8)*ctx.power(a,-ysigma))*T) + + ctx.prec = max(xwps3, ywps3) + + tpi = t/(2*ctx.pi) + arg = (t/2)*ctx.ln(tpi)-(t/2)-ctx.pi/8 + U = ctx.expj(-arg) + a = trunc_a(t) + xasigma = ctx.power(a, -xsigma) + yasigma = ctx.power(a, -ysigma) + xS3 = ((-1)**(N-1)) * xasigma * U + yS3 = ((-1)**(N-1)) * yasigma * U + + # COMPUTING S1 the zetasum + # See II Section 3.18 + ctx.prec = 15 + xwpsum = 4+ ctx.mag((N+ctx.power(N,1-xsigma))*ctx.ln(N) /eps1) + ywpsum = 4+ ctx.mag((N+ctx.power(N,1-ysigma))*ctx.ln(N) /eps1) + wpsum = max(xwpsum, ywpsum) + + ctx.prec = wpsum +10 + ''' + # This can be improved + xS1={} + yS1={} + for chi in range(0,der+1): + xS1[chi] = 0 + yS1[chi] = 0 + for n in range(1,int(N)+1): + ln = ctx.ln(n) + xexpn = ctx.exp(-ln*(xsigma+ctx.j*t)) + yexpn = ctx.conj(1/(n*xexpn)) + for chi in range(0,der+1): + pown = ctx.power(-ln, chi) + xterm = pown*xexpn + yterm = pown*yexpn + xS1[chi] += xterm + yS1[chi] += yterm + ''' + xS1, yS1 = ctx._zetasum(s, 1, int(N)-1, range(0,der+1), True) + + # END OF COMPUTATION of xrz, yrz + # See II Section 3.1 + ctx.prec = 15 + xabsS1 = abs(xS1[der]) + xabsS2 = abs(xrssum[der] * xS3) + xwpend = max(6, wpinitial+ctx.mag(6*(3*xabsS1+7*xabsS2) ) ) + + ctx.prec = xwpend + xrz={} + for chi in range(0,der+1): + xrz[chi] = xS1[chi]+xrssum[chi]*xS3 + + ctx.prec = 15 + yabsS1 = abs(yS1[der]) + yabsS2 = abs(yrssum[der] * yS3) + ywpend = max(6, wpinitial+ctx.mag(6*(3*yabsS1+7*yabsS2) ) ) + + ctx.prec = ywpend + yrz={} + for chi in range(0,der+1): + yrz[chi] = yS1[chi]+yrssum[chi]*yS3 + yrz[chi] = ctx.conj(yrz[chi]) + ctx.prec = wpinitial + return xrz, yrz + +def Rzeta_set(ctx, s, derivatives=[0]): + r""" + Computes several derivatives of the auxiliary function of Riemann `R(s)`. + + **Definition** + + The function is defined by + + .. 
math :: + + \begin{equation} + {\mathop{\mathcal R }\nolimits}(s)= + \int_{0\swarrow1}\frac{x^{-s} e^{\pi i x^2}}{e^{\pi i x}- + e^{-\pi i x}}\,dx + \end{equation} + + To this function we apply the Riemann-Siegel expansion. + """ + der = max(derivatives) + # First we take the value of ctx.prec + # During the computation we will change ctx.prec, and finally we will + # restaurate the initial value + wpinitial = ctx.prec + # Take the real and imaginary part of s + t = ctx._im(s) + sigma = ctx._re(s) + # Now compute several parameter that appear on the program + ctx.prec = 15 + a = ctx.sqrt(t/(2*ctx.pi)) # Careful + asigma = ctx.power(a, sigma) # Careful + # We need a simple bound A1 < asigma (see II Section 3.1 and 3.3) + A1 = ctx.power(2, ctx.mag(asigma)-1) + # We compute various epsilon's (see II end of Section 3.1) + eps = ctx.power(2, -wpinitial) + eps1 = eps/6. + eps2 = eps * A1/3. + # COMPUTING SOME COEFFICIENTS THAT DEPENDS + # ON sigma + # constant b and c (see I Theorem 2 formula (26) ) + # coefficients A and B1 (see I Section 6.1 equation (50)) + # here we not need high precision + ctx.prec = 15 + if sigma > 0: + b = 2. + c = math.pow(9,sigma)/4.44288 + # 4.44288 =(math.sqrt(2)*math.pi) + A = math.pow(9,sigma) + B1 = 1 + else: + b = 2.25158 # math.sqrt( (3-2* math.log(2))*math.pi ) + c = math.pow(2,-sigma)/4.44288 + A = math.pow(2,-sigma) + B1 = 1.10789 # = 2*sqrt(1-log(2)) + # COMPUTING L THE NUMBER OF TERMS NEEDED IN THE RIEMANN-SIEGEL + # CORRECTION + # See II Section 3.2 + ctx.prec = 15 + L = 1 + while 3*c*ctx.gamma(L*0.5) * ctx.power(b*a,-L) >= eps2: + L = L+1 + L = max(2,L) + # The number L has to satify some conditions. + # If not RS can not compute Rzeta(s) with the prescribed precision + # (see II, Section 3.2 condition (20) ) and + # (II, Section 3.3 condition (22) ). Also we have added + # an additional technical condition in Section 3.17 Proposition 17 + if ((3*L >= 2*a*a/25.) 
or (3*L+2+sigma<0) or (abs(sigma)> a/2.)): + #print 'Error Riemann-Siegel can not compute with such precision' + ctx.prec = wpinitial + raise NotImplementedError("Riemann-Siegel can not compute with such precision") + + # INITIALIZATION (CONTINUATION) + # + # eps3 is the constant defined on (II, Section 3.5 equation (27) ) + # each term of the RS correction must be computed with error <= eps3 + eps3 = eps2/(4*L) + + # eps4 is defined on (II Section 3.6 equation (30) ) + # each component of the formula (II Section 3.6 equation (29) ) + # must be computed with error <= eps4 + eps4 = eps3/(3*L) + + # COMPUTING M. NUMBER OF DERIVATIVES Fp[m] TO COMPUTE + M = aux_M_Fp(ctx, A, eps4, a, B1, L) + Fp = {} + for n in range(M, 3*L-2): + Fp[n] = 0 + + # But I have not seen an instance of M != 3*L-3 + # + # DETERMINATION OF J THE NUMBER OF TERMS NEEDED + # IN THE TAYLOR SERIES OF F. + # See II Section 3.11 equation (49)) + h1 = eps4/(632*A) + h2 = ctx.pi*ctx.pi*B1*a *ctx.sqrt(3)*math.e*math.e + h2 = h1 * ctx.power((h2/M**2),(M-1)/3) / M + h3 = min(h1,h2) + J=12 + jvalue = (2*ctx.pi)**J / ctx.gamma(J+1) + while jvalue > h3: + J = J+1 + jvalue = (2*ctx.pi)*jvalue/J + + # COMPUTING eps5[m] for 1 <= m <= 21 + # See II Section 10 equation (43) + eps5={} + foreps5 = math.pi*math.pi*B1*a + for m in range(0,22): + aux1 = math.pow(foreps5, m/3)/(316.*A) + aux2 = ctx.gamma(m+1)/ctx.gamma(m/3.0+0.5) + aux2 = math.sqrt(aux2) + eps5[m] = aux1*aux2*eps4 + + # COMPUTING wpfp + # See II Section 3.13 equation (59) + twenty = min(3*L-3, 21)+1 + aux = 6812*J + wpfp = ctx.mag(44*J) + for m in range(0, twenty): + wpfp = max(wpfp, ctx.mag(aux*ctx.gamma(m+1)/eps5[m])) + # COMPUTING N AND p + # See II Section + ctx.prec = wpfp + ctx.mag(t) + 20 + a = ctx.sqrt(t/(2*ctx.pi)) + N = ctx.floor(a) + p = 1-2*(a-N) + + # now we get a rounded version of p to the precision wpfp + # this possibly is not necessary + num = ctx.floor(p*(ctx.mpf(2)**wpfp)) + difference = p * (ctx.mpf(2)**wpfp)-num + if difference < 
0.5: + num = num + else: + num = num+1 + p = ctx.convert(num * (ctx.mpf(2)**(-wpfp))) + + # COMPUTING THE COEFFICIENTS c[n] = cc[n] + # We shall use the notation cc[n], since there is + # a constant that is called c + # See II Section 3.14 + # We compute the coefficients and also save then in a + # cache. The bulk of the computation is passed to + # the function coef() + # + # eps6 is defined in II Section 3.13 equation (58) + eps6 = ctx.power(2*ctx.pi, J)/(ctx.gamma(J+1)*3*J) + + # Now we compute the coefficients + cc={} + cont={} + cont, pipowers = coef(ctx, J, eps6) + cc = cont.copy() # we need a copy since we have + Fp={} + for n in range(M, 3*L-2): + Fp[n] = 0 + ctx.prec = wpfp + for m in range(0,M+1): + sumP = 0 + for k in range(2*J-m-1,-1,-1): + sumP = (sumP * p) + cc[k] + Fp[m] = sumP + # preparation of the new coefficients + for k in range(0, 2*J-m-1): + cc[k] = (k+1) * cc[k+1] + + # COMPUTING THE NUMBERS d[n,k] + # See II Section 3.17 + + # First we compute the working precisions wpd[k] + # Se II equation (92) + wpd = {} + d1 = max(6, ctx.mag(40*L*L)) + d2 = 13+ctx.mag((1+abs(sigma))*A)-ctx.mag(eps4)-1 + const = ctx.ln(8/(ctx.pi*ctx.pi*a*a*B1*B1)) /2 + for n in range(0,L): + d3 = ctx.mag(ctx.sqrt(ctx.gamma(n-0.5)))-ctx.floor(n*const)+d2 + wpd[n] = max(d3,d1) + + # procedure of II Section 3.17 + ctx.prec = wpd[1]+10 + psigma = 1-(2*sigma) + d = {} + d[0,0,-2]=0; d[0,0,-1]=0; d[0,0,0]=1; d[0,0,1]=0 + d[0,-1,-2]=0; d[0,-1,-1]=0; d[0,-1,0]=1; d[0,-1,1]=0 + for n in range(1,L): + ctx.prec = wpd[n]+10 + for k in range(0,3*n//2+1): + m = 3*n-2*k + if (m!=0): + m1 = ctx.one/m + c1 = m1/4 + c2 = (psigma*m1)/2 + c3 = -(m+1) + d[0,n,k] = c3*d[0,n-1,k-2]+c1*d[0,n-1,k]+c2*d[0,n-1,k-1] + else: + d[0,n,k]=0 + for r in range(0,k): + add = d[0,n,r]*(ctx.one*ctx.fac(2*k-2*r)/ctx.fac(k-r)) + d[0,n,k] -= ((-1)**(k-r))*add + d[0,n,-2]=0; d[0,n,-1]=0; d[0,n,3*n//2+1]=0 + + for mu in range(-2,der+1): + for n in range(-2,L): + for k in range(-3,max(1,3*n//2+2)): + if ((mu<0)or 
(n<0) or(k<0)or (k>3*n//2)): + d[mu,n,k] = 0 + + for mu in range(1,der+1): + for n in range(0,L): + ctx.prec = wpd[n]+10 + for k in range(0,3*n//2+1): + aux=(2*mu-2)*d[mu-2,n-2,k-3]+2*(sigma+n-2)*d[mu-1,n-2,k-3] + d[mu,n,k] = aux - d[mu-1,n-1,k-1] + + # COMPUTING THE COEFFICIENTS t[k,l] + # See II Section 3.9 + # + # computing the needed wp + wptcoef = {} + wpterm = {} + ctx.prec = 15 + c1 = ctx.mag(40*(L+2)) + c2 = ctx.mag(68*(L+2)*A) + c4 = ctx.mag(B1*a*math.sqrt(ctx.pi))-1 + for k in range(0,L): + c3 = c2 - k*c4+ctx.mag(ctx.fac(k+0.5))/2. + wptcoef[k] = max(c1,c3-ctx.mag(eps4)+1)+1 +10 + wpterm[k] = max(c1,ctx.mag(L+2)+c3-ctx.mag(eps3)+1)+1 +10 + + # check of power of pi + + # computing the fortcoef[mu,k,ell] + fortcoef={} + for mu in derivatives: + for k in range(0,L): + for ell in range(-2,3*k//2+1): + fortcoef[mu,k,ell]=0 + + for mu in derivatives: + for k in range(0,L): + ctx.prec = wptcoef[k] + for ell in range(0,3*k//2+1): + fortcoef[mu,k,ell]=d[mu,k,ell]*Fp[3*k-2*ell]/pipowers[2*k-ell] + fortcoef[mu,k,ell]=fortcoef[mu,k,ell]/((2*ctx.j)**ell) + + def trunc_a(t): + wp = ctx.prec + ctx.prec = wp + 2 + aa = ctx.sqrt(t/(2*ctx.pi)) + ctx.prec = wp + return aa + + # computing the tcoef[chi,k,ell] + tcoef={} + for chi in derivatives: + for k in range(0,L): + for ell in range(-2,3*k//2+1): + tcoef[chi,k,ell]=0 + ctx.prec = wptcoef[0]+3 + aa = trunc_a(t) + la = -ctx.ln(aa) + + for chi in derivatives: + for k in range(0,L): + ctx.prec = wptcoef[k] + for ell in range(0,3*k//2+1): + tcoef[chi,k,ell] = 0 + for mu in range(0, chi+1): + tcoefter = ctx.binomial(chi,mu) * la**mu * \ + fortcoef[chi-mu,k,ell] + tcoef[chi,k,ell] += tcoefter + + # COMPUTING tv[k,ell] + # See II Section 3.8 + + # Computing the powers av[k] = a**(-k) + ctx.prec = wptcoef[0] + 2 + + # a has a good value of a. 
+ # See II Section 3.6 + av = {} + av[0] = 1 + av[1] = av[0]/a + + ctx.prec = wptcoef[0] + for k in range(2,L): + av[k] = av[k-1] * av[1] + + # Computing the quotients + tv = {} + for chi in derivatives: + for k in range(0,L): + ctx.prec = wptcoef[k] + for ell in range(0,3*k//2+1): + tv[chi,k,ell] = tcoef[chi,k,ell]* av[k] + + # COMPUTING THE TERMS term[k] + # See II Section 3.6 + term = {} + for chi in derivatives: + for n in range(0,L): + ctx.prec = wpterm[n] + te = 0 + for k in range(0, 3*n//2+1): + te += tv[chi,n,k] + term[chi,n] = te + + # COMPUTING rssum + # See II Section 3.5 + rssum={} + ctx.prec=15 + rsbound = math.sqrt(ctx.pi) * c /(b*a) + ctx.prec=15 + wprssum = ctx.mag(4.4*((L+3)**2)*rsbound / eps2) + wprssum = max(wprssum, ctx.mag(10*(L+1))) + ctx.prec = wprssum + for chi in derivatives: + rssum[chi] = 0 + for k in range(1,L+1): + rssum[chi] += term[chi,L-k] + + # COMPUTING S3 + # See II Section 3.19 + ctx.prec = 15 + A2 = 2**(ctx.mag(rssum[0])) + eps8 = eps/(3* A2) + T = t * ctx.ln(t/(2*ctx.pi)) + wps3 = 5 + ctx.mag((1+(2/eps8)*ctx.power(a,-sigma))*T) + + ctx.prec = wps3 + tpi = t/(2*ctx.pi) + arg = (t/2)*ctx.ln(tpi)-(t/2)-ctx.pi/8 + U = ctx.expj(-arg) + a = trunc_a(t) + asigma = ctx.power(a, -sigma) + S3 = ((-1)**(N-1)) * asigma * U + + # COMPUTING S1 the zetasum + # See II Section 3.18 + ctx.prec = 15 + wpsum = 4 + ctx.mag((N+ctx.power(N,1-sigma))*ctx.ln(N)/eps1) + + ctx.prec = wpsum + 10 + ''' + # This can be improved + S1 = {} + for chi in derivatives: + S1[chi] = 0 + for n in range(1,int(N)+1): + ln = ctx.ln(n) + expn = ctx.exp(-ln*(sigma+ctx.j*t)) + for chi in derivatives: + term = ctx.power(-ln, chi)*expn + S1[chi] += term + ''' + S1 = ctx._zetasum(s, 1, int(N)-1, derivatives)[0] + + # END OF COMPUTATION + # See II Section 3.1 + ctx.prec = 15 + absS1 = abs(S1[der]) + absS2 = abs(rssum[der] * S3) + wpend = max(6, wpinitial + ctx.mag(6*(3*absS1+7*absS2))) + ctx.prec = wpend + rz = {} + for chi in derivatives: + rz[chi] = S1[chi]+rssum[chi]*S3 + 
ctx.prec = wpinitial + return rz + + +def z_half(ctx,t,der=0): + r""" + z_half(t,der=0) Computes Z^(der)(t) + """ + s=ctx.mpf('0.5')+ctx.j*t + wpinitial = ctx.prec + ctx.prec = 15 + tt = t/(2*ctx.pi) + wptheta = wpinitial +1 + ctx.mag(3*(tt**1.5)*ctx.ln(tt)) + wpz = wpinitial + 1 + ctx.mag(12*tt*ctx.ln(tt)) + ctx.prec = wptheta + theta = ctx.siegeltheta(t) + ctx.prec = wpz + rz = Rzeta_set(ctx,s, range(der+1)) + if der > 0: ps1 = ctx._re(ctx.psi(0,s/2)/2 - ctx.ln(ctx.pi)/2) + if der > 1: ps2 = ctx._re(ctx.j*ctx.psi(1,s/2)/4) + if der > 2: ps3 = ctx._re(-ctx.psi(2,s/2)/8) + if der > 3: ps4 = ctx._re(-ctx.j*ctx.psi(3,s/2)/16) + exptheta = ctx.expj(theta) + if der == 0: + z = 2*exptheta*rz[0] + if der == 1: + zf = 2j*exptheta + z = zf*(ps1*rz[0]+rz[1]) + if der == 2: + zf = 2 * exptheta + z = -zf*(2*rz[1]*ps1+rz[0]*ps1**2+rz[2]-ctx.j*rz[0]*ps2) + if der == 3: + zf = -2j*exptheta + z = 3*rz[1]*ps1**2+rz[0]*ps1**3+3*ps1*rz[2] + z = zf*(z-3j*rz[1]*ps2-3j*rz[0]*ps1*ps2+rz[3]-rz[0]*ps3) + if der == 4: + zf = 2*exptheta + z = 4*rz[1]*ps1**3+rz[0]*ps1**4+6*ps1**2*rz[2] + z = z-12j*rz[1]*ps1*ps2-6j*rz[0]*ps1**2*ps2-6j*rz[2]*ps2-3*rz[0]*ps2*ps2 + z = z + 4*ps1*rz[3]-4*rz[1]*ps3-4*rz[0]*ps1*ps3+rz[4]+ctx.j*rz[0]*ps4 + z = zf*z + ctx.prec = wpinitial + return ctx._re(z) + +def zeta_half(ctx, s, k=0): + """ + zeta_half(s,k=0) Computes zeta^(k)(s) when Re s = 0.5 + """ + wpinitial = ctx.prec + sigma = ctx._re(s) + t = ctx._im(s) + #--- compute wptheta, wpR, wpbasic --- + ctx.prec = 53 + # X see II Section 3.21 (109) and (110) + if sigma > 0: + X = ctx.sqrt(abs(s)) + else: + X = (2*ctx.pi)**(sigma-1) * abs(1-s)**(0.5-sigma) + # M1 see II Section 3.21 (111) and (112) + if sigma > 0: + M1 = 2*ctx.sqrt(t/(2*ctx.pi)) + else: + M1 = 4 * t * X + # T see II Section 3.21 (113) + abst = abs(0.5-s) + T = 2* abst*math.log(abst) + # computing wpbasic, wptheta, wpR see II Section 3.21 + wpbasic = max(6,3+ctx.mag(t)) + wpbasic2 = 2+ctx.mag(2.12*M1+21.2*M1*X+1.3*M1*X*T)+wpinitial+1 + wpbasic = 
max(wpbasic, wpbasic2) + wptheta = max(4, 3+ctx.mag(2.7*M1*X)+wpinitial+1) + wpR = 3+ctx.mag(1.1+2*X)+wpinitial+1 + ctx.prec = wptheta + theta = ctx.siegeltheta(t-ctx.j*(sigma-ctx.mpf('0.5'))) + if k > 0: ps1 = (ctx._re(ctx.psi(0,s/2)))/2 - ctx.ln(ctx.pi)/2 + if k > 1: ps2 = -(ctx._im(ctx.psi(1,s/2)))/4 + if k > 2: ps3 = -(ctx._re(ctx.psi(2,s/2)))/8 + if k > 3: ps4 = (ctx._im(ctx.psi(3,s/2)))/16 + ctx.prec = wpR + xrz = Rzeta_set(ctx,s,range(k+1)) + yrz={} + for chi in range(0,k+1): + yrz[chi] = ctx.conj(xrz[chi]) + ctx.prec = wpbasic + exptheta = ctx.expj(-2*theta) + if k==0: + zv = xrz[0]+exptheta*yrz[0] + if k==1: + zv1 = -yrz[1] - 2*yrz[0]*ps1 + zv = xrz[1] + exptheta*zv1 + if k==2: + zv1 = 4*yrz[1]*ps1+4*yrz[0]*(ps1**2)+yrz[2]+2j*yrz[0]*ps2 + zv = xrz[2]+exptheta*zv1 + if k==3: + zv1 = -12*yrz[1]*ps1**2-8*yrz[0]*ps1**3-6*yrz[2]*ps1-6j*yrz[1]*ps2 + zv1 = zv1 - 12j*yrz[0]*ps1*ps2-yrz[3]+2*yrz[0]*ps3 + zv = xrz[3]+exptheta*zv1 + if k == 4: + zv1 = 32*yrz[1]*ps1**3 +16*yrz[0]*ps1**4+24*yrz[2]*ps1**2 + zv1 = zv1 +48j*yrz[1]*ps1*ps2+48j*yrz[0]*(ps1**2)*ps2 + zv1 = zv1+12j*yrz[2]*ps2-12*yrz[0]*ps2**2+8*yrz[3]*ps1-8*yrz[1]*ps3 + zv1 = zv1-16*yrz[0]*ps1*ps3+yrz[4]-2j*yrz[0]*ps4 + zv = xrz[4]+exptheta*zv1 + ctx.prec = wpinitial + return zv + +def zeta_offline(ctx, s, k=0): + """ + Computes zeta^(k)(s) off the line + """ + wpinitial = ctx.prec + sigma = ctx._re(s) + t = ctx._im(s) + #--- compute wptheta, wpR, wpbasic --- + ctx.prec = 53 + # X see II Section 3.21 (109) and (110) + if sigma > 0: + X = ctx.power(abs(s), 0.5) + else: + X = ctx.power(2*ctx.pi, sigma-1)*ctx.power(abs(1-s),0.5-sigma) + # M1 see II Section 3.21 (111) and (112) + if (sigma > 0): + M1 = 2*ctx.sqrt(t/(2*ctx.pi)) + else: + M1 = 4 * t * X + # M2 see II Section 3.21 (111) and (112) + if (1-sigma > 0): + M2 = 2*ctx.sqrt(t/(2*ctx.pi)) + else: + M2 = 4*t*ctx.power(2*ctx.pi, -sigma)*ctx.power(abs(s),sigma-0.5) + # T see II Section 3.21 (113) + abst = abs(0.5-s) + T = 2* abst*math.log(abst) + # computing 
wpbasic, wptheta, wpR see II Section 3.21 + wpbasic = max(6,3+ctx.mag(t)) + wpbasic2 = 2+ctx.mag(2.12*M1+21.2*M2*X+1.3*M2*X*T)+wpinitial+1 + wpbasic = max(wpbasic, wpbasic2) + wptheta = max(4, 3+ctx.mag(2.7*M2*X)+wpinitial+1) + wpR = 3+ctx.mag(1.1+2*X)+wpinitial+1 + ctx.prec = wptheta + theta = ctx.siegeltheta(t-ctx.j*(sigma-ctx.mpf('0.5'))) + s1 = s + s2 = ctx.conj(1-s1) + ctx.prec = wpR + xrz, yrz = Rzeta_simul(ctx, s, k) + if k > 0: ps1 = (ctx.psi(0,s1/2)+ctx.psi(0,(1-s1)/2))/4 - ctx.ln(ctx.pi)/2 + if k > 1: ps2 = ctx.j*(ctx.psi(1,s1/2)-ctx.psi(1,(1-s1)/2))/8 + if k > 2: ps3 = -(ctx.psi(2,s1/2)+ctx.psi(2,(1-s1)/2))/16 + if k > 3: ps4 = -ctx.j*(ctx.psi(3,s1/2)-ctx.psi(3,(1-s1)/2))/32 + ctx.prec = wpbasic + exptheta = ctx.expj(-2*theta) + if k == 0: + zv = xrz[0]+exptheta*yrz[0] + if k == 1: + zv1 = -yrz[1]-2*yrz[0]*ps1 + zv = xrz[1]+exptheta*zv1 + if k == 2: + zv1 = 4*yrz[1]*ps1+4*yrz[0]*(ps1**2) +yrz[2]+2j*yrz[0]*ps2 + zv = xrz[2]+exptheta*zv1 + if k == 3: + zv1 = -12*yrz[1]*ps1**2 -8*yrz[0]*ps1**3-6*yrz[2]*ps1-6j*yrz[1]*ps2 + zv1 = zv1 - 12j*yrz[0]*ps1*ps2-yrz[3]+2*yrz[0]*ps3 + zv = xrz[3]+exptheta*zv1 + if k == 4: + zv1 = 32*yrz[1]*ps1**3 +16*yrz[0]*ps1**4+24*yrz[2]*ps1**2 + zv1 = zv1 +48j*yrz[1]*ps1*ps2+48j*yrz[0]*(ps1**2)*ps2 + zv1 = zv1+12j*yrz[2]*ps2-12*yrz[0]*ps2**2+8*yrz[3]*ps1-8*yrz[1]*ps3 + zv1 = zv1-16*yrz[0]*ps1*ps3+yrz[4]-2j*yrz[0]*ps4 + zv = xrz[4]+exptheta*zv1 + ctx.prec = wpinitial + return zv + +def z_offline(ctx, w, k=0): + r""" + Computes Z(w) and its derivatives off the line + """ + s = ctx.mpf('0.5')+ctx.j*w + s1 = s + s2 = ctx.conj(1-s1) + wpinitial = ctx.prec + ctx.prec = 35 + # X see II Section 3.21 (109) and (110) + # M1 see II Section 3.21 (111) and (112) + if (ctx._re(s1) >= 0): + M1 = 2*ctx.sqrt(ctx._im(s1)/(2 * ctx.pi)) + X = ctx.sqrt(abs(s1)) + else: + X = (2*ctx.pi)**(ctx._re(s1)-1) * abs(1-s1)**(0.5-ctx._re(s1)) + M1 = 4 * ctx._im(s1)*X + # M2 see II Section 3.21 (111) and (112) + if (ctx._re(s2) >= 0): + M2 = 
2*ctx.sqrt(ctx._im(s2)/(2 * ctx.pi)) + else: + M2 = 4 * ctx._im(s2)*(2*ctx.pi)**(ctx._re(s2)-1)*abs(1-s2)**(0.5-ctx._re(s2)) + # T see II Section 3.21 Prop. 27 + T = 2*abs(ctx.siegeltheta(w)) + # defining some precisions + # see II Section 3.22 (115), (116), (117) + aux1 = ctx.sqrt(X) + aux2 = aux1*(M1+M2) + aux3 = 3 +wpinitial + wpbasic = max(6, 3+ctx.mag(T), ctx.mag(aux2*(26+2*T))+aux3) + wptheta = max(4,ctx.mag(2.04*aux2)+aux3) + wpR = ctx.mag(4*aux1)+aux3 + # now the computations + ctx.prec = wptheta + theta = ctx.siegeltheta(w) + ctx.prec = wpR + xrz, yrz = Rzeta_simul(ctx,s,k) + pta = 0.25 + 0.5j*w + ptb = 0.25 - 0.5j*w + if k > 0: ps1 = 0.25*(ctx.psi(0,pta)+ctx.psi(0,ptb)) - ctx.ln(ctx.pi)/2 + if k > 1: ps2 = (1j/8)*(ctx.psi(1,pta)-ctx.psi(1,ptb)) + if k > 2: ps3 = (-1./16)*(ctx.psi(2,pta)+ctx.psi(2,ptb)) + if k > 3: ps4 = (-1j/32)*(ctx.psi(3,pta)-ctx.psi(3,ptb)) + ctx.prec = wpbasic + exptheta = ctx.expj(theta) + if k == 0: + zv = exptheta*xrz[0]+yrz[0]/exptheta + j = ctx.j + if k == 1: + zv = j*exptheta*(xrz[1]+xrz[0]*ps1)-j*(yrz[1]+yrz[0]*ps1)/exptheta + if k == 2: + zv = exptheta*(-2*xrz[1]*ps1-xrz[0]*ps1**2-xrz[2]+j*xrz[0]*ps2) + zv =zv + (-2*yrz[1]*ps1-yrz[0]*ps1**2-yrz[2]-j*yrz[0]*ps2)/exptheta + if k == 3: + zv1 = -3*xrz[1]*ps1**2-xrz[0]*ps1**3-3*xrz[2]*ps1+j*3*xrz[1]*ps2 + zv1 = (zv1+ 3j*xrz[0]*ps1*ps2-xrz[3]+xrz[0]*ps3)*j*exptheta + zv2 = 3*yrz[1]*ps1**2+yrz[0]*ps1**3+3*yrz[2]*ps1+j*3*yrz[1]*ps2 + zv2 = j*(zv2 + 3j*yrz[0]*ps1*ps2+ yrz[3]-yrz[0]*ps3)/exptheta + zv = zv1+zv2 + if k == 4: + zv1 = 4*xrz[1]*ps1**3+xrz[0]*ps1**4 + 6*xrz[2]*ps1**2 + zv1 = zv1-12j*xrz[1]*ps1*ps2-6j*xrz[0]*ps1**2*ps2-6j*xrz[2]*ps2 + zv1 = zv1-3*xrz[0]*ps2*ps2+4*xrz[3]*ps1-4*xrz[1]*ps3-4*xrz[0]*ps1*ps3 + zv1 = zv1+xrz[4]+j*xrz[0]*ps4 + zv2 = 4*yrz[1]*ps1**3+yrz[0]*ps1**4 + 6*yrz[2]*ps1**2 + zv2 = zv2+12j*yrz[1]*ps1*ps2+6j*yrz[0]*ps1**2*ps2+6j*yrz[2]*ps2 + zv2 = zv2-3*yrz[0]*ps2*ps2+4*yrz[3]*ps1-4*yrz[1]*ps3-4*yrz[0]*ps1*ps3 + zv2 = zv2+yrz[4]-j*yrz[0]*ps4 + zv = 
exptheta*zv1+zv2/exptheta + ctx.prec = wpinitial + return zv + +@defun +def rs_zeta(ctx, s, derivative=0, **kwargs): + if derivative > 4: + raise NotImplementedError + s = ctx.convert(s) + re = ctx._re(s); im = ctx._im(s) + if im < 0: + z = ctx.conj(ctx.rs_zeta(ctx.conj(s), derivative)) + return z + critical_line = (re == 0.5) + if critical_line: + return zeta_half(ctx, s, derivative) + else: + return zeta_offline(ctx, s, derivative) + +@defun +def rs_z(ctx, w, derivative=0): + w = ctx.convert(w) + re = ctx._re(w); im = ctx._im(w) + if re < 0: + return rs_z(ctx, -w, derivative) + critical_line = (im == 0) + if critical_line : + return z_half(ctx, w, derivative) + else: + return z_offline(ctx, w, derivative) diff --git a/.venv/lib/python3.11/site-packages/mpmath/functions/signals.py b/.venv/lib/python3.11/site-packages/mpmath/functions/signals.py new file mode 100644 index 0000000000000000000000000000000000000000..6fadafb2dbb44fe19a2defa8d807d81d7c8e2789 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/functions/signals.py @@ -0,0 +1,32 @@ +from .functions import defun_wrapped + +@defun_wrapped +def squarew(ctx, t, amplitude=1, period=1): + P = period + A = amplitude + return A*((-1)**ctx.floor(2*t/P)) + +@defun_wrapped +def trianglew(ctx, t, amplitude=1, period=1): + A = amplitude + P = period + + return 2*A*(0.5 - ctx.fabs(1 - 2*ctx.frac(t/P + 0.25))) + +@defun_wrapped +def sawtoothw(ctx, t, amplitude=1, period=1): + A = amplitude + P = period + return A*ctx.frac(t/P) + +@defun_wrapped +def unit_triangle(ctx, t, amplitude=1): + A = amplitude + if t <= -1 or t >= 1: + return ctx.zero + return A*(-ctx.fabs(t) + 1) + +@defun_wrapped +def sigmoid(ctx, t, amplitude=1): + A = amplitude + return A / (1 + ctx.exp(-t)) diff --git a/.venv/lib/python3.11/site-packages/mpmath/functions/theta.py b/.venv/lib/python3.11/site-packages/mpmath/functions/theta.py new file mode 100644 index 
0000000000000000000000000000000000000000..2b3d8323a163a43186b85417a1b40f3b656c30d0 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/functions/theta.py @@ -0,0 +1,1049 @@ +from .functions import defun, defun_wrapped + +@defun +def _jacobi_theta2(ctx, z, q): + extra1 = 10 + extra2 = 20 + # the loops below break when the fixed precision quantities + # a and b go to zero; + # right shifting small negative numbers by wp one obtains -1, not zero, + # so the condition a**2 + b**2 > MIN is used to break the loops. + MIN = 2 + if z == ctx.zero: + if (not ctx._im(q)): + wp = ctx.prec + extra1 + x = ctx.to_fixed(ctx._re(q), wp) + x2 = (x*x) >> wp + a = b = x2 + s = x2 + while abs(a) > MIN: + b = (b*x2) >> wp + a = (a*b) >> wp + s += a + s = (1 << (wp+1)) + (s << 1) + s = ctx.ldexp(s, -wp) + else: + wp = ctx.prec + extra1 + xre = ctx.to_fixed(ctx._re(q), wp) + xim = ctx.to_fixed(ctx._im(q), wp) + x2re = (xre*xre - xim*xim) >> wp + x2im = (xre*xim) >> (wp-1) + are = bre = x2re + aim = bim = x2im + sre = (1< MIN: + bre, bim = (bre * x2re - bim * x2im) >> wp, \ + (bre * x2im + bim * x2re) >> wp + are, aim = (are * bre - aim * bim) >> wp, \ + (are * bim + aim * bre) >> wp + sre += are + sim += aim + sre = (sre << 1) + sim = (sim << 1) + sre = ctx.ldexp(sre, -wp) + sim = ctx.ldexp(sim, -wp) + s = ctx.mpc(sre, sim) + else: + if (not ctx._im(q)) and (not ctx._im(z)): + wp = ctx.prec + extra1 + x = ctx.to_fixed(ctx._re(q), wp) + x2 = (x*x) >> wp + a = b = x2 + c1, s1 = ctx.cos_sin(ctx._re(z), prec=wp) + cn = c1 = ctx.to_fixed(c1, wp) + sn = s1 = ctx.to_fixed(s1, wp) + c2 = (c1*c1 - s1*s1) >> wp + s2 = (c1 * s1) >> (wp - 1) + cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp + s = c1 + ((a * cn) >> wp) + while abs(a) > MIN: + b = (b*x2) >> wp + a = (a*b) >> wp + cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp + s += (a * cn) >> wp + s = (s << 1) + s = ctx.ldexp(s, -wp) + s *= ctx.nthroot(q, 4) + return s + # case z real, q complex + elif not ctx._im(z): + wp = 
ctx.prec + extra2 + xre = ctx.to_fixed(ctx._re(q), wp) + xim = ctx.to_fixed(ctx._im(q), wp) + x2re = (xre*xre - xim*xim) >> wp + x2im = (xre*xim) >> (wp - 1) + are = bre = x2re + aim = bim = x2im + c1, s1 = ctx.cos_sin(ctx._re(z), prec=wp) + cn = c1 = ctx.to_fixed(c1, wp) + sn = s1 = ctx.to_fixed(s1, wp) + c2 = (c1*c1 - s1*s1) >> wp + s2 = (c1 * s1) >> (wp - 1) + cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp + sre = c1 + ((are * cn) >> wp) + sim = ((aim * cn) >> wp) + while are**2 + aim**2 > MIN: + bre, bim = (bre * x2re - bim * x2im) >> wp, \ + (bre * x2im + bim * x2re) >> wp + are, aim = (are * bre - aim * bim) >> wp, \ + (are * bim + aim * bre) >> wp + cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp + sre += ((are * cn) >> wp) + sim += ((aim * cn) >> wp) + sre = (sre << 1) + sim = (sim << 1) + sre = ctx.ldexp(sre, -wp) + sim = ctx.ldexp(sim, -wp) + s = ctx.mpc(sre, sim) + #case z complex, q real + elif not ctx._im(q): + wp = ctx.prec + extra2 + x = ctx.to_fixed(ctx._re(q), wp) + x2 = (x*x) >> wp + a = b = x2 + prec0 = ctx.prec + ctx.prec = wp + c1, s1 = ctx.cos_sin(z) + ctx.prec = prec0 + cnre = c1re = ctx.to_fixed(ctx._re(c1), wp) + cnim = c1im = ctx.to_fixed(ctx._im(c1), wp) + snre = s1re = ctx.to_fixed(ctx._re(s1), wp) + snim = s1im = ctx.to_fixed(ctx._im(s1), wp) + #c2 = (c1*c1 - s1*s1) >> wp + c2re = (c1re*c1re - c1im*c1im - s1re*s1re + s1im*s1im) >> wp + c2im = (c1re*c1im - s1re*s1im) >> (wp - 1) + #s2 = (c1 * s1) >> (wp - 1) + s2re = (c1re*s1re - c1im*s1im) >> (wp - 1) + s2im = (c1re*s1im + c1im*s1re) >> (wp - 1) + #cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp + t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp + t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp + t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp + t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp + cnre = t1 + cnim = t2 + snre = t3 + snim = t4 + sre = c1re + ((a * cnre) >> wp) + sim = c1im + ((a * cnim) >> wp) + while abs(a) > 
MIN: + b = (b*x2) >> wp + a = (a*b) >> wp + t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp + t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp + t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp + t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp + cnre = t1 + cnim = t2 + snre = t3 + snim = t4 + sre += ((a * cnre) >> wp) + sim += ((a * cnim) >> wp) + sre = (sre << 1) + sim = (sim << 1) + sre = ctx.ldexp(sre, -wp) + sim = ctx.ldexp(sim, -wp) + s = ctx.mpc(sre, sim) + # case z and q complex + else: + wp = ctx.prec + extra2 + xre = ctx.to_fixed(ctx._re(q), wp) + xim = ctx.to_fixed(ctx._im(q), wp) + x2re = (xre*xre - xim*xim) >> wp + x2im = (xre*xim) >> (wp - 1) + are = bre = x2re + aim = bim = x2im + prec0 = ctx.prec + ctx.prec = wp + # cos(z), sin(z) with z complex + c1, s1 = ctx.cos_sin(z) + ctx.prec = prec0 + cnre = c1re = ctx.to_fixed(ctx._re(c1), wp) + cnim = c1im = ctx.to_fixed(ctx._im(c1), wp) + snre = s1re = ctx.to_fixed(ctx._re(s1), wp) + snim = s1im = ctx.to_fixed(ctx._im(s1), wp) + c2re = (c1re*c1re - c1im*c1im - s1re*s1re + s1im*s1im) >> wp + c2im = (c1re*c1im - s1re*s1im) >> (wp - 1) + s2re = (c1re*s1re - c1im*s1im) >> (wp - 1) + s2im = (c1re*s1im + c1im*s1re) >> (wp - 1) + t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp + t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp + t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp + t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp + cnre = t1 + cnim = t2 + snre = t3 + snim = t4 + n = 1 + termre = c1re + termim = c1im + sre = c1re + ((are * cnre - aim * cnim) >> wp) + sim = c1im + ((are * cnim + aim * cnre) >> wp) + n = 3 + termre = ((are * cnre - aim * cnim) >> wp) + termim = ((are * cnim + aim * cnre) >> wp) + sre = c1re + ((are * cnre - aim * cnim) >> wp) + sim = c1im + ((are * cnim + aim * cnre) >> wp) + n = 5 + while are**2 + aim**2 > MIN: + bre, bim = (bre * x2re - bim * x2im) >> wp, \ + (bre * x2im + bim * x2re) >> wp + are, aim = 
(are * bre - aim * bim) >> wp, \ + (are * bim + aim * bre) >> wp + #cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp + t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp + t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp + t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp + t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp + cnre = t1 + cnim = t2 + snre = t3 + snim = t4 + termre = ((are * cnre - aim * cnim) >> wp) + termim = ((aim * cnre + are * cnim) >> wp) + sre += ((are * cnre - aim * cnim) >> wp) + sim += ((aim * cnre + are * cnim) >> wp) + n += 2 + sre = (sre << 1) + sim = (sim << 1) + sre = ctx.ldexp(sre, -wp) + sim = ctx.ldexp(sim, -wp) + s = ctx.mpc(sre, sim) + s *= ctx.nthroot(q, 4) + return s + +@defun +def _djacobi_theta2(ctx, z, q, nd): + MIN = 2 + extra1 = 10 + extra2 = 20 + if (not ctx._im(q)) and (not ctx._im(z)): + wp = ctx.prec + extra1 + x = ctx.to_fixed(ctx._re(q), wp) + x2 = (x*x) >> wp + a = b = x2 + c1, s1 = ctx.cos_sin(ctx._re(z), prec=wp) + cn = c1 = ctx.to_fixed(c1, wp) + sn = s1 = ctx.to_fixed(s1, wp) + c2 = (c1*c1 - s1*s1) >> wp + s2 = (c1 * s1) >> (wp - 1) + cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp + if (nd&1): + s = s1 + ((a * sn * 3**nd) >> wp) + else: + s = c1 + ((a * cn * 3**nd) >> wp) + n = 2 + while abs(a) > MIN: + b = (b*x2) >> wp + a = (a*b) >> wp + cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp + if nd&1: + s += (a * sn * (2*n+1)**nd) >> wp + else: + s += (a * cn * (2*n+1)**nd) >> wp + n += 1 + s = -(s << 1) + s = ctx.ldexp(s, -wp) + # case z real, q complex + elif not ctx._im(z): + wp = ctx.prec + extra2 + xre = ctx.to_fixed(ctx._re(q), wp) + xim = ctx.to_fixed(ctx._im(q), wp) + x2re = (xre*xre - xim*xim) >> wp + x2im = (xre*xim) >> (wp - 1) + are = bre = x2re + aim = bim = x2im + c1, s1 = ctx.cos_sin(ctx._re(z), prec=wp) + cn = c1 = ctx.to_fixed(c1, wp) + sn = s1 = ctx.to_fixed(s1, wp) + c2 = (c1*c1 - s1*s1) >> wp + s2 = (c1 * s1) >> (wp - 1) + cn, sn = (cn*c2 - 
sn*s2) >> wp, (sn*c2 + cn*s2) >> wp + if (nd&1): + sre = s1 + ((are * sn * 3**nd) >> wp) + sim = ((aim * sn * 3**nd) >> wp) + else: + sre = c1 + ((are * cn * 3**nd) >> wp) + sim = ((aim * cn * 3**nd) >> wp) + n = 5 + while are**2 + aim**2 > MIN: + bre, bim = (bre * x2re - bim * x2im) >> wp, \ + (bre * x2im + bim * x2re) >> wp + are, aim = (are * bre - aim * bim) >> wp, \ + (are * bim + aim * bre) >> wp + cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp + + if (nd&1): + sre += ((are * sn * n**nd) >> wp) + sim += ((aim * sn * n**nd) >> wp) + else: + sre += ((are * cn * n**nd) >> wp) + sim += ((aim * cn * n**nd) >> wp) + n += 2 + sre = -(sre << 1) + sim = -(sim << 1) + sre = ctx.ldexp(sre, -wp) + sim = ctx.ldexp(sim, -wp) + s = ctx.mpc(sre, sim) + #case z complex, q real + elif not ctx._im(q): + wp = ctx.prec + extra2 + x = ctx.to_fixed(ctx._re(q), wp) + x2 = (x*x) >> wp + a = b = x2 + prec0 = ctx.prec + ctx.prec = wp + c1, s1 = ctx.cos_sin(z) + ctx.prec = prec0 + cnre = c1re = ctx.to_fixed(ctx._re(c1), wp) + cnim = c1im = ctx.to_fixed(ctx._im(c1), wp) + snre = s1re = ctx.to_fixed(ctx._re(s1), wp) + snim = s1im = ctx.to_fixed(ctx._im(s1), wp) + #c2 = (c1*c1 - s1*s1) >> wp + c2re = (c1re*c1re - c1im*c1im - s1re*s1re + s1im*s1im) >> wp + c2im = (c1re*c1im - s1re*s1im) >> (wp - 1) + #s2 = (c1 * s1) >> (wp - 1) + s2re = (c1re*s1re - c1im*s1im) >> (wp - 1) + s2im = (c1re*s1im + c1im*s1re) >> (wp - 1) + #cn, sn = (cn*c2 - sn*s2) >> wp, (sn*c2 + cn*s2) >> wp + t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp + t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp + t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp + t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp + cnre = t1 + cnim = t2 + snre = t3 + snim = t4 + if (nd&1): + sre = s1re + ((a * snre * 3**nd) >> wp) + sim = s1im + ((a * snim * 3**nd) >> wp) + else: + sre = c1re + ((a * cnre * 3**nd) >> wp) + sim = c1im + ((a * cnim * 3**nd) >> wp) + n = 5 + while abs(a) > MIN: + b = 
(b*x2) >> wp + a = (a*b) >> wp + t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp + t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp + t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp + t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp + cnre = t1 + cnim = t2 + snre = t3 + snim = t4 + if (nd&1): + sre += ((a * snre * n**nd) >> wp) + sim += ((a * snim * n**nd) >> wp) + else: + sre += ((a * cnre * n**nd) >> wp) + sim += ((a * cnim * n**nd) >> wp) + n += 2 + sre = -(sre << 1) + sim = -(sim << 1) + sre = ctx.ldexp(sre, -wp) + sim = ctx.ldexp(sim, -wp) + s = ctx.mpc(sre, sim) + # case z and q complex + else: + wp = ctx.prec + extra2 + xre = ctx.to_fixed(ctx._re(q), wp) + xim = ctx.to_fixed(ctx._im(q), wp) + x2re = (xre*xre - xim*xim) >> wp + x2im = (xre*xim) >> (wp - 1) + are = bre = x2re + aim = bim = x2im + prec0 = ctx.prec + ctx.prec = wp + # cos(2*z), sin(2*z) with z complex + c1, s1 = ctx.cos_sin(z) + ctx.prec = prec0 + cnre = c1re = ctx.to_fixed(ctx._re(c1), wp) + cnim = c1im = ctx.to_fixed(ctx._im(c1), wp) + snre = s1re = ctx.to_fixed(ctx._re(s1), wp) + snim = s1im = ctx.to_fixed(ctx._im(s1), wp) + c2re = (c1re*c1re - c1im*c1im - s1re*s1re + s1im*s1im) >> wp + c2im = (c1re*c1im - s1re*s1im) >> (wp - 1) + s2re = (c1re*s1re - c1im*s1im) >> (wp - 1) + s2im = (c1re*s1im + c1im*s1re) >> (wp - 1) + t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp + t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp + t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp + t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp + cnre = t1 + cnim = t2 + snre = t3 + snim = t4 + if (nd&1): + sre = s1re + (((are * snre - aim * snim) * 3**nd) >> wp) + sim = s1im + (((are * snim + aim * snre)* 3**nd) >> wp) + else: + sre = c1re + (((are * cnre - aim * cnim) * 3**nd) >> wp) + sim = c1im + (((are * cnim + aim * cnre)* 3**nd) >> wp) + n = 5 + while are**2 + aim**2 > MIN: + bre, bim = (bre * x2re - bim * x2im) >> wp, \ + (bre * 
x2im + bim * x2re) >> wp + are, aim = (are * bre - aim * bim) >> wp, \ + (are * bim + aim * bre) >> wp + #cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp + t1 = (cnre*c2re - cnim*c2im - snre*s2re + snim*s2im) >> wp + t2 = (cnre*c2im + cnim*c2re - snre*s2im - snim*s2re) >> wp + t3 = (snre*c2re - snim*c2im + cnre*s2re - cnim*s2im) >> wp + t4 = (snre*c2im + snim*c2re + cnre*s2im + cnim*s2re) >> wp + cnre = t1 + cnim = t2 + snre = t3 + snim = t4 + if (nd&1): + sre += (((are * snre - aim * snim) * n**nd) >> wp) + sim += (((aim * snre + are * snim) * n**nd) >> wp) + else: + sre += (((are * cnre - aim * cnim) * n**nd) >> wp) + sim += (((aim * cnre + are * cnim) * n**nd) >> wp) + n += 2 + sre = -(sre << 1) + sim = -(sim << 1) + sre = ctx.ldexp(sre, -wp) + sim = ctx.ldexp(sim, -wp) + s = ctx.mpc(sre, sim) + s *= ctx.nthroot(q, 4) + if (nd&1): + return (-1)**(nd//2) * s + else: + return (-1)**(1 + nd//2) * s + +@defun +def _jacobi_theta3(ctx, z, q): + extra1 = 10 + extra2 = 20 + MIN = 2 + if z == ctx.zero: + if not ctx._im(q): + wp = ctx.prec + extra1 + x = ctx.to_fixed(ctx._re(q), wp) + s = x + a = b = x + x2 = (x*x) >> wp + while abs(a) > MIN: + b = (b*x2) >> wp + a = (a*b) >> wp + s += a + s = (1 << wp) + (s << 1) + s = ctx.ldexp(s, -wp) + return s + else: + wp = ctx.prec + extra1 + xre = ctx.to_fixed(ctx._re(q), wp) + xim = ctx.to_fixed(ctx._im(q), wp) + x2re = (xre*xre - xim*xim) >> wp + x2im = (xre*xim) >> (wp - 1) + sre = are = bre = xre + sim = aim = bim = xim + while are**2 + aim**2 > MIN: + bre, bim = (bre * x2re - bim * x2im) >> wp, \ + (bre * x2im + bim * x2re) >> wp + are, aim = (are * bre - aim * bim) >> wp, \ + (are * bim + aim * bre) >> wp + sre += are + sim += aim + sre = (1 << wp) + (sre << 1) + sim = (sim << 1) + sre = ctx.ldexp(sre, -wp) + sim = ctx.ldexp(sim, -wp) + s = ctx.mpc(sre, sim) + return s + else: + if (not ctx._im(q)) and (not ctx._im(z)): + s = 0 + wp = ctx.prec + extra1 + x = ctx.to_fixed(ctx._re(q), wp) + a = b = x + x2 = (x*x) >> wp + 
c1, s1 = ctx.cos_sin(ctx._re(z)*2, prec=wp) + c1 = ctx.to_fixed(c1, wp) + s1 = ctx.to_fixed(s1, wp) + cn = c1 + sn = s1 + s += (a * cn) >> wp + while abs(a) > MIN: + b = (b*x2) >> wp + a = (a*b) >> wp + cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp + s += (a * cn) >> wp + s = (1 << wp) + (s << 1) + s = ctx.ldexp(s, -wp) + return s + # case z real, q complex + elif not ctx._im(z): + wp = ctx.prec + extra2 + xre = ctx.to_fixed(ctx._re(q), wp) + xim = ctx.to_fixed(ctx._im(q), wp) + x2re = (xre*xre - xim*xim) >> wp + x2im = (xre*xim) >> (wp - 1) + are = bre = xre + aim = bim = xim + c1, s1 = ctx.cos_sin(ctx._re(z)*2, prec=wp) + c1 = ctx.to_fixed(c1, wp) + s1 = ctx.to_fixed(s1, wp) + cn = c1 + sn = s1 + sre = (are * cn) >> wp + sim = (aim * cn) >> wp + while are**2 + aim**2 > MIN: + bre, bim = (bre * x2re - bim * x2im) >> wp, \ + (bre * x2im + bim * x2re) >> wp + are, aim = (are * bre - aim * bim) >> wp, \ + (are * bim + aim * bre) >> wp + cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp + sre += (are * cn) >> wp + sim += (aim * cn) >> wp + sre = (1 << wp) + (sre << 1) + sim = (sim << 1) + sre = ctx.ldexp(sre, -wp) + sim = ctx.ldexp(sim, -wp) + s = ctx.mpc(sre, sim) + return s + #case z complex, q real + elif not ctx._im(q): + wp = ctx.prec + extra2 + x = ctx.to_fixed(ctx._re(q), wp) + a = b = x + x2 = (x*x) >> wp + prec0 = ctx.prec + ctx.prec = wp + c1, s1 = ctx.cos_sin(2*z) + ctx.prec = prec0 + cnre = c1re = ctx.to_fixed(ctx._re(c1), wp) + cnim = c1im = ctx.to_fixed(ctx._im(c1), wp) + snre = s1re = ctx.to_fixed(ctx._re(s1), wp) + snim = s1im = ctx.to_fixed(ctx._im(s1), wp) + sre = (a * cnre) >> wp + sim = (a * cnim) >> wp + while abs(a) > MIN: + b = (b*x2) >> wp + a = (a*b) >> wp + t1 = (cnre*c1re - cnim*c1im - snre*s1re + snim*s1im) >> wp + t2 = (cnre*c1im + cnim*c1re - snre*s1im - snim*s1re) >> wp + t3 = (snre*c1re - snim*c1im + cnre*s1re - cnim*s1im) >> wp + t4 = (snre*c1im + snim*c1re + cnre*s1im + cnim*s1re) >> wp + cnre = t1 + cnim = t2 + snre = t3 
+ snim = t4 + sre += (a * cnre) >> wp + sim += (a * cnim) >> wp + sre = (1 << wp) + (sre << 1) + sim = (sim << 1) + sre = ctx.ldexp(sre, -wp) + sim = ctx.ldexp(sim, -wp) + s = ctx.mpc(sre, sim) + return s + # case z and q complex + else: + wp = ctx.prec + extra2 + xre = ctx.to_fixed(ctx._re(q), wp) + xim = ctx.to_fixed(ctx._im(q), wp) + x2re = (xre*xre - xim*xim) >> wp + x2im = (xre*xim) >> (wp - 1) + are = bre = xre + aim = bim = xim + prec0 = ctx.prec + ctx.prec = wp + # cos(2*z), sin(2*z) with z complex + c1, s1 = ctx.cos_sin(2*z) + ctx.prec = prec0 + cnre = c1re = ctx.to_fixed(ctx._re(c1), wp) + cnim = c1im = ctx.to_fixed(ctx._im(c1), wp) + snre = s1re = ctx.to_fixed(ctx._re(s1), wp) + snim = s1im = ctx.to_fixed(ctx._im(s1), wp) + sre = (are * cnre - aim * cnim) >> wp + sim = (aim * cnre + are * cnim) >> wp + while are**2 + aim**2 > MIN: + bre, bim = (bre * x2re - bim * x2im) >> wp, \ + (bre * x2im + bim * x2re) >> wp + are, aim = (are * bre - aim * bim) >> wp, \ + (are * bim + aim * bre) >> wp + t1 = (cnre*c1re - cnim*c1im - snre*s1re + snim*s1im) >> wp + t2 = (cnre*c1im + cnim*c1re - snre*s1im - snim*s1re) >> wp + t3 = (snre*c1re - snim*c1im + cnre*s1re - cnim*s1im) >> wp + t4 = (snre*c1im + snim*c1re + cnre*s1im + cnim*s1re) >> wp + cnre = t1 + cnim = t2 + snre = t3 + snim = t4 + sre += (are * cnre - aim * cnim) >> wp + sim += (aim * cnre + are * cnim) >> wp + sre = (1 << wp) + (sre << 1) + sim = (sim << 1) + sre = ctx.ldexp(sre, -wp) + sim = ctx.ldexp(sim, -wp) + s = ctx.mpc(sre, sim) + return s + +@defun +def _djacobi_theta3(ctx, z, q, nd): + """nd=1,2,3 order of the derivative with respect to z""" + MIN = 2 + extra1 = 10 + extra2 = 20 + if (not ctx._im(q)) and (not ctx._im(z)): + s = 0 + wp = ctx.prec + extra1 + x = ctx.to_fixed(ctx._re(q), wp) + a = b = x + x2 = (x*x) >> wp + c1, s1 = ctx.cos_sin(ctx._re(z)*2, prec=wp) + c1 = ctx.to_fixed(c1, wp) + s1 = ctx.to_fixed(s1, wp) + cn = c1 + sn = s1 + if (nd&1): + s += (a * sn) >> wp + else: + s += (a * cn) >> 
wp + n = 2 + while abs(a) > MIN: + b = (b*x2) >> wp + a = (a*b) >> wp + cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp + if nd&1: + s += (a * sn * n**nd) >> wp + else: + s += (a * cn * n**nd) >> wp + n += 1 + s = -(s << (nd+1)) + s = ctx.ldexp(s, -wp) + # case z real, q complex + elif not ctx._im(z): + wp = ctx.prec + extra2 + xre = ctx.to_fixed(ctx._re(q), wp) + xim = ctx.to_fixed(ctx._im(q), wp) + x2re = (xre*xre - xim*xim) >> wp + x2im = (xre*xim) >> (wp - 1) + are = bre = xre + aim = bim = xim + c1, s1 = ctx.cos_sin(ctx._re(z)*2, prec=wp) + c1 = ctx.to_fixed(c1, wp) + s1 = ctx.to_fixed(s1, wp) + cn = c1 + sn = s1 + if (nd&1): + sre = (are * sn) >> wp + sim = (aim * sn) >> wp + else: + sre = (are * cn) >> wp + sim = (aim * cn) >> wp + n = 2 + while are**2 + aim**2 > MIN: + bre, bim = (bre * x2re - bim * x2im) >> wp, \ + (bre * x2im + bim * x2re) >> wp + are, aim = (are * bre - aim * bim) >> wp, \ + (are * bim + aim * bre) >> wp + cn, sn = (cn*c1 - sn*s1) >> wp, (sn*c1 + cn*s1) >> wp + if nd&1: + sre += (are * sn * n**nd) >> wp + sim += (aim * sn * n**nd) >> wp + else: + sre += (are * cn * n**nd) >> wp + sim += (aim * cn * n**nd) >> wp + n += 1 + sre = -(sre << (nd+1)) + sim = -(sim << (nd+1)) + sre = ctx.ldexp(sre, -wp) + sim = ctx.ldexp(sim, -wp) + s = ctx.mpc(sre, sim) + #case z complex, q real + elif not ctx._im(q): + wp = ctx.prec + extra2 + x = ctx.to_fixed(ctx._re(q), wp) + a = b = x + x2 = (x*x) >> wp + prec0 = ctx.prec + ctx.prec = wp + c1, s1 = ctx.cos_sin(2*z) + ctx.prec = prec0 + cnre = c1re = ctx.to_fixed(ctx._re(c1), wp) + cnim = c1im = ctx.to_fixed(ctx._im(c1), wp) + snre = s1re = ctx.to_fixed(ctx._re(s1), wp) + snim = s1im = ctx.to_fixed(ctx._im(s1), wp) + if (nd&1): + sre = (a * snre) >> wp + sim = (a * snim) >> wp + else: + sre = (a * cnre) >> wp + sim = (a * cnim) >> wp + n = 2 + while abs(a) > MIN: + b = (b*x2) >> wp + a = (a*b) >> wp + t1 = (cnre*c1re - cnim*c1im - snre*s1re + snim*s1im) >> wp + t2 = (cnre*c1im + cnim*c1re - snre*s1im 
- snim*s1re) >> wp + t3 = (snre*c1re - snim*c1im + cnre*s1re - cnim*s1im) >> wp + t4 = (snre*c1im + snim*c1re + cnre*s1im + cnim*s1re) >> wp + cnre = t1 + cnim = t2 + snre = t3 + snim = t4 + if (nd&1): + sre += (a * snre * n**nd) >> wp + sim += (a * snim * n**nd) >> wp + else: + sre += (a * cnre * n**nd) >> wp + sim += (a * cnim * n**nd) >> wp + n += 1 + sre = -(sre << (nd+1)) + sim = -(sim << (nd+1)) + sre = ctx.ldexp(sre, -wp) + sim = ctx.ldexp(sim, -wp) + s = ctx.mpc(sre, sim) + # case z and q complex + else: + wp = ctx.prec + extra2 + xre = ctx.to_fixed(ctx._re(q), wp) + xim = ctx.to_fixed(ctx._im(q), wp) + x2re = (xre*xre - xim*xim) >> wp + x2im = (xre*xim) >> (wp - 1) + are = bre = xre + aim = bim = xim + prec0 = ctx.prec + ctx.prec = wp + # cos(2*z), sin(2*z) with z complex + c1, s1 = ctx.cos_sin(2*z) + ctx.prec = prec0 + cnre = c1re = ctx.to_fixed(ctx._re(c1), wp) + cnim = c1im = ctx.to_fixed(ctx._im(c1), wp) + snre = s1re = ctx.to_fixed(ctx._re(s1), wp) + snim = s1im = ctx.to_fixed(ctx._im(s1), wp) + if (nd&1): + sre = (are * snre - aim * snim) >> wp + sim = (aim * snre + are * snim) >> wp + else: + sre = (are * cnre - aim * cnim) >> wp + sim = (aim * cnre + are * cnim) >> wp + n = 2 + while are**2 + aim**2 > MIN: + bre, bim = (bre * x2re - bim * x2im) >> wp, \ + (bre * x2im + bim * x2re) >> wp + are, aim = (are * bre - aim * bim) >> wp, \ + (are * bim + aim * bre) >> wp + t1 = (cnre*c1re - cnim*c1im - snre*s1re + snim*s1im) >> wp + t2 = (cnre*c1im + cnim*c1re - snre*s1im - snim*s1re) >> wp + t3 = (snre*c1re - snim*c1im + cnre*s1re - cnim*s1im) >> wp + t4 = (snre*c1im + snim*c1re + cnre*s1im + cnim*s1re) >> wp + cnre = t1 + cnim = t2 + snre = t3 + snim = t4 + if(nd&1): + sre += ((are * snre - aim * snim) * n**nd) >> wp + sim += ((aim * snre + are * snim) * n**nd) >> wp + else: + sre += ((are * cnre - aim * cnim) * n**nd) >> wp + sim += ((aim * cnre + are * cnim) * n**nd) >> wp + n += 1 + sre = -(sre << (nd+1)) + sim = -(sim << (nd+1)) + sre = 
ctx.ldexp(sre, -wp) + sim = ctx.ldexp(sim, -wp) + s = ctx.mpc(sre, sim) + if (nd&1): + return (-1)**(nd//2) * s + else: + return (-1)**(1 + nd//2) * s + +@defun +def _jacobi_theta2a(ctx, z, q): + """ + case ctx._im(z) != 0 + theta(2, z, q) = + q**1/4 * Sum(q**(n*n + n) * exp(j*(2*n + 1)*z), n=-inf, inf) + max term for minimum (2*n+1)*log(q).real - 2* ctx._im(z) + n0 = int(ctx._im(z)/log(q).real - 1/2) + theta(2, z, q) = + q**1/4 * Sum(q**(n*n + n) * exp(j*(2*n + 1)*z), n=n0, inf) + + q**1/4 * Sum(q**(n*n + n) * exp(j*(2*n + 1)*z), n, n0-1, -inf) + """ + n = n0 = int(ctx._im(z)/ctx._re(ctx.log(q)) - 1/2) + e2 = ctx.expj(2*z) + e = e0 = ctx.expj((2*n+1)*z) + a = q**(n*n + n) + # leading term + term = a * e + s = term + eps1 = ctx.eps*abs(term) + while 1: + n += 1 + e = e * e2 + term = q**(n*n + n) * e + if abs(term) < eps1: + break + s += term + e = e0 + e2 = ctx.expj(-2*z) + n = n0 + while 1: + n -= 1 + e = e * e2 + term = q**(n*n + n) * e + if abs(term) < eps1: + break + s += term + s = s * ctx.nthroot(q, 4) + return s + +@defun +def _jacobi_theta3a(ctx, z, q): + """ + case ctx._im(z) != 0 + theta3(z, q) = Sum(q**(n*n) * exp(j*2*n*z), n, -inf, inf) + max term for n*abs(log(q).real) + ctx._im(z) ~= 0 + n0 = int(- ctx._im(z)/abs(log(q).real)) + """ + n = n0 = int(-ctx._im(z)/abs(ctx._re(ctx.log(q)))) + e2 = ctx.expj(2*z) + e = e0 = ctx.expj(2*n*z) + s = term = q**(n*n) * e + eps1 = ctx.eps*abs(term) + while 1: + n += 1 + e = e * e2 + term = q**(n*n) * e + if abs(term) < eps1: + break + s += term + e = e0 + e2 = ctx.expj(-2*z) + n = n0 + while 1: + n -= 1 + e = e * e2 + term = q**(n*n) * e + if abs(term) < eps1: + break + s += term + return s + +@defun +def _djacobi_theta2a(ctx, z, q, nd): + """ + case ctx._im(z) != 0 + dtheta(2, z, q, nd) = + j* q**1/4 * Sum(q**(n*n + n) * (2*n+1)*exp(j*(2*n + 1)*z), n=-inf, inf) + max term for (2*n0+1)*log(q).real - 2* ctx._im(z) ~= 0 + n0 = int(ctx._im(z)/log(q).real - 1/2) + """ + n = n0 = int(ctx._im(z)/ctx._re(ctx.log(q)) - 1/2) 
+ e2 = ctx.expj(2*z) + e = e0 = ctx.expj((2*n + 1)*z) + a = q**(n*n + n) + # leading term + term = (2*n+1)**nd * a * e + s = term + eps1 = ctx.eps*abs(term) + while 1: + n += 1 + e = e * e2 + term = (2*n+1)**nd * q**(n*n + n) * e + if abs(term) < eps1: + break + s += term + e = e0 + e2 = ctx.expj(-2*z) + n = n0 + while 1: + n -= 1 + e = e * e2 + term = (2*n+1)**nd * q**(n*n + n) * e + if abs(term) < eps1: + break + s += term + return ctx.j**nd * s * ctx.nthroot(q, 4) + +@defun +def _djacobi_theta3a(ctx, z, q, nd): + """ + case ctx._im(z) != 0 + djtheta3(z, q, nd) = (2*j)**nd * + Sum(q**(n*n) * n**nd * exp(j*2*n*z), n, -inf, inf) + max term for minimum n*abs(log(q).real) + ctx._im(z) + """ + n = n0 = int(-ctx._im(z)/abs(ctx._re(ctx.log(q)))) + e2 = ctx.expj(2*z) + e = e0 = ctx.expj(2*n*z) + a = q**(n*n) * e + s = term = n**nd * a + if n != 0: + eps1 = ctx.eps*abs(term) + else: + eps1 = ctx.eps*abs(a) + while 1: + n += 1 + e = e * e2 + a = q**(n*n) * e + term = n**nd * a + if n != 0: + aterm = abs(term) + else: + aterm = abs(a) + if aterm < eps1: + break + s += term + e = e0 + e2 = ctx.expj(-2*z) + n = n0 + while 1: + n -= 1 + e = e * e2 + a = q**(n*n) * e + term = n**nd * a + if n != 0: + aterm = abs(term) + else: + aterm = abs(a) + if aterm < eps1: + break + s += term + return (2*ctx.j)**nd * s + +@defun +def jtheta(ctx, n, z, q, derivative=0): + if derivative: + return ctx._djtheta(n, z, q, derivative) + + z = ctx.convert(z) + q = ctx.convert(q) + + # Implementation note + # If ctx._im(z) is close to zero, _jacobi_theta2 and _jacobi_theta3 + # are used, + # which compute the series starting from n=0 using fixed precision + # numbers; + # otherwise _jacobi_theta2a and _jacobi_theta3a are used, which compute + # the series starting from n=n0, which is the largest term. 
+ + # TODO: write _jacobi_theta2a and _jacobi_theta3a using fixed-point + + if abs(q) > ctx.THETA_Q_LIM: + raise ValueError('abs(q) > THETA_Q_LIM = %f' % ctx.THETA_Q_LIM) + + extra = 10 + if z: + M = ctx.mag(z) + if M > 5 or (n == 1 and M < -5): + extra += 2*abs(M) + cz = 0.5 + extra2 = 50 + prec0 = ctx.prec + try: + ctx.prec += extra + if n == 1: + if ctx._im(z): + if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))): + ctx.dps += extra2 + res = ctx._jacobi_theta2(z - ctx.pi/2, q) + else: + ctx.dps += 10 + res = ctx._jacobi_theta2a(z - ctx.pi/2, q) + else: + res = ctx._jacobi_theta2(z - ctx.pi/2, q) + elif n == 2: + if ctx._im(z): + if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))): + ctx.dps += extra2 + res = ctx._jacobi_theta2(z, q) + else: + ctx.dps += 10 + res = ctx._jacobi_theta2a(z, q) + else: + res = ctx._jacobi_theta2(z, q) + elif n == 3: + if ctx._im(z): + if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))): + ctx.dps += extra2 + res = ctx._jacobi_theta3(z, q) + else: + ctx.dps += 10 + res = ctx._jacobi_theta3a(z, q) + else: + res = ctx._jacobi_theta3(z, q) + elif n == 4: + if ctx._im(z): + if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))): + ctx.dps += extra2 + res = ctx._jacobi_theta3(z, -q) + else: + ctx.dps += 10 + res = ctx._jacobi_theta3a(z, -q) + else: + res = ctx._jacobi_theta3(z, -q) + else: + raise ValueError + finally: + ctx.prec = prec0 + return res + +@defun +def _djtheta(ctx, n, z, q, derivative=1): + z = ctx.convert(z) + q = ctx.convert(q) + nd = int(derivative) + + if abs(q) > ctx.THETA_Q_LIM: + raise ValueError('abs(q) > THETA_Q_LIM = %f' % ctx.THETA_Q_LIM) + extra = 10 + ctx.prec * nd // 10 + if z: + M = ctx.mag(z) + if M > 5 or (n != 1 and M < -5): + extra += 2*abs(M) + cz = 0.5 + extra2 = 50 + prec0 = ctx.prec + try: + ctx.prec += extra + if n == 1: + if ctx._im(z): + if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))): + ctx.dps += extra2 + res = ctx._djacobi_theta2(z - ctx.pi/2, q, nd) + else: + ctx.dps += 10 + res = 
ctx._djacobi_theta2a(z - ctx.pi/2, q, nd) + else: + res = ctx._djacobi_theta2(z - ctx.pi/2, q, nd) + elif n == 2: + if ctx._im(z): + if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))): + ctx.dps += extra2 + res = ctx._djacobi_theta2(z, q, nd) + else: + ctx.dps += 10 + res = ctx._djacobi_theta2a(z, q, nd) + else: + res = ctx._djacobi_theta2(z, q, nd) + elif n == 3: + if ctx._im(z): + if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))): + ctx.dps += extra2 + res = ctx._djacobi_theta3(z, q, nd) + else: + ctx.dps += 10 + res = ctx._djacobi_theta3a(z, q, nd) + else: + res = ctx._djacobi_theta3(z, q, nd) + elif n == 4: + if ctx._im(z): + if abs(ctx._im(z)) < cz * abs(ctx._re(ctx.log(q))): + ctx.dps += extra2 + res = ctx._djacobi_theta3(z, -q, nd) + else: + ctx.dps += 10 + res = ctx._djacobi_theta3a(z, -q, nd) + else: + res = ctx._djacobi_theta3(z, -q, nd) + else: + raise ValueError + finally: + ctx.prec = prec0 + return +res diff --git a/.venv/lib/python3.11/site-packages/mpmath/functions/zeta.py b/.venv/lib/python3.11/site-packages/mpmath/functions/zeta.py new file mode 100644 index 0000000000000000000000000000000000000000..d7ede50d95e5b6eff511619620c934529942cbdd --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/functions/zeta.py @@ -0,0 +1,1154 @@ +from __future__ import print_function + +from ..libmp.backend import xrange +from .functions import defun, defun_wrapped, defun_static + +@defun +def stieltjes(ctx, n, a=1): + n = ctx.convert(n) + a = ctx.convert(a) + if n < 0: + return ctx.bad_domain("Stieltjes constants defined for n >= 0") + if hasattr(ctx, "stieltjes_cache"): + stieltjes_cache = ctx.stieltjes_cache + else: + stieltjes_cache = ctx.stieltjes_cache = {} + if a == 1: + if n == 0: + return +ctx.euler + if n in stieltjes_cache: + prec, s = stieltjes_cache[n] + if prec >= ctx.prec: + return +s + mag = 1 + def f(x): + xa = x/a + v = (xa-ctx.j)*ctx.ln(a-ctx.j*x)**n/(1+xa**2)/(ctx.exp(2*ctx.pi*x)-1) + return ctx._re(v) / mag + orig = ctx.prec + try: + 
# Normalize integrand by approx. magnitude to + # speed up quadrature (which uses absolute error) + if n > 50: + ctx.prec = 20 + mag = ctx.quad(f, [0,ctx.inf], maxdegree=3) + ctx.prec = orig + 10 + int(n**0.5) + s = ctx.quad(f, [0,ctx.inf], maxdegree=20) + v = ctx.ln(a)**n/(2*a) - ctx.ln(a)**(n+1)/(n+1) + 2*s/a*mag + finally: + ctx.prec = orig + if a == 1 and ctx.isint(n): + stieltjes_cache[n] = (ctx.prec, v) + return +v + +@defun_wrapped +def siegeltheta(ctx, t, derivative=0): + d = int(derivative) + if (t == ctx.inf or t == ctx.ninf): + if d < 2: + if t == ctx.ninf and d == 0: + return ctx.ninf + return ctx.inf + else: + return ctx.zero + if d == 0: + if ctx._im(t): + # XXX: cancellation occurs + a = ctx.loggamma(0.25+0.5j*t) + b = ctx.loggamma(0.25-0.5j*t) + return -ctx.ln(ctx.pi)/2*t - 0.5j*(a-b) + else: + if ctx.isinf(t): + return t + return ctx._im(ctx.loggamma(0.25+0.5j*t)) - ctx.ln(ctx.pi)/2*t + if d > 0: + a = (-0.5j)**(d-1)*ctx.polygamma(d-1, 0.25-0.5j*t) + b = (0.5j)**(d-1)*ctx.polygamma(d-1, 0.25+0.5j*t) + if ctx._im(t): + if d == 1: + return -0.5*ctx.log(ctx.pi)+0.25*(a+b) + else: + return 0.25*(a+b) + else: + if d == 1: + return ctx._re(-0.5*ctx.log(ctx.pi)+0.25*(a+b)) + else: + return ctx._re(0.25*(a+b)) + +@defun_wrapped +def grampoint(ctx, n): + # asymptotic expansion, from + # http://mathworld.wolfram.com/GramPoint.html + g = 2*ctx.pi*ctx.exp(1+ctx.lambertw((8*n+1)/(8*ctx.e))) + return ctx.findroot(lambda t: ctx.siegeltheta(t)-ctx.pi*n, g) + + +@defun_wrapped +def siegelz(ctx, t, **kwargs): + d = int(kwargs.get("derivative", 0)) + t = ctx.convert(t) + t1 = ctx._re(t) + t2 = ctx._im(t) + prec = ctx.prec + try: + if abs(t1) > 500*prec and t2**2 < t1: + v = ctx.rs_z(t, d) + if ctx._is_real_type(t): + return ctx._re(v) + return v + except NotImplementedError: + pass + ctx.prec += 21 + e1 = ctx.expj(ctx.siegeltheta(t)) + z = ctx.zeta(0.5+ctx.j*t) + if d == 0: + v = e1*z + ctx.prec=prec + if ctx._is_real_type(t): + return ctx._re(v) + return +v + z1 = 
ctx.zeta(0.5+ctx.j*t, derivative=1) + theta1 = ctx.siegeltheta(t, derivative=1) + if d == 1: + v = ctx.j*e1*(z1+z*theta1) + ctx.prec=prec + if ctx._is_real_type(t): + return ctx._re(v) + return +v + z2 = ctx.zeta(0.5+ctx.j*t, derivative=2) + theta2 = ctx.siegeltheta(t, derivative=2) + comb1 = theta1**2-ctx.j*theta2 + if d == 2: + def terms(): + return [2*z1*theta1, z2, z*comb1] + v = ctx.sum_accurately(terms, 1) + v = -e1*v + ctx.prec = prec + if ctx._is_real_type(t): + return ctx._re(v) + return +v + ctx.prec += 10 + z3 = ctx.zeta(0.5+ctx.j*t, derivative=3) + theta3 = ctx.siegeltheta(t, derivative=3) + comb2 = theta1**3-3*ctx.j*theta1*theta2-theta3 + if d == 3: + def terms(): + return [3*theta1*z2, 3*z1*comb1, z3+z*comb2] + v = ctx.sum_accurately(terms, 1) + v = -ctx.j*e1*v + ctx.prec = prec + if ctx._is_real_type(t): + return ctx._re(v) + return +v + z4 = ctx.zeta(0.5+ctx.j*t, derivative=4) + theta4 = ctx.siegeltheta(t, derivative=4) + def terms(): + return [theta1**4, -6*ctx.j*theta1**2*theta2, -3*theta2**2, + -4*theta1*theta3, ctx.j*theta4] + comb3 = ctx.sum_accurately(terms, 1) + if d == 4: + def terms(): + return [6*theta1**2*z2, -6*ctx.j*z2*theta2, 4*theta1*z3, + 4*z1*comb2, z4, z*comb3] + v = ctx.sum_accurately(terms, 1) + v = e1*v + ctx.prec = prec + if ctx._is_real_type(t): + return ctx._re(v) + return +v + if d > 4: + h = lambda x: ctx.siegelz(x, derivative=4) + return ctx.diff(h, t, n=d-4) + + +_zeta_zeros = [ +14.134725142,21.022039639,25.010857580,30.424876126,32.935061588, +37.586178159,40.918719012,43.327073281,48.005150881,49.773832478, +52.970321478,56.446247697,59.347044003,60.831778525,65.112544048, +67.079810529,69.546401711,72.067157674,75.704690699,77.144840069, +79.337375020,82.910380854,84.735492981,87.425274613,88.809111208, +92.491899271,94.651344041,95.870634228,98.831194218,101.317851006, +103.725538040,105.446623052,107.168611184,111.029535543,111.874659177, +114.320220915,116.226680321,118.790782866,121.370125002,122.946829294, 
+124.256818554,127.516683880,129.578704200,131.087688531,133.497737203, +134.756509753,138.116042055,139.736208952,141.123707404,143.111845808, +146.000982487,147.422765343,150.053520421,150.925257612,153.024693811, +156.112909294,157.597591818,158.849988171,161.188964138,163.030709687, +165.537069188,167.184439978,169.094515416,169.911976479,173.411536520, +174.754191523,176.441434298,178.377407776,179.916484020,182.207078484, +184.874467848,185.598783678,187.228922584,189.416158656,192.026656361, +193.079726604,195.265396680,196.876481841,198.015309676,201.264751944, +202.493594514,204.189671803,205.394697202,207.906258888,209.576509717, +211.690862595,213.347919360,214.547044783,216.169538508,219.067596349, +220.714918839,221.430705555,224.007000255,224.983324670,227.421444280, +229.337413306,231.250188700,231.987235253,233.693404179,236.524229666, +] + +def _load_zeta_zeros(url): + import urllib + d = urllib.urlopen(url) + L = [float(x) for x in d.readlines()] + # Sanity check + assert round(L[0]) == 14 + _zeta_zeros[:] = L + +@defun +def oldzetazero(ctx, n, url='http://www.dtc.umn.edu/~odlyzko/zeta_tables/zeros1'): + n = int(n) + if n < 0: + return ctx.zetazero(-n).conjugate() + if n == 0: + raise ValueError("n must be nonzero") + if n > len(_zeta_zeros) and n <= 100000: + _load_zeta_zeros(url) + if n > len(_zeta_zeros): + raise NotImplementedError("n too large for zetazeros") + return ctx.mpc(0.5, ctx.findroot(ctx.siegelz, _zeta_zeros[n-1])) + +@defun_wrapped +def riemannr(ctx, x): + if x == 0: + return ctx.zero + # Check if a simple asymptotic estimate is accurate enough + if abs(x) > 1000: + a = ctx.li(x) + b = 0.5*ctx.li(ctx.sqrt(x)) + if abs(b) < abs(a)*ctx.eps: + return a + if abs(x) < 0.01: + # XXX + ctx.prec += int(-ctx.log(abs(x),2)) + # Sum Gram's series + s = t = ctx.one + u = ctx.ln(x) + k = 1 + while abs(t) > abs(s)*ctx.eps: + t = t * u / k + s += t / (k * ctx._zeta_int(k+1)) + k += 1 + return s + +@defun_static +def primepi(ctx, x): + x = int(x) 
+ if x < 2: + return 0 + return len(ctx.list_primes(x)) + +# TODO: fix the interface wrt contexts +@defun_wrapped +def primepi2(ctx, x): + x = int(x) + if x < 2: + return ctx._iv.zero + if x < 2657: + return ctx._iv.mpf(ctx.primepi(x)) + mid = ctx.li(x) + # Schoenfeld's estimate for x >= 2657, assuming RH + err = ctx.sqrt(x,rounding='u')*ctx.ln(x,rounding='u')/8/ctx.pi(rounding='d') + a = ctx.floor((ctx._iv.mpf(mid)-err).a, rounding='d') + b = ctx.ceil((ctx._iv.mpf(mid)+err).b, rounding='u') + return ctx._iv.mpf([a,b]) + +@defun_wrapped +def primezeta(ctx, s): + if ctx.isnan(s): + return s + if ctx.re(s) <= 0: + raise ValueError("prime zeta function defined only for re(s) > 0") + if s == 1: + return ctx.inf + if s == 0.5: + return ctx.mpc(ctx.ninf, ctx.pi) + r = ctx.re(s) + if r > ctx.prec: + return 0.5**s + else: + wp = ctx.prec + int(r) + def terms(): + orig = ctx.prec + # zeta ~ 1+eps; need to set precision + # to get logarithm accurately + k = 0 + while 1: + k += 1 + u = ctx.moebius(k) + if not u: + continue + ctx.prec = wp + t = u*ctx.ln(ctx.zeta(k*s))/k + if not t: + return + #print ctx.prec, ctx.nstr(t) + ctx.prec = orig + yield t + return ctx.sum_accurately(terms) + +# TODO: for bernpoly and eulerpoly, ensure that all exact zeros are covered + +@defun_wrapped +def bernpoly(ctx, n, z): + # Slow implementation: + #return sum(ctx.binomial(n,k)*ctx.bernoulli(k)*z**(n-k) for k in xrange(0,n+1)) + n = int(n) + if n < 0: + raise ValueError("Bernoulli polynomials only defined for n >= 0") + if z == 0 or (z == 1 and n > 1): + return ctx.bernoulli(n) + if z == 0.5: + return (ctx.ldexp(1,1-n)-1)*ctx.bernoulli(n) + if n <= 3: + if n == 0: return z ** 0 + if n == 1: return z - 0.5 + if n == 2: return (6*z*(z-1)+1)/6 + if n == 3: return z*(z*(z-1.5)+0.5) + if ctx.isinf(z): + return z ** n + if ctx.isnan(z): + return z + if abs(z) > 2: + def terms(): + t = ctx.one + yield t + r = ctx.one/z + k = 1 + while k <= n: + t = t*(n+1-k)/k*r + if not (k > 2 and k & 1): + yield 
t*ctx.bernoulli(k) + k += 1 + return ctx.sum_accurately(terms) * z**n + else: + def terms(): + yield ctx.bernoulli(n) + t = ctx.one + k = 1 + while k <= n: + t = t*(n+1-k)/k * z + m = n-k + if not (m > 2 and m & 1): + yield t*ctx.bernoulli(m) + k += 1 + return ctx.sum_accurately(terms) + +@defun_wrapped +def eulerpoly(ctx, n, z): + n = int(n) + if n < 0: + raise ValueError("Euler polynomials only defined for n >= 0") + if n <= 2: + if n == 0: return z ** 0 + if n == 1: return z - 0.5 + if n == 2: return z*(z-1) + if ctx.isinf(z): + return z**n + if ctx.isnan(z): + return z + m = n+1 + if z == 0: + return -2*(ctx.ldexp(1,m)-1)*ctx.bernoulli(m)/m * z**0 + if z == 1: + return 2*(ctx.ldexp(1,m)-1)*ctx.bernoulli(m)/m * z**0 + if z == 0.5: + if n % 2: + return ctx.zero + # Use exact code for Euler numbers + if n < 100 or n*ctx.mag(0.46839865*n) < ctx.prec*0.25: + return ctx.ldexp(ctx._eulernum(n), -n) + # http://functions.wolfram.com/Polynomials/EulerE2/06/01/02/01/0002/ + def terms(): + t = ctx.one + k = 0 + w = ctx.ldexp(1,n+2) + while 1: + v = n-k+1 + if not (v > 2 and v & 1): + yield (2-w)*ctx.bernoulli(v)*t + k += 1 + if k > n: + break + t = t*z*(n-k+2)/k + w *= 0.5 + return ctx.sum_accurately(terms) / m + +@defun +def eulernum(ctx, n, exact=False): + n = int(n) + if exact: + return int(ctx._eulernum(n)) + if n < 100: + return ctx.mpf(ctx._eulernum(n)) + if n % 2: + return ctx.zero + return ctx.ldexp(ctx.eulerpoly(n,0.5), n) + +# TODO: this should be implemented low-level +def polylog_series(ctx, s, z): + tol = +ctx.eps + l = ctx.zero + k = 1 + zk = z + while 1: + term = zk / k**s + l += term + if abs(term) < tol: + break + zk *= z + k += 1 + return l + +def polylog_continuation(ctx, n, z): + if n < 0: + return z*0 + twopij = 2j * ctx.pi + a = -twopij**n/ctx.fac(n) * ctx.bernpoly(n, ctx.ln(z)/twopij) + if ctx._is_real_type(z) and z < 0: + a = ctx._re(a) + if ctx._im(z) < 0 or (ctx._im(z) == 0 and ctx._re(z) >= 1): + a -= twopij*ctx.ln(z)**(n-1)/ctx.fac(n-1) + return 
a + +def polylog_unitcircle(ctx, n, z): + tol = +ctx.eps + if n > 1: + l = ctx.zero + logz = ctx.ln(z) + logmz = ctx.one + m = 0 + while 1: + if (n-m) != 1: + term = ctx.zeta(n-m) * logmz / ctx.fac(m) + if term and abs(term) < tol: + break + l += term + logmz *= logz + m += 1 + l += ctx.ln(z)**(n-1)/ctx.fac(n-1)*(ctx.harmonic(n-1)-ctx.ln(-ctx.ln(z))) + elif n < 1: # else + l = ctx.fac(-n)*(-ctx.ln(z))**(n-1) + logz = ctx.ln(z) + logkz = ctx.one + k = 0 + while 1: + b = ctx.bernoulli(k-n+1) + if b: + term = b*logkz/(ctx.fac(k)*(k-n+1)) + if abs(term) < tol: + break + l -= term + logkz *= logz + k += 1 + else: + raise ValueError + if ctx._is_real_type(z) and z < 0: + l = ctx._re(l) + return l + +def polylog_general(ctx, s, z): + v = ctx.zero + u = ctx.ln(z) + if not abs(u) < 5: # theoretically |u| < 2*pi + j = ctx.j + v = 1-s + y = ctx.ln(-z)/(2*ctx.pi*j) + return ctx.gamma(v)*(j**v*ctx.zeta(v,0.5+y) + j**-v*ctx.zeta(v,0.5-y))/(2*ctx.pi)**v + t = 1 + k = 0 + while 1: + term = ctx.zeta(s-k) * t + if abs(term) < ctx.eps: + break + v += term + k += 1 + t *= u + t /= k + return ctx.gamma(1-s)*(-u)**(s-1) + v + +@defun_wrapped +def polylog(ctx, s, z): + s = ctx.convert(s) + z = ctx.convert(z) + if z == 1: + return ctx.zeta(s) + if z == -1: + return -ctx.altzeta(s) + if s == 0: + return z/(1-z) + if s == 1: + return -ctx.ln(1-z) + if s == -1: + return z/(1-z)**2 + if abs(z) <= 0.75 or (not ctx.isint(s) and abs(z) < 0.9): + return polylog_series(ctx, s, z) + if abs(z) >= 1.4 and ctx.isint(s): + return (-1)**(s+1)*polylog_series(ctx, s, 1/z) + polylog_continuation(ctx, int(ctx.re(s)), z) + if ctx.isint(s): + return polylog_unitcircle(ctx, int(ctx.re(s)), z) + return polylog_general(ctx, s, z) + +@defun_wrapped +def clsin(ctx, s, z, pi=False): + if ctx.isint(s) and s < 0 and int(s) % 2 == 1: + return z*0 + if pi: + a = ctx.expjpi(z) + else: + a = ctx.expj(z) + if ctx._is_real_type(z) and ctx._is_real_type(s): + return ctx.im(ctx.polylog(s,a)) + b = 1/a + return 
(-0.5j)*(ctx.polylog(s,a) - ctx.polylog(s,b)) + +@defun_wrapped +def clcos(ctx, s, z, pi=False): + if ctx.isint(s) and s < 0 and int(s) % 2 == 0: + return z*0 + if pi: + a = ctx.expjpi(z) + else: + a = ctx.expj(z) + if ctx._is_real_type(z) and ctx._is_real_type(s): + return ctx.re(ctx.polylog(s,a)) + b = 1/a + return 0.5*(ctx.polylog(s,a) + ctx.polylog(s,b)) + +@defun +def altzeta(ctx, s, **kwargs): + try: + return ctx._altzeta(s, **kwargs) + except NotImplementedError: + return ctx._altzeta_generic(s) + +@defun_wrapped +def _altzeta_generic(ctx, s): + if s == 1: + return ctx.ln2 + 0*s + return -ctx.powm1(2, 1-s) * ctx.zeta(s) + +@defun +def zeta(ctx, s, a=1, derivative=0, method=None, **kwargs): + d = int(derivative) + if a == 1 and not (d or method): + try: + return ctx._zeta(s, **kwargs) + except NotImplementedError: + pass + s = ctx.convert(s) + prec = ctx.prec + method = kwargs.get('method') + verbose = kwargs.get('verbose') + if (not s) and (not derivative): + return ctx.mpf(0.5) - ctx._convert_param(a)[0] + if a == 1 and method != 'euler-maclaurin': + im = abs(ctx._im(s)) + re = abs(ctx._re(s)) + #if (im < prec or method == 'borwein') and not derivative: + # try: + # if verbose: + # print "zeta: Attempting to use the Borwein algorithm" + # return ctx._zeta(s, **kwargs) + # except NotImplementedError: + # if verbose: + # print "zeta: Could not use the Borwein algorithm" + # pass + if abs(im) > 500*prec and 10*re < prec and derivative <= 4 or \ + method == 'riemann-siegel': + try: # py2.4 compatible try block + try: + if verbose: + print("zeta: Attempting to use the Riemann-Siegel algorithm") + return ctx.rs_zeta(s, derivative, **kwargs) + except NotImplementedError: + if verbose: + print("zeta: Could not use the Riemann-Siegel algorithm") + pass + finally: + ctx.prec = prec + if s == 1: + return ctx.inf + abss = abs(s) + if abss == ctx.inf: + if ctx.re(s) == ctx.inf: + if d == 0: + return ctx.one + return ctx.zero + return s*0 + elif ctx.isnan(abss): + return 
1/s + if ctx.re(s) > 2*ctx.prec and a == 1 and not derivative: + return ctx.one + ctx.power(2, -s) + return +ctx._hurwitz(s, a, d, **kwargs) + +@defun +def _hurwitz(ctx, s, a=1, d=0, **kwargs): + prec = ctx.prec + verbose = kwargs.get('verbose') + try: + extraprec = 10 + ctx.prec += extraprec + # We strongly want to special-case rational a + a, atype = ctx._convert_param(a) + if ctx.re(s) < 0: + if verbose: + print("zeta: Attempting reflection formula") + try: + return _hurwitz_reflection(ctx, s, a, d, atype) + except NotImplementedError: + pass + if verbose: + print("zeta: Reflection formula failed") + if verbose: + print("zeta: Using the Euler-Maclaurin algorithm") + while 1: + ctx.prec = prec + extraprec + T1, T2 = _hurwitz_em(ctx, s, a, d, prec+10, verbose) + cancellation = ctx.mag(T1) - ctx.mag(T1+T2) + if verbose: + print("Term 1:", T1) + print("Term 2:", T2) + print("Cancellation:", cancellation, "bits") + if cancellation < extraprec: + return T1 + T2 + else: + extraprec = max(2*extraprec, min(cancellation + 5, 100*prec)) + if extraprec > kwargs.get('maxprec', 100*prec): + raise ctx.NoConvergence("zeta: too much cancellation") + finally: + ctx.prec = prec + +def _hurwitz_reflection(ctx, s, a, d, atype): + # TODO: implement for derivatives + if d != 0: + raise NotImplementedError + res = ctx.re(s) + negs = -s + # Integer reflection formula + if ctx.isnpint(s): + n = int(res) + if n <= 0: + return ctx.bernpoly(1-n, a) / (n-1) + if not (atype == 'Q' or atype == 'Z'): + raise NotImplementedError + t = 1-s + # We now require a to be standardized + v = 0 + shift = 0 + b = a + while ctx.re(b) > 1: + b -= 1 + v -= b**negs + shift -= 1 + while ctx.re(b) <= 0: + v += b**negs + b += 1 + shift += 1 + # Rational reflection formula + try: + p, q = a._mpq_ + except: + assert a == int(a) + p = int(a) + q = 1 + p += shift*q + assert 1 <= p <= q + g = ctx.fsum(ctx.cospi(t/2-2*k*b)*ctx._hurwitz(t,(k,q)) \ + for k in range(1,q+1)) + g *= 2*ctx.gamma(t)/(2*ctx.pi*q)**t + v += g 
    return v

def _hurwitz_em(ctx, s, a, d, prec, verbose):
    """
    Euler-Maclaurin evaluation of the d-th s-derivative of the Hurwitz
    zeta function.

    Returns a pair (T1, T2) where T1 is the truncated main L-series over
    n = M1..M2-1 and T2 is the Euler-Maclaurin tail correction; the
    caller (_hurwitz) forms T1 + T2 and monitors cancellation between
    the two.  The (-1)**d factor on the tail accounts for each
    differentiation D = d/ds contributing a factor -log(n+a).

    :param s: zeta argument (cast to int below when exactly integral)
    :param a: Hurwitz shift parameter (converted here if needed)
    :param d: derivative order with respect to s
    :param prec: bit precision; terms below magnitude 2**(-prec) stop the tail
    :param verbose: print progress diagnostics when truthy
    """
    # May not be converted at this point
    a = ctx.convert(a)
    # Tail terms are accepted once ctx.mag(term) drops below this (bit) level
    tol = -prec
    # Estimate number of terms for Euler-Maclaurin summation; could be improved
    M1 = 0
    M2 = prec // 3
    N = M2
    lsum = 0
    # This speeds up the recurrence for derivatives
    if ctx.isint(s):
        s = int(ctx._re(s))
    s1 = s-1
    while 1:
        # Truncated L-series
        l = ctx._zetasum(s, M1+a, M2-M1-1, [d])[0][0]
        #if d:
        #    l = ctx.fsum((-ctx.ln(n+a))**d * (n+a)**negs for n in range(M1,M2))
        #else:
        #    l = ctx.fsum((n+a)**negs for n in range(M1,M2))
        lsum += l
        M2a = M2+a
        logM2a = ctx.ln(M2a)
        logM2ad = logM2a**d
        # logs holds log(M2+a)**d, log(M2+a)**(d-1), ... for the tail recurrence
        logs = [logM2ad]
        logr = 1/logM2a
        rM2a = 1/M2a
        M2as = M2a**(-s)
        # Integral term of the Euler-Maclaurin formula (closed form for d=0,
        # incomplete gamma for higher derivatives)
        if d:
            tailsum = ctx.gammainc(d+1, s1*logM2a) / s1**(d+1)
        else:
            tailsum = 1/((s1)*(M2a)**s1)
        # Boundary (1/2 * f(M2)) term
        tailsum += 0.5 * logM2ad * M2as
        U = [1]
        r = M2as
        fact = 2
        for j in range(1, N+1):
            # TODO: the following could perhaps be tidied a bit
            j2 = 2*j
            if j == 1:
                upds = [1]
            else:
                upds = [j2-2, j2-1]
            for m in upds:
                D = min(m,d+1)
                if m <= d:
                    logs.append(logs[-1] * logr)
                # Recurrence updating the coefficient vector U of the
                # derivative polynomial in log(M2+a)
                Un = [0]*(D+1)
                for i in xrange(D): Un[i] = (1-m-s)*U[i]
                for i in xrange(1,D+1): Un[i] += (d-(i-1))*U[i-1]
                U = Un
                r *= rM2a
                # Bernoulli-number correction term B_{2j}/(2j)! * f^(2j-1)(M2)
                t = ctx.fdot(U, logs) * r * ctx.bernoulli(j2)/(-fact)
                tailsum += t
                if ctx.mag(t) < tol:
                    # Tail converged below tolerance
                    return lsum, (-1)**d * tailsum
                fact *= (j2+1)*(j2+2)
        if verbose:
            print("Sum range:", M1, M2, "term magnitude", ctx.mag(t), "tolerance", tol)
        # Tail did not converge: extend the main sum and retry
        M1, M2 = M2, M2*2
        if ctx.re(s) < 0:
            N += N//2



@defun
def _zetasum(ctx, s, a, n, derivatives=[0], reflect=False):
    """
    Returns [xd0,xd1,...,xdr], [yd0,yd1,...ydr] where

    xdk = D^k ( 1/a^s + 1/(a+1)^s + ... + 1/(a+n)^s )
    ydk = D^k conj( 1/a^(1-s) + 1/(a+1)^(1-s) + ... + 1/(a+n)^(1-s) )

    D^k = kth derivative with respect to s, k ranges over the given list of
    derivatives (which should consist of either a single element
    or a range 0,1,...r).
If reflect=False, the ydks are not computed. + """ + #print "zetasum", s, a, n + # don't use the fixed-point code if there are large exponentials + if abs(ctx.re(s)) < 0.5 * ctx.prec: + try: + return ctx._zetasum_fast(s, a, n, derivatives, reflect) + except NotImplementedError: + pass + negs = ctx.fneg(s, exact=True) + have_derivatives = derivatives != [0] + have_one_derivative = len(derivatives) == 1 + if not reflect: + if not have_derivatives: + return [ctx.fsum((a+k)**negs for k in xrange(n+1))], [] + if have_one_derivative: + d = derivatives[0] + x = ctx.fsum(ctx.ln(a+k)**d * (a+k)**negs for k in xrange(n+1)) + return [(-1)**d * x], [] + maxd = max(derivatives) + if not have_one_derivative: + derivatives = range(maxd+1) + xs = [ctx.zero for d in derivatives] + if reflect: + ys = [ctx.zero for d in derivatives] + else: + ys = [] + for k in xrange(n+1): + w = a + k + xterm = w ** negs + if reflect: + yterm = ctx.conj(ctx.one / (w * xterm)) + if have_derivatives: + logw = -ctx.ln(w) + if have_one_derivative: + logw = logw ** maxd + xs[0] += xterm * logw + if reflect: + ys[0] += yterm * logw + else: + t = ctx.one + for d in derivatives: + xs[d] += xterm * t + if reflect: + ys[d] += yterm * t + t *= logw + else: + xs[0] += xterm + if reflect: + ys[0] += yterm + return xs, ys + +@defun +def dirichlet(ctx, s, chi=[1], derivative=0): + s = ctx.convert(s) + q = len(chi) + d = int(derivative) + if d > 2: + raise NotImplementedError("arbitrary order derivatives") + prec = ctx.prec + try: + ctx.prec += 10 + if s == 1: + have_pole = True + for x in chi: + if x and x != 1: + have_pole = False + h = +ctx.eps + ctx.prec *= 2*(d+1) + s += h + if have_pole: + return +ctx.inf + z = ctx.zero + for p in range(1,q+1): + if chi[p%q]: + if d == 1: + z += chi[p%q] * (ctx.zeta(s, (p,q), 1) - \ + ctx.zeta(s, (p,q))*ctx.log(q)) + else: + z += chi[p%q] * ctx.zeta(s, (p,q)) + z /= q**s + finally: + ctx.prec = prec + return +z + + +def secondzeta_main_term(ctx, s, a, **kwargs): + tol = 
ctx.eps + f = lambda n: ctx.gammainc(0.5*s, a*gamm**2, regularized=True)*gamm**(-s) + totsum = term = ctx.zero + mg = ctx.inf + n = 0 + while mg > tol: + totsum += term + n += 1 + gamm = ctx.im(ctx.zetazero_memoized(n)) + term = f(n) + mg = abs(term) + err = 0 + if kwargs.get("error"): + sg = ctx.re(s) + err = 0.5*ctx.pi**(-1)*max(1,sg)*a**(sg-0.5)*ctx.log(gamm/(2*ctx.pi))*\ + ctx.gammainc(-0.5, a*gamm**2)/abs(ctx.gamma(s/2)) + err = abs(err) + return +totsum, err, n + +def secondzeta_prime_term(ctx, s, a, **kwargs): + tol = ctx.eps + f = lambda n: ctx.gammainc(0.5*(1-s),0.25*ctx.log(n)**2 * a**(-1))*\ + ((0.5*ctx.log(n))**(s-1))*ctx.mangoldt(n)/ctx.sqrt(n)/\ + (2*ctx.gamma(0.5*s)*ctx.sqrt(ctx.pi)) + totsum = term = ctx.zero + mg = ctx.inf + n = 1 + while mg > tol or n < 9: + totsum += term + n += 1 + term = f(n) + if term == 0: + mg = ctx.inf + else: + mg = abs(term) + if kwargs.get("error"): + err = mg + return +totsum, err, n + +def secondzeta_exp_term(ctx, s, a): + if ctx.isint(s) and ctx.re(s) <= 0: + m = int(round(ctx.re(s))) + if not m & 1: + return ctx.mpf('-0.25')**(-m//2) + tol = ctx.eps + f = lambda n: (0.25*a)**n/((n+0.5*s)*ctx.fac(n)) + totsum = ctx.zero + term = f(0) + mg = ctx.inf + n = 0 + while mg > tol: + totsum += term + n += 1 + term = f(n) + mg = abs(term) + v = a**(0.5*s)*totsum/ctx.gamma(0.5*s) + return v + +def secondzeta_singular_term(ctx, s, a, **kwargs): + factor = a**(0.5*(s-1))/(4*ctx.sqrt(ctx.pi)*ctx.gamma(0.5*s)) + extraprec = ctx.mag(factor) + ctx.prec += extraprec + factor = a**(0.5*(s-1))/(4*ctx.sqrt(ctx.pi)*ctx.gamma(0.5*s)) + tol = ctx.eps + f = lambda n: ctx.bernpoly(n,0.75)*(4*ctx.sqrt(a))**n*\ + ctx.gamma(0.5*n)/((s+n-1)*ctx.fac(n)) + totsum = ctx.zero + mg1 = ctx.inf + n = 1 + term = f(n) + mg2 = abs(term) + while mg2 > tol and mg2 <= mg1: + totsum += term + n += 1 + term = f(n) + totsum += term + n +=1 + term = f(n) + mg1 = mg2 + mg2 = abs(term) + totsum += term + pole = 
-2*(s-1)**(-2)+(ctx.euler+ctx.log(16*ctx.pi**2*a))*(s-1)**(-1) + st = factor*(pole+totsum) + err = 0 + if kwargs.get("error"): + if not ((mg2 > tol) and (mg2 <= mg1)): + if mg2 <= tol: + err = ctx.mpf(10)**int(ctx.log(abs(factor*tol),10)) + if mg2 > mg1: + err = ctx.mpf(10)**int(ctx.log(abs(factor*mg1),10)) + err = max(err, ctx.eps*1.) + ctx.prec -= extraprec + return +st, err + +@defun +def secondzeta(ctx, s, a = 0.015, **kwargs): + r""" + Evaluates the secondary zeta function `Z(s)`, defined for + `\mathrm{Re}(s)>1` by + + .. math :: + + Z(s) = \sum_{n=1}^{\infty} \frac{1}{\tau_n^s} + + where `\frac12+i\tau_n` runs through the zeros of `\zeta(s)` with + imaginary part positive. + + `Z(s)` extends to a meromorphic function on `\mathbb{C}` with a + double pole at `s=1` and simple poles at the points `-2n` for + `n=0`, 1, 2, ... + + **Examples** + + >>> from mpmath import * + >>> mp.pretty = True; mp.dps = 15 + >>> secondzeta(2) + 0.023104993115419 + >>> xi = lambda s: 0.5*s*(s-1)*pi**(-0.5*s)*gamma(0.5*s)*zeta(s) + >>> Xi = lambda t: xi(0.5+t*j) + >>> chop(-0.5*diff(Xi,0,n=2)/Xi(0)) + 0.023104993115419 + + We may ask for an approximate error value:: + + >>> secondzeta(0.5+100j, error=True) + ((-0.216272011276718 - 0.844952708937228j), 2.22044604925031e-16) + + The function has poles at the negative odd integers, + and dyadic rational values at the negative even integers:: + + >>> mp.dps = 30 + >>> secondzeta(-8) + -0.67236328125 + >>> secondzeta(-7) + +inf + + **Implementation notes** + + The function is computed as sum of four terms `Z(s)=A(s)-P(s)+E(s)-S(s)` + respectively main, prime, exponential and singular terms. + The main term `A(s)` is computed from the zeros of zeta. + The prime term depends on the von Mangoldt function. + The singular term is responsible for the poles of the function. + + The four terms depends on a small parameter `a`. We may change the + value of `a`. 
Theoretically this has no effect on the sum of the four + terms, but in practice may be important. + + A smaller value of the parameter `a` makes `A(s)` depend on + a smaller number of zeros of zeta, but `P(s)` uses more values of + von Mangoldt function. + + We may also add a verbose option to obtain data about the + values of the four terms. + + >>> mp.dps = 10 + >>> secondzeta(0.5 + 40j, error=True, verbose=True) + main term = (-30190318549.138656312556 - 13964804384.624622876523j) + computed using 19 zeros of zeta + prime term = (132717176.89212754625045 + 188980555.17563978290601j) + computed using 9 values of the von Mangoldt function + exponential term = (542447428666.07179812536 + 362434922978.80192435203j) + singular term = (512124392939.98154322355 + 348281138038.65531023921j) + ((0.059471043 + 0.3463514534j), 1.455191523e-11) + + >>> secondzeta(0.5 + 40j, a=0.04, error=True, verbose=True) + main term = (-151962888.19606243907725 - 217930683.90210294051982j) + computed using 9 zeros of zeta + prime term = (2476659342.3038722372461 + 28711581821.921627163136j) + computed using 37 values of the von Mangoldt function + exponential term = (178506047114.7838188264 + 819674143244.45677330576j) + singular term = (175877424884.22441310708 + 790744630738.28669174871j) + ((0.059471043 + 0.3463514534j), 1.455191523e-11) + + Notice the great cancellation between the four terms. Changing `a`, the + four terms are very different numbers but the cancellation gives + the good value of Z(s). + + **References** + + A. Voros, Zeta functions for the Riemann zeros, Ann. Institute Fourier, + 53, (2003) 665--699. + + A. Voros, Zeta functions over Zeros of Zeta Functions, Lecture Notes + of the Unione Matematica Italiana, Springer, 2009. 
+ """ + s = ctx.convert(s) + a = ctx.convert(a) + tol = ctx.eps + if ctx.isint(s) and ctx.re(s) <= 1: + if abs(s-1) < tol*1000: + return ctx.inf + m = int(round(ctx.re(s))) + if m & 1: + return ctx.inf + else: + return ((-1)**(-m//2)*\ + ctx.fraction(8-ctx.eulernum(-m,exact=True),2**(-m+3))) + prec = ctx.prec + try: + t3 = secondzeta_exp_term(ctx, s, a) + extraprec = max(ctx.mag(t3),0) + ctx.prec += extraprec + 3 + t1, r1, gt = secondzeta_main_term(ctx,s,a,error='True', verbose='True') + t2, r2, pt = secondzeta_prime_term(ctx,s,a,error='True', verbose='True') + t4, r4 = secondzeta_singular_term(ctx,s,a,error='True') + t3 = secondzeta_exp_term(ctx, s, a) + err = r1+r2+r4 + t = t1-t2+t3-t4 + if kwargs.get("verbose"): + print('main term =', t1) + print(' computed using', gt, 'zeros of zeta') + print('prime term =', t2) + print(' computed using', pt, 'values of the von Mangoldt function') + print('exponential term =', t3) + print('singular term =', t4) + finally: + ctx.prec = prec + if kwargs.get("error"): + w = max(ctx.mag(abs(t)),0) + err = max(err*2**w, ctx.eps*1.*2**w) + return +t, err + return +t + + +@defun_wrapped +def lerchphi(ctx, z, s, a): + r""" + Gives the Lerch transcendent, defined for `|z| < 1` and + `\Re{a} > 0` by + + .. math :: + + \Phi(z,s,a) = \sum_{k=0}^{\infty} \frac{z^k}{(a+k)^s} + + and generally by the recurrence `\Phi(z,s,a) = z \Phi(z,s,a+1) + a^{-s}` + along with the integral representation valid for `\Re{a} > 0` + + .. math :: + + \Phi(z,s,a) = \frac{1}{2 a^s} + + \int_0^{\infty} \frac{z^t}{(a+t)^s} dt - + 2 \int_0^{\infty} \frac{\sin(t \log z - s + \operatorname{arctan}(t/a)}{(a^2 + t^2)^{s/2} + (e^{2 \pi t}-1)} dt. + + The Lerch transcendent generalizes the Hurwitz zeta function :func:`zeta` + (`z = 1`) and the polylogarithm :func:`polylog` (`a = 1`). 
+ + **Examples** + + Several evaluations in terms of simpler functions:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> lerchphi(-1,2,0.5); 4*catalan + 3.663862376708876060218414 + 3.663862376708876060218414 + >>> diff(lerchphi, (-1,-2,1), (0,1,0)); 7*zeta(3)/(4*pi**2) + 0.2131391994087528954617607 + 0.2131391994087528954617607 + >>> lerchphi(-4,1,1); log(5)/4 + 0.4023594781085250936501898 + 0.4023594781085250936501898 + >>> lerchphi(-3+2j,1,0.5); 2*atanh(sqrt(-3+2j))/sqrt(-3+2j) + (1.142423447120257137774002 + 0.2118232380980201350495795j) + (1.142423447120257137774002 + 0.2118232380980201350495795j) + + Evaluation works for complex arguments and `|z| \ge 1`:: + + >>> lerchphi(1+2j, 3-j, 4+2j) + (0.002025009957009908600539469 + 0.003327897536813558807438089j) + >>> lerchphi(-2,2,-2.5) + -12.28676272353094275265944 + >>> lerchphi(10,10,10) + (-4.462130727102185701817349e-11 - 1.575172198981096218823481e-12j) + >>> lerchphi(10,10,-10.5) + (112658784011940.5605789002 - 498113185.5756221777743631j) + + Some degenerate cases:: + + >>> lerchphi(0,1,2) + 0.5 + >>> lerchphi(0,1,-2) + -0.5 + + Reduction to simpler functions:: + + >>> lerchphi(1, 4.25+1j, 1) + (1.044674457556746668033975 - 0.04674508654012658932271226j) + >>> zeta(4.25+1j) + (1.044674457556746668033975 - 0.04674508654012658932271226j) + >>> lerchphi(1 - 0.5**10, 4.25+1j, 1) + (1.044629338021507546737197 - 0.04667768813963388181708101j) + >>> lerchphi(3, 4, 1) + (1.249503297023366545192592 - 0.2314252413375664776474462j) + >>> polylog(4, 3) / 3 + (1.249503297023366545192592 - 0.2314252413375664776474462j) + >>> lerchphi(3, 4, 1 - 0.5**10) + (1.253978063946663945672674 - 0.2316736622836535468765376j) + + **References** + + 1. 
[DLMF]_ section 25.14 + + """ + if z == 0: + return a ** (-s) + # Faster, but these cases are useful for testing right now + if z == 1: + return ctx.zeta(s, a) + if a == 1: + return ctx.polylog(s, z) / z + if ctx.re(a) < 1: + if ctx.isnpint(a): + raise ValueError("Lerch transcendent complex infinity") + m = int(ctx.ceil(1-ctx.re(a))) + v = ctx.zero + zpow = ctx.one + for n in xrange(m): + v += zpow / (a+n)**s + zpow *= z + return zpow * ctx.lerchphi(z,s, a+m) + v + g = ctx.ln(z) + v = 1/(2*a**s) + ctx.gammainc(1-s, -a*g) * (-g)**(s-1) / z**a + h = s / 2 + r = 2*ctx.pi + f = lambda t: ctx.sin(s*ctx.atan(t/a)-t*g) / \ + ((a**2+t**2)**h * ctx.expm1(r*t)) + v += 2*ctx.quad(f, [0, ctx.inf]) + if not ctx.im(z) and not ctx.im(s) and not ctx.im(a) and ctx.re(z) < 1: + v = ctx.chop(v) + return v diff --git a/.venv/lib/python3.11/site-packages/mpmath/functions/zetazeros.py b/.venv/lib/python3.11/site-packages/mpmath/functions/zetazeros.py new file mode 100644 index 0000000000000000000000000000000000000000..37c11a29426b0114053ae61664541f7ae7de95d8 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/functions/zetazeros.py @@ -0,0 +1,1018 @@ +""" +The function zetazero(n) computes the n-th nontrivial zero of zeta(s). + +The general strategy is to locate a block of Gram intervals B where we +know exactly the number of zeros contained and which of those zeros +is that which we search. + +If n <= 400 000 000 we know exactly the Rosser exceptions, contained +in a list in this file. Hence for n<=400 000 000 we simply +look at these list of exceptions. If our zero is implicated in one of +these exceptions we have our block B. In other case we simply locate +the good Rosser block containing our zero. + +For n > 400 000 000 we apply the method of Turing, as complemented by +Lehman, Brent and Trudgian to find a suitable B. 
+""" + +from .functions import defun, defun_wrapped + +def find_rosser_block_zero(ctx, n): + """for n<400 000 000 determines a block were one find our zero""" + for k in range(len(_ROSSER_EXCEPTIONS)//2): + a=_ROSSER_EXCEPTIONS[2*k][0] + b=_ROSSER_EXCEPTIONS[2*k][1] + if ((a<= n-2) and (n-1 <= b)): + t0 = ctx.grampoint(a) + t1 = ctx.grampoint(b) + v0 = ctx._fp.siegelz(t0) + v1 = ctx._fp.siegelz(t1) + my_zero_number = n-a-1 + zero_number_block = b-a + pattern = _ROSSER_EXCEPTIONS[2*k+1] + return (my_zero_number, [a,b], [t0,t1], [v0,v1]) + k = n-2 + t,v,b = compute_triple_tvb(ctx, k) + T = [t] + V = [v] + while b < 0: + k -= 1 + t,v,b = compute_triple_tvb(ctx, k) + T.insert(0,t) + V.insert(0,v) + my_zero_number = n-k-1 + m = n-1 + t,v,b = compute_triple_tvb(ctx, m) + T.append(t) + V.append(v) + while b < 0: + m += 1 + t,v,b = compute_triple_tvb(ctx, m) + T.append(t) + V.append(v) + return (my_zero_number, [k,m], T, V) + +def wpzeros(t): + """Precision needed to compute higher zeros""" + wp = 53 + if t > 3*10**8: + wp = 63 + if t > 10**11: + wp = 70 + if t > 10**14: + wp = 83 + return wp + +def separate_zeros_in_block(ctx, zero_number_block, T, V, limitloop=None, + fp_tolerance=None): + """Separate the zeros contained in the block T, limitloop + determines how long one must search""" + if limitloop is None: + limitloop = ctx.inf + loopnumber = 0 + variations = count_variations(V) + while ((variations < zero_number_block) and (loopnumber 0): + alpha = ctx.sqrt(u/v) + b= (alpha*a+b2)/(alpha+1) + else: + b = (a+b2)/2 + if fp_tolerance < 10: + w = ctx._fp.siegelz(b) + if abs(w)ITERATION_LIMIT)and(loopnumber>2)and(variations+2==zero_number_block): + dtMax=0 + dtSec=0 + kMax = 0 + for k1 in range(1,len(T)): + dt = T[k1]-T[k1-1] + if dt > dtMax: + kMax=k1 + dtSec = dtMax + dtMax = dt + elif (dtdtSec): + dtSec = dt + if dtMax>3*dtSec: + f = lambda x: ctx.rs_z(x,derivative=1) + t0=T[kMax-1] + t1 = T[kMax] + t=ctx.findroot(f, (t0,t1), solver ='illinois',verify=False, 
verbose=False) + v = ctx.siegelz(t) + if (t0 2*wpz: + index +=1 + precs = [precs[0] // 2 +3+2*index] + precs + ctx.prec = precs[0] + guard + r = ctx.findroot(lambda x:ctx.siegelz(x), (t0,t1), solver ='illinois', verbose=False) + #print "first step at", ctx.dps, "digits" + z=ctx.mpc(0.5,r) + for prec in precs[1:]: + ctx.prec = prec + guard + #print "refining to", ctx.dps, "digits" + znew = z - ctx.zeta(z) / ctx.zeta(z, derivative=1) + #print "difference", ctx.nstr(abs(z-znew)) + z=ctx.mpc(0.5,ctx.im(znew)) + return ctx.im(z) + +def sure_number_block(ctx, n): + """The number of good Rosser blocks needed to apply + Turing method + References: + R. P. Brent, On the Zeros of the Riemann Zeta Function + in the Critical Strip, Math. Comp. 33 (1979) 1361--1372 + T. Trudgian, Improvements to Turing Method, Math. Comp.""" + if n < 9*10**5: + return(2) + g = ctx.grampoint(n-100) + lg = ctx._fp.ln(g) + brent = 0.0061 * lg**2 +0.08*lg + trudgian = 0.0031 * lg**2 +0.11*lg + N = ctx.ceil(min(brent,trudgian)) + N = int(N) + return N + +def compute_triple_tvb(ctx, n): + t = ctx.grampoint(n) + v = ctx._fp.siegelz(t) + if ctx.mag(abs(v))400 000 000""" + sb = sure_number_block(ctx, n) + number_goodblocks = 0 + m2 = n-1 + t, v, b = compute_triple_tvb(ctx, m2) + Tf = [t] + Vf = [v] + while b < 0: + m2 += 1 + t,v,b = compute_triple_tvb(ctx, m2) + Tf.append(t) + Vf.append(v) + goodpoints = [m2] + T = [t] + V = [v] + while number_goodblocks < 2*sb: + m2 += 1 + t, v, b = compute_triple_tvb(ctx, m2) + T.append(t) + V.append(v) + while b < 0: + m2 += 1 + t,v,b = compute_triple_tvb(ctx, m2) + T.append(t) + V.append(v) + goodpoints.append(m2) + zn = len(T)-1 + A, B, separated =\ + separate_zeros_in_block(ctx, zn, T, V, limitloop=ITERATION_LIMIT, + fp_tolerance=fp_tolerance) + Tf.pop() + Tf.extend(A) + Vf.pop() + Vf.extend(B) + if separated: + number_goodblocks += 1 + else: + number_goodblocks = 0 + T = [t] + V = [v] + # Now the same procedure to the left + number_goodblocks = 0 + m2 = n-2 + t, 
v, b = compute_triple_tvb(ctx, m2) + Tf.insert(0,t) + Vf.insert(0,v) + while b < 0: + m2 -= 1 + t,v,b = compute_triple_tvb(ctx, m2) + Tf.insert(0,t) + Vf.insert(0,v) + goodpoints.insert(0,m2) + T = [t] + V = [v] + while number_goodblocks < 2*sb: + m2 -= 1 + t, v, b = compute_triple_tvb(ctx, m2) + T.insert(0,t) + V.insert(0,v) + while b < 0: + m2 -= 1 + t,v,b = compute_triple_tvb(ctx, m2) + T.insert(0,t) + V.insert(0,v) + goodpoints.insert(0,m2) + zn = len(T)-1 + A, B, separated =\ + separate_zeros_in_block(ctx, zn, T, V, limitloop=ITERATION_LIMIT, fp_tolerance=fp_tolerance) + A.pop() + Tf = A+Tf + B.pop() + Vf = B+Vf + if separated: + number_goodblocks += 1 + else: + number_goodblocks = 0 + T = [t] + V = [v] + r = goodpoints[2*sb] + lg = len(goodpoints) + s = goodpoints[lg-2*sb-1] + tr, vr, br = compute_triple_tvb(ctx, r) + ar = Tf.index(tr) + ts, vs, bs = compute_triple_tvb(ctx, s) + as1 = Tf.index(ts) + T = Tf[ar:as1+1] + V = Vf[ar:as1+1] + zn = s-r + A, B, separated =\ + separate_zeros_in_block(ctx, zn,T,V,limitloop=ITERATION_LIMIT, fp_tolerance=fp_tolerance) + if separated: + return (n-r-1,[r,s],A,B) + q = goodpoints[sb] + lg = len(goodpoints) + t = goodpoints[lg-sb-1] + tq, vq, bq = compute_triple_tvb(ctx, q) + aq = Tf.index(tq) + tt, vt, bt = compute_triple_tvb(ctx, t) + at = Tf.index(tt) + T = Tf[aq:at+1] + V = Vf[aq:at+1] + return (n-q-1,[q,t],T,V) + +def count_variations(V): + count = 0 + vold = V[0] + for n in range(1, len(V)): + vnew = V[n] + if vold*vnew < 0: + count +=1 + vold = vnew + return count + +def pattern_construct(ctx, block, T, V): + pattern = '(' + a = block[0] + b = block[1] + t0,v0,b0 = compute_triple_tvb(ctx, a) + k = 0 + k0 = 0 + for n in range(a+1,b+1): + t1,v1,b1 = compute_triple_tvb(ctx, n) + lgT =len(T) + while (k < lgT) and (T[k] <= t1): + k += 1 + L = V[k0:k] + L.append(v1) + L.insert(0,v0) + count = count_variations(L) + pattern = pattern + ("%s" % count) + if b1 > 0: + pattern = pattern + ')(' + k0 = k + t0,v0,b0 = t1,v1,b1 + 
pattern = pattern[:-1] + return pattern + +@defun +def zetazero(ctx, n, info=False, round=True): + r""" + Computes the `n`-th nontrivial zero of `\zeta(s)` on the critical line, + i.e. returns an approximation of the `n`-th largest complex number + `s = \frac{1}{2} + ti` for which `\zeta(s) = 0`. Equivalently, the + imaginary part `t` is a zero of the Z-function (:func:`~mpmath.siegelz`). + + **Examples** + + The first few zeros:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> zetazero(1) + (0.5 + 14.13472514173469379045725j) + >>> zetazero(2) + (0.5 + 21.02203963877155499262848j) + >>> zetazero(20) + (0.5 + 77.14484006887480537268266j) + + Verifying that the values are zeros:: + + >>> for n in range(1,5): + ... s = zetazero(n) + ... chop(zeta(s)), chop(siegelz(s.imag)) + ... + (0.0, 0.0) + (0.0, 0.0) + (0.0, 0.0) + (0.0, 0.0) + + Negative indices give the conjugate zeros (`n = 0` is undefined):: + + >>> zetazero(-1) + (0.5 - 14.13472514173469379045725j) + + :func:`~mpmath.zetazero` supports arbitrarily large `n` and arbitrary precision:: + + >>> mp.dps = 15 + >>> zetazero(1234567) + (0.5 + 727690.906948208j) + >>> mp.dps = 50 + >>> zetazero(1234567) + (0.5 + 727690.9069482075392389420041147142092708393819935j) + >>> chop(zeta(_)/_) + 0.0 + + with *info=True*, :func:`~mpmath.zetazero` gives additional information:: + + >>> mp.dps = 15 + >>> zetazero(542964976,info=True) + ((0.5 + 209039046.578535j), [542964969, 542964978], 6, '(013111110)') + + This means that the zero is between Gram points 542964969 and 542964978; + it is the 6-th zero between them. Finally (01311110) is the pattern + of zeros in this interval. The numbers indicate the number of zeros + in each Gram interval (Rosser blocks between parenthesis). In this case + there is only one Rosser block of length nine. 
+ """ + n = int(n) + if n < 0: + return ctx.zetazero(-n).conjugate() + if n == 0: + raise ValueError("n must be nonzero") + wpinitial = ctx.prec + try: + wpz, fp_tolerance = comp_fp_tolerance(ctx, n) + ctx.prec = wpz + if n < 400000000: + my_zero_number, block, T, V =\ + find_rosser_block_zero(ctx, n) + else: + my_zero_number, block, T, V =\ + search_supergood_block(ctx, n, fp_tolerance) + zero_number_block = block[1]-block[0] + T, V, separated = separate_zeros_in_block(ctx, zero_number_block, T, V, + limitloop=ctx.inf, fp_tolerance=fp_tolerance) + if info: + pattern = pattern_construct(ctx,block,T,V) + prec = max(wpinitial, wpz) + t = separate_my_zero(ctx, my_zero_number, zero_number_block,T,V,prec) + v = ctx.mpc(0.5,t) + finally: + ctx.prec = wpinitial + if round: + v =+v + if info: + return (v,block,my_zero_number,pattern) + else: + return v + +def gram_index(ctx, t): + if t > 10**13: + wp = 3*ctx.log(t, 10) + else: + wp = 0 + prec = ctx.prec + try: + ctx.prec += wp + h = int(ctx.siegeltheta(t)/ctx.pi) + finally: + ctx.prec = prec + return(h) + +def count_to(ctx, t, T, V): + count = 0 + vold = V[0] + told = T[0] + tnew = T[1] + k = 1 + while tnew < t: + vnew = V[k] + if vold*vnew < 0: + count += 1 + vold = vnew + k += 1 + tnew = T[k] + a = ctx.siegelz(t) + if a*vold < 0: + count += 1 + return count + +def comp_fp_tolerance(ctx, n): + wpz = wpzeros(n*ctx.log(n)) + if n < 15*10**8: + fp_tolerance = 0.0005 + elif n <= 10**14: + fp_tolerance = 0.1 + else: + fp_tolerance = 100 + return wpz, fp_tolerance + +@defun +def nzeros(ctx, t): + r""" + Computes the number of zeros of the Riemann zeta function in + `(0,1) \times (0,t]`, usually denoted by `N(t)`. 
+ + **Examples** + + The first zero has imaginary part between 14 and 15:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> nzeros(14) + 0 + >>> nzeros(15) + 1 + >>> zetazero(1) + (0.5 + 14.1347251417347j) + + Some closely spaced zeros:: + + >>> nzeros(10**7) + 21136125 + >>> zetazero(21136125) + (0.5 + 9999999.32718175j) + >>> zetazero(21136126) + (0.5 + 10000000.2400236j) + >>> nzeros(545439823.215) + 1500000001 + >>> zetazero(1500000001) + (0.5 + 545439823.201985j) + >>> zetazero(1500000002) + (0.5 + 545439823.325697j) + + This confirms the data given by J. van de Lune, + H. J. J. te Riele and D. T. Winter in 1986. + """ + if t < 14.1347251417347: + return 0 + x = gram_index(ctx, t) + k = int(ctx.floor(x)) + wpinitial = ctx.prec + wpz, fp_tolerance = comp_fp_tolerance(ctx, k) + ctx.prec = wpz + a = ctx.siegelz(t) + if k == -1 and a < 0: + return 0 + elif k == -1 and a > 0: + return 1 + if k+2 < 400000000: + Rblock = find_rosser_block_zero(ctx, k+2) + else: + Rblock = search_supergood_block(ctx, k+2, fp_tolerance) + n1, n2 = Rblock[1] + if n2-n1 == 1: + b = Rblock[3][0] + if a*b > 0: + ctx.prec = wpinitial + return k+1 + else: + ctx.prec = wpinitial + return k+2 + my_zero_number,block, T, V = Rblock + zero_number_block = n2-n1 + T, V, separated = separate_zeros_in_block(ctx,\ + zero_number_block, T, V,\ + limitloop=ctx.inf,\ + fp_tolerance=fp_tolerance) + n = count_to(ctx, t, T, V) + ctx.prec = wpinitial + return n+n1+1 + +@defun_wrapped +def backlunds(ctx, t): + r""" + Computes the function + `S(t) = \operatorname{arg} \zeta(\frac{1}{2} + it) / \pi`. + + See Titchmarsh Section 9.3 for details of the definition. + + **Examples** + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> backlunds(217.3) + 0.16302205431184 + + Generally, the value is a small number. 
At Gram points it is an integer, + frequently equal to 0:: + + >>> chop(backlunds(grampoint(200))) + 0.0 + >>> backlunds(extraprec(10)(grampoint)(211)) + 1.0 + >>> backlunds(extraprec(10)(grampoint)(232)) + -1.0 + + The number of zeros of the Riemann zeta function up to height `t` + satisfies `N(t) = \theta(t)/\pi + 1 + S(t)` (see :func:nzeros` and + :func:`siegeltheta`):: + + >>> t = 1234.55 + >>> nzeros(t) + 842 + >>> siegeltheta(t)/pi+1+backlunds(t) + 842.0 + + """ + return ctx.nzeros(t)-1-ctx.siegeltheta(t)/ctx.pi + + +""" +_ROSSER_EXCEPTIONS is a list of all exceptions to +Rosser's rule for n <= 400 000 000. + +Alternately the entry is of type [n,m], or a string. +The string is the zero pattern of the Block and the relevant +adjacent. For example (010)3 corresponds to a block +composed of three Gram intervals, the first ant third without +a zero and the intermediate with a zero. The next Gram interval +contain three zeros. So that in total we have 4 zeros in 4 Gram +blocks. n and m are the indices of the Gram points of this +interval of four Gram intervals. The Rosser exception is therefore +formed by the three Gram intervals that are signaled between +parenthesis. + +We have included also some Rosser's exceptions beyond n=400 000 000 +that are noted in the literature by some reason. + +The list is composed from the data published in the references: + +R. P. Brent, J. van de Lune, H. J. J. te Riele, D. T. Winter, +'On the Zeros of the Riemann Zeta Function in the Critical Strip. II', +Math. Comp. 39 (1982) 681--688. +See also Corrigenda in Math. Comp. 46 (1986) 771. + +J. van de Lune, H. J. J. te Riele, +'On the Zeros of the Riemann Zeta Function in the Critical Strip. III', +Math. Comp. 41 (1983) 759--767. +See also Corrigenda in Math. Comp. 46 (1986) 771. + +J. van de Lune, +'Sums of Equal Powers of Positive Integers', +Dissertation, +Vrije Universiteit te Amsterdam, Centrum voor Wiskunde en Informatica, +Amsterdam, 1984. 
+ +Thanks to the authors all this papers and those others that have +contributed to make this possible. +""" + + + + + + + +_ROSSER_EXCEPTIONS = \ +[[13999525, 13999528], '(00)3', +[30783329, 30783332], '(00)3', +[30930926, 30930929], '3(00)', +[37592215, 37592218], '(00)3', +[40870156, 40870159], '(00)3', +[43628107, 43628110], '(00)3', +[46082042, 46082045], '(00)3', +[46875667, 46875670], '(00)3', +[49624540, 49624543], '3(00)', +[50799238, 50799241], '(00)3', +[55221453, 55221456], '3(00)', +[56948779, 56948782], '3(00)', +[60515663, 60515666], '(00)3', +[61331766, 61331770], '(00)40', +[69784843, 69784846], '3(00)', +[75052114, 75052117], '(00)3', +[79545240, 79545243], '3(00)', +[79652247, 79652250], '3(00)', +[83088043, 83088046], '(00)3', +[83689522, 83689525], '3(00)', +[85348958, 85348961], '(00)3', +[86513820, 86513823], '(00)3', +[87947596, 87947599], '3(00)', +[88600095, 88600098], '(00)3', +[93681183, 93681186], '(00)3', +[100316551, 100316554], '3(00)', +[100788444, 100788447], '(00)3', +[106236172, 106236175], '(00)3', +[106941327, 106941330], '3(00)', +[107287955, 107287958], '(00)3', +[107532016, 107532019], '3(00)', +[110571044, 110571047], '(00)3', +[111885253, 111885256], '3(00)', +[113239783, 113239786], '(00)3', +[120159903, 120159906], '(00)3', +[121424391, 121424394], '3(00)', +[121692931, 121692934], '3(00)', +[121934170, 121934173], '3(00)', +[122612848, 122612851], '3(00)', +[126116567, 126116570], '(00)3', +[127936513, 127936516], '(00)3', +[128710277, 128710280], '3(00)', +[129398902, 129398905], '3(00)', +[130461096, 130461099], '3(00)', +[131331947, 131331950], '3(00)', +[137334071, 137334074], '3(00)', +[137832603, 137832606], '(00)3', +[138799471, 138799474], '3(00)', +[139027791, 139027794], '(00)3', +[141617806, 141617809], '(00)3', +[144454931, 144454934], '(00)3', +[145402379, 145402382], '3(00)', +[146130245, 146130248], '3(00)', +[147059770, 147059773], '(00)3', +[147896099, 147896102], '3(00)', +[151097113, 151097116], 
'(00)3', +[152539438, 152539441], '(00)3', +[152863168, 152863171], '3(00)', +[153522726, 153522729], '3(00)', +[155171524, 155171527], '3(00)', +[155366607, 155366610], '(00)3', +[157260686, 157260689], '3(00)', +[157269224, 157269227], '(00)3', +[157755123, 157755126], '(00)3', +[158298484, 158298487], '3(00)', +[160369050, 160369053], '3(00)', +[162962787, 162962790], '(00)3', +[163724709, 163724712], '(00)3', +[164198113, 164198116], '3(00)', +[164689301, 164689305], '(00)40', +[164880228, 164880231], '3(00)', +[166201932, 166201935], '(00)3', +[168573836, 168573839], '(00)3', +[169750763, 169750766], '(00)3', +[170375507, 170375510], '(00)3', +[170704879, 170704882], '3(00)', +[172000992, 172000995], '3(00)', +[173289941, 173289944], '(00)3', +[173737613, 173737616], '3(00)', +[174102513, 174102516], '(00)3', +[174284990, 174284993], '(00)3', +[174500513, 174500516], '(00)3', +[175710609, 175710612], '(00)3', +[176870843, 176870846], '3(00)', +[177332732, 177332735], '3(00)', +[177902861, 177902864], '3(00)', +[179979095, 179979098], '(00)3', +[181233726, 181233729], '3(00)', +[181625435, 181625438], '(00)3', +[182105255, 182105259], '22(00)', +[182223559, 182223562], '3(00)', +[191116404, 191116407], '3(00)', +[191165599, 191165602], '3(00)', +[191297535, 191297539], '(00)22', +[192485616, 192485619], '(00)3', +[193264634, 193264638], '22(00)', +[194696968, 194696971], '(00)3', +[195876805, 195876808], '(00)3', +[195916548, 195916551], '3(00)', +[196395160, 196395163], '3(00)', +[196676303, 196676306], '(00)3', +[197889882, 197889885], '3(00)', +[198014122, 198014125], '(00)3', +[199235289, 199235292], '(00)3', +[201007375, 201007378], '(00)3', +[201030605, 201030608], '3(00)', +[201184290, 201184293], '3(00)', +[201685414, 201685418], '(00)22', +[202762875, 202762878], '3(00)', +[202860957, 202860960], '3(00)', +[203832577, 203832580], '3(00)', +[205880544, 205880547], '(00)3', +[206357111, 206357114], '(00)3', +[207159767, 207159770], '3(00)', +[207167343, 
207167346], '3(00)', +[207482539, 207482543], '3(010)', +[207669540, 207669543], '3(00)', +[208053426, 208053429], '(00)3', +[208110027, 208110030], '3(00)', +[209513826, 209513829], '3(00)', +[212623522, 212623525], '(00)3', +[213841715, 213841718], '(00)3', +[214012333, 214012336], '(00)3', +[214073567, 214073570], '(00)3', +[215170600, 215170603], '3(00)', +[215881039, 215881042], '3(00)', +[216274604, 216274607], '3(00)', +[216957120, 216957123], '3(00)', +[217323208, 217323211], '(00)3', +[218799264, 218799267], '(00)3', +[218803557, 218803560], '3(00)', +[219735146, 219735149], '(00)3', +[219830062, 219830065], '3(00)', +[219897904, 219897907], '(00)3', +[221205545, 221205548], '(00)3', +[223601929, 223601932], '(00)3', +[223907076, 223907079], '3(00)', +[223970397, 223970400], '(00)3', +[224874044, 224874048], '22(00)', +[225291157, 225291160], '(00)3', +[227481734, 227481737], '(00)3', +[228006442, 228006445], '3(00)', +[228357900, 228357903], '(00)3', +[228386399, 228386402], '(00)3', +[228907446, 228907449], '(00)3', +[228984552, 228984555], '3(00)', +[229140285, 229140288], '3(00)', +[231810024, 231810027], '(00)3', +[232838062, 232838065], '3(00)', +[234389088, 234389091], '3(00)', +[235588194, 235588197], '(00)3', +[236645695, 236645698], '(00)3', +[236962876, 236962879], '3(00)', +[237516723, 237516727], '04(00)', +[240004911, 240004914], '(00)3', +[240221306, 240221309], '3(00)', +[241389213, 241389217], '(010)3', +[241549003, 241549006], '(00)3', +[241729717, 241729720], '(00)3', +[241743684, 241743687], '3(00)', +[243780200, 243780203], '3(00)', +[243801317, 243801320], '(00)3', +[244122072, 244122075], '(00)3', +[244691224, 244691227], '3(00)', +[244841577, 244841580], '(00)3', +[245813461, 245813464], '(00)3', +[246299475, 246299478], '(00)3', +[246450176, 246450179], '3(00)', +[249069349, 249069352], '(00)3', +[250076378, 250076381], '(00)3', +[252442157, 252442160], '3(00)', +[252904231, 252904234], '3(00)', +[255145220, 255145223], '(00)3', 
+[255285971, 255285974], '3(00)', +[256713230, 256713233], '(00)3', +[257992082, 257992085], '(00)3', +[258447955, 258447959], '22(00)', +[259298045, 259298048], '3(00)', +[262141503, 262141506], '(00)3', +[263681743, 263681746], '3(00)', +[266527881, 266527885], '(010)3', +[266617122, 266617125], '(00)3', +[266628044, 266628047], '3(00)', +[267305763, 267305766], '(00)3', +[267388404, 267388407], '3(00)', +[267441672, 267441675], '3(00)', +[267464886, 267464889], '(00)3', +[267554907, 267554910], '3(00)', +[269787480, 269787483], '(00)3', +[270881434, 270881437], '(00)3', +[270997583, 270997586], '3(00)', +[272096378, 272096381], '3(00)', +[272583009, 272583012], '(00)3', +[274190881, 274190884], '3(00)', +[274268747, 274268750], '(00)3', +[275297429, 275297432], '3(00)', +[275545476, 275545479], '3(00)', +[275898479, 275898482], '3(00)', +[275953000, 275953003], '(00)3', +[277117197, 277117201], '(00)22', +[277447310, 277447313], '3(00)', +[279059657, 279059660], '3(00)', +[279259144, 279259147], '3(00)', +[279513636, 279513639], '3(00)', +[279849069, 279849072], '3(00)', +[280291419, 280291422], '(00)3', +[281449425, 281449428], '3(00)', +[281507953, 281507956], '3(00)', +[281825600, 281825603], '(00)3', +[282547093, 282547096], '3(00)', +[283120963, 283120966], '3(00)', +[283323493, 283323496], '(00)3', +[284764535, 284764538], '3(00)', +[286172639, 286172642], '3(00)', +[286688824, 286688827], '(00)3', +[287222172, 287222175], '3(00)', +[287235534, 287235537], '3(00)', +[287304861, 287304864], '3(00)', +[287433571, 287433574], '(00)3', +[287823551, 287823554], '(00)3', +[287872422, 287872425], '3(00)', +[288766615, 288766618], '3(00)', +[290122963, 290122966], '3(00)', +[290450849, 290450853], '(00)22', +[291426141, 291426144], '3(00)', +[292810353, 292810356], '3(00)', +[293109861, 293109864], '3(00)', +[293398054, 293398057], '3(00)', +[294134426, 294134429], '3(00)', +[294216438, 294216441], '(00)3', +[295367141, 295367144], '3(00)', +[297834111, 
297834114], '3(00)', +[299099969, 299099972], '3(00)', +[300746958, 300746961], '3(00)', +[301097423, 301097426], '(00)3', +[301834209, 301834212], '(00)3', +[302554791, 302554794], '(00)3', +[303497445, 303497448], '3(00)', +[304165344, 304165347], '3(00)', +[304790218, 304790222], '3(010)', +[305302352, 305302355], '(00)3', +[306785996, 306785999], '3(00)', +[307051443, 307051446], '3(00)', +[307481539, 307481542], '3(00)', +[308605569, 308605572], '3(00)', +[309237610, 309237613], '3(00)', +[310509287, 310509290], '(00)3', +[310554057, 310554060], '3(00)', +[310646345, 310646348], '3(00)', +[311274896, 311274899], '(00)3', +[311894272, 311894275], '3(00)', +[312269470, 312269473], '(00)3', +[312306601, 312306605], '(00)40', +[312683193, 312683196], '3(00)', +[314499804, 314499807], '3(00)', +[314636802, 314636805], '(00)3', +[314689897, 314689900], '3(00)', +[314721319, 314721322], '3(00)', +[316132890, 316132893], '3(00)', +[316217470, 316217474], '(010)3', +[316465705, 316465708], '3(00)', +[316542790, 316542793], '(00)3', +[320822347, 320822350], '3(00)', +[321733242, 321733245], '3(00)', +[324413970, 324413973], '(00)3', +[325950140, 325950143], '(00)3', +[326675884, 326675887], '(00)3', +[326704208, 326704211], '3(00)', +[327596247, 327596250], '3(00)', +[328123172, 328123175], '3(00)', +[328182212, 328182215], '(00)3', +[328257498, 328257501], '3(00)', +[328315836, 328315839], '(00)3', +[328800974, 328800977], '(00)3', +[328998509, 328998512], '3(00)', +[329725370, 329725373], '(00)3', +[332080601, 332080604], '(00)3', +[332221246, 332221249], '(00)3', +[332299899, 332299902], '(00)3', +[332532822, 332532825], '(00)3', +[333334544, 333334548], '(00)22', +[333881266, 333881269], '3(00)', +[334703267, 334703270], '3(00)', +[334875138, 334875141], '3(00)', +[336531451, 336531454], '3(00)', +[336825907, 336825910], '(00)3', +[336993167, 336993170], '(00)3', +[337493998, 337494001], '3(00)', +[337861034, 337861037], '3(00)', +[337899191, 337899194], '(00)3', 
+[337958123, 337958126], '(00)3', +[342331982, 342331985], '3(00)', +[342676068, 342676071], '3(00)', +[347063781, 347063784], '3(00)', +[347697348, 347697351], '3(00)', +[347954319, 347954322], '3(00)', +[348162775, 348162778], '3(00)', +[349210702, 349210705], '(00)3', +[349212913, 349212916], '3(00)', +[349248650, 349248653], '(00)3', +[349913500, 349913503], '3(00)', +[350891529, 350891532], '3(00)', +[351089323, 351089326], '3(00)', +[351826158, 351826161], '3(00)', +[352228580, 352228583], '(00)3', +[352376244, 352376247], '3(00)', +[352853758, 352853761], '(00)3', +[355110439, 355110442], '(00)3', +[355808090, 355808094], '(00)40', +[355941556, 355941559], '3(00)', +[356360231, 356360234], '(00)3', +[356586657, 356586660], '3(00)', +[356892926, 356892929], '(00)3', +[356908232, 356908235], '3(00)', +[357912730, 357912733], '3(00)', +[358120344, 358120347], '3(00)', +[359044096, 359044099], '(00)3', +[360819357, 360819360], '3(00)', +[361399662, 361399666], '(010)3', +[362361315, 362361318], '(00)3', +[363610112, 363610115], '(00)3', +[363964804, 363964807], '3(00)', +[364527375, 364527378], '(00)3', +[365090327, 365090330], '(00)3', +[365414539, 365414542], '3(00)', +[366738474, 366738477], '3(00)', +[368714778, 368714783], '04(010)', +[368831545, 368831548], '(00)3', +[368902387, 368902390], '(00)3', +[370109769, 370109772], '3(00)', +[370963333, 370963336], '3(00)', +[372541136, 372541140], '3(010)', +[372681562, 372681565], '(00)3', +[373009410, 373009413], '(00)3', +[373458970, 373458973], '3(00)', +[375648658, 375648661], '3(00)', +[376834728, 376834731], '3(00)', +[377119945, 377119948], '(00)3', +[377335703, 377335706], '(00)3', +[378091745, 378091748], '3(00)', +[379139522, 379139525], '3(00)', +[380279160, 380279163], '(00)3', +[380619442, 380619445], '3(00)', +[381244231, 381244234], '3(00)', +[382327446, 382327450], '(010)3', +[382357073, 382357076], '3(00)', +[383545479, 383545482], '3(00)', +[384363766, 384363769], '(00)3', +[384401786, 
384401790], '22(00)', +[385198212, 385198215], '3(00)', +[385824476, 385824479], '(00)3', +[385908194, 385908197], '3(00)', +[386946806, 386946809], '3(00)', +[387592175, 387592179], '22(00)', +[388329293, 388329296], '(00)3', +[388679566, 388679569], '3(00)', +[388832142, 388832145], '3(00)', +[390087103, 390087106], '(00)3', +[390190926, 390190930], '(00)22', +[390331207, 390331210], '3(00)', +[391674495, 391674498], '3(00)', +[391937831, 391937834], '3(00)', +[391951632, 391951636], '(00)22', +[392963986, 392963989], '(00)3', +[393007921, 393007924], '3(00)', +[393373210, 393373213], '3(00)', +[393759572, 393759575], '(00)3', +[394036662, 394036665], '(00)3', +[395813866, 395813869], '(00)3', +[395956690, 395956693], '3(00)', +[396031670, 396031673], '3(00)', +[397076433, 397076436], '3(00)', +[397470601, 397470604], '3(00)', +[398289458, 398289461], '3(00)', +# +[368714778, 368714783], '04(010)', +[437953499, 437953504], '04(010)', +[526196233, 526196238], '032(00)', +[744719566, 744719571], '(010)40', +[750375857, 750375862], '032(00)', +[958241932, 958241937], '04(010)', +[983377342, 983377347], '(00)410', +[1003780080, 1003780085], '04(010)', +[1070232754, 1070232759], '(00)230', +[1209834865, 1209834870], '032(00)', +[1257209100, 1257209105], '(00)410', +[1368002233, 1368002238], '(00)230' +] diff --git a/.venv/lib/python3.11/site-packages/mpmath/identification.py b/.venv/lib/python3.11/site-packages/mpmath/identification.py new file mode 100644 index 0000000000000000000000000000000000000000..226f62d3fe9cacedbd9ba2b1e66ff0ad017fa604 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/identification.py @@ -0,0 +1,844 @@ +""" +Implements the PSLQ algorithm for integer relation detection, +and derivative algorithms for constant recognition. +""" + +from .libmp.backend import xrange +from .libmp import int_types, sqrt_fixed + +# round to nearest integer (can be done more elegantly...) 
+def round_fixed(x, prec): + return ((x + (1<<(prec-1))) >> prec) << prec + +class IdentificationMethods(object): + pass + + +def pslq(ctx, x, tol=None, maxcoeff=1000, maxsteps=100, verbose=False): + r""" + Given a vector of real numbers `x = [x_0, x_1, ..., x_n]`, ``pslq(x)`` + uses the PSLQ algorithm to find a list of integers + `[c_0, c_1, ..., c_n]` such that + + .. math :: + + |c_1 x_1 + c_2 x_2 + ... + c_n x_n| < \mathrm{tol} + + and such that `\max |c_k| < \mathrm{maxcoeff}`. If no such vector + exists, :func:`~mpmath.pslq` returns ``None``. The tolerance defaults to + 3/4 of the working precision. + + **Examples** + + Find rational approximations for `\pi`:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> pslq([-1, pi], tol=0.01) + [22, 7] + >>> pslq([-1, pi], tol=0.001) + [355, 113] + >>> mpf(22)/7; mpf(355)/113; +pi + 3.14285714285714 + 3.14159292035398 + 3.14159265358979 + + Pi is not a rational number with denominator less than 1000:: + + >>> pslq([-1, pi]) + >>> + + To within the standard precision, it can however be approximated + by at least one rational number with denominator less than `10^{12}`:: + + >>> p, q = pslq([-1, pi], maxcoeff=10**12) + >>> print(p); print(q) + 238410049439 + 75888275702 + >>> mpf(p)/q + 3.14159265358979 + + The PSLQ algorithm can be applied to long vectors. For example, + we can investigate the rational (in)dependence of integer square + roots:: + + >>> mp.dps = 30 + >>> pslq([sqrt(n) for n in range(2, 5+1)]) + >>> + >>> pslq([sqrt(n) for n in range(2, 6+1)]) + >>> + >>> pslq([sqrt(n) for n in range(2, 8+1)]) + [2, 0, 0, 0, 0, 0, -1] + + **Machin formulas** + + A famous formula for `\pi` is Machin's, + + .. math :: + + \frac{\pi}{4} = 4 \operatorname{acot} 5 - \operatorname{acot} 239 + + There are actually infinitely many formulas of this type. Two + others are + + .. 
math :: + + \frac{\pi}{4} = \operatorname{acot} 1 + + \frac{\pi}{4} = 12 \operatorname{acot} 49 + 32 \operatorname{acot} 57 + + 5 \operatorname{acot} 239 + 12 \operatorname{acot} 110443 + + We can easily verify the formulas using the PSLQ algorithm:: + + >>> mp.dps = 30 + >>> pslq([pi/4, acot(1)]) + [1, -1] + >>> pslq([pi/4, acot(5), acot(239)]) + [1, -4, 1] + >>> pslq([pi/4, acot(49), acot(57), acot(239), acot(110443)]) + [1, -12, -32, 5, -12] + + We could try to generate a custom Machin-like formula by running + the PSLQ algorithm with a few inverse cotangent values, for example + acot(2), acot(3) ... acot(10). Unfortunately, there is a linear + dependence among these values, resulting in only that dependence + being detected, with a zero coefficient for `\pi`:: + + >>> pslq([pi] + [acot(n) for n in range(2,11)]) + [0, 1, -1, 0, 0, 0, -1, 0, 0, 0] + + We get better luck by removing linearly dependent terms:: + + >>> pslq([pi] + [acot(n) for n in range(2,11) if n not in (3, 5)]) + [1, -8, 0, 0, 4, 0, 0, 0] + + In other words, we found the following formula:: + + >>> 8*acot(2) - 4*acot(7) + 3.14159265358979323846264338328 + >>> +pi + 3.14159265358979323846264338328 + + **Algorithm** + + This is a fairly direct translation to Python of the pseudocode given by + David Bailey, "The PSLQ Integer Relation Algorithm": + http://www.cecm.sfu.ca/organics/papers/bailey/paper/html/node3.html + + The present implementation uses fixed-point instead of floating-point + arithmetic, since this is significantly (about 7x) faster. 
+ """ + + n = len(x) + if n < 2: + raise ValueError("n cannot be less than 2") + + # At too low precision, the algorithm becomes meaningless + prec = ctx.prec + if prec < 53: + raise ValueError("prec cannot be less than 53") + + if verbose and prec // max(2,n) < 5: + print("Warning: precision for PSLQ may be too low") + + target = int(prec * 0.75) + + if tol is None: + tol = ctx.mpf(2)**(-target) + else: + tol = ctx.convert(tol) + + extra = 60 + prec += extra + + if verbose: + print("PSLQ using prec %i and tol %s" % (prec, ctx.nstr(tol))) + + tol = ctx.to_fixed(tol, prec) + assert tol + + # Convert to fixed-point numbers. The dummy None is added so we can + # use 1-based indexing. (This just allows us to be consistent with + # Bailey's indexing. The algorithm is 100 lines long, so debugging + # a single wrong index can be painful.) + x = [None] + [ctx.to_fixed(ctx.mpf(xk), prec) for xk in x] + + # Sanity check on magnitudes + minx = min(abs(xx) for xx in x[1:]) + if not minx: + raise ValueError("PSLQ requires a vector of nonzero numbers") + if minx < tol//100: + if verbose: + print("STOPPING: (one number is too small)") + return None + + g = sqrt_fixed((4<> prec) + s[k] = sqrt_fixed(t, prec) + t = s[1] + y = x[:] + for k in xrange(1, n+1): + y[k] = (x[k] << prec) // t + s[k] = (s[k] << prec) // t + # step 3 + for i in xrange(1, n+1): + for j in xrange(i+1, n): + H[i,j] = 0 + if i <= n-1: + if s[i]: + H[i,i] = (s[i+1] << prec) // s[i] + else: + H[i,i] = 0 + for j in range(1, i): + sjj1 = s[j]*s[j+1] + if sjj1: + H[i,j] = ((-y[i]*y[j])<> prec) + for k in xrange(1, j+1): + H[i,k] = H[i,k] - (t*H[j,k] >> prec) + for k in xrange(1, n+1): + A[i,k] = A[i,k] - (t*A[j,k] >> prec) + B[k,j] = B[k,j] + (t*B[k,i] >> prec) + # Main algorithm + for REP in range(maxsteps): + # Step 1 + m = -1 + szmax = -1 + for i in range(1, n): + h = H[i,i] + sz = (g**i * abs(h)) >> (prec*(i-1)) + if sz > szmax: + m = i + szmax = sz + # Step 2 + y[m], y[m+1] = y[m+1], y[m] + for i in 
xrange(1,n+1): H[m,i], H[m+1,i] = H[m+1,i], H[m,i] + for i in xrange(1,n+1): A[m,i], A[m+1,i] = A[m+1,i], A[m,i] + for i in xrange(1,n+1): B[i,m], B[i,m+1] = B[i,m+1], B[i,m] + # Step 3 + if m <= n - 2: + t0 = sqrt_fixed((H[m,m]**2 + H[m,m+1]**2)>>prec, prec) + # A zero element probably indicates that the precision has + # been exhausted. XXX: this could be spurious, due to + # using fixed-point arithmetic + if not t0: + break + t1 = (H[m,m] << prec) // t0 + t2 = (H[m,m+1] << prec) // t0 + for i in xrange(m, n+1): + t3 = H[i,m] + t4 = H[i,m+1] + H[i,m] = (t1*t3+t2*t4) >> prec + H[i,m+1] = (-t2*t3+t1*t4) >> prec + # Step 4 + for i in xrange(m+1, n+1): + for j in xrange(min(i-1, m+1), 0, -1): + try: + t = round_fixed((H[i,j] << prec)//H[j,j], prec) + # Precision probably exhausted + except ZeroDivisionError: + break + y[j] = y[j] + ((t*y[i]) >> prec) + for k in xrange(1, j+1): + H[i,k] = H[i,k] - (t*H[j,k] >> prec) + for k in xrange(1, n+1): + A[i,k] = A[i,k] - (t*A[j,k] >> prec) + B[k,j] = B[k,j] + (t*B[k,i] >> prec) + # Until a relation is found, the error typically decreases + # slowly (e.g. a factor 1-10) with each step TODO: we could + # compare err from two successive iterations. If there is a + # large drop (several orders of magnitude), that indicates a + # "high quality" relation was detected. Reporting this to + # the user somehow might be useful. + best_err = maxcoeff<> prec) for j in \ + range(1,n+1)] + if max(abs(v) for v in vec) < maxcoeff: + if verbose: + print("FOUND relation at iter %i/%i, error: %s" % \ + (REP, maxsteps, ctx.nstr(err / ctx.mpf(2)**prec, 1))) + return vec + best_err = min(err, best_err) + # Calculate a lower bound for the norm. We could do this + # more exactly (using the Euclidean norm) but there is probably + # no practical benefit. 
+ recnorm = max(abs(h) for h in H.values()) + if recnorm: + norm = ((1 << (2*prec)) // recnorm) >> prec + norm //= 100 + else: + norm = ctx.inf + if verbose: + print("%i/%i: Error: %8s Norm: %s" % \ + (REP, maxsteps, ctx.nstr(best_err / ctx.mpf(2)**prec, 1), norm)) + if norm >= maxcoeff: + break + if verbose: + print("CANCELLING after step %i/%i." % (REP, maxsteps)) + print("Could not find an integer relation. Norm bound: %s" % norm) + return None + +def findpoly(ctx, x, n=1, **kwargs): + r""" + ``findpoly(x, n)`` returns the coefficients of an integer + polynomial `P` of degree at most `n` such that `P(x) \approx 0`. + If no polynomial having `x` as a root can be found, + :func:`~mpmath.findpoly` returns ``None``. + + :func:`~mpmath.findpoly` works by successively calling :func:`~mpmath.pslq` with + the vectors `[1, x]`, `[1, x, x^2]`, `[1, x, x^2, x^3]`, ..., + `[1, x, x^2, .., x^n]` as input. Keyword arguments given to + :func:`~mpmath.findpoly` are forwarded verbatim to :func:`~mpmath.pslq`. In + particular, you can specify a tolerance for `P(x)` with ``tol`` + and a maximum permitted coefficient size with ``maxcoeff``. + + For large values of `n`, it is recommended to run :func:`~mpmath.findpoly` + at high precision; preferably 50 digits or more. + + **Examples** + + By default (degree `n = 1`), :func:`~mpmath.findpoly` simply finds a linear + polynomial with a rational root:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> findpoly(0.7) + [-10, 7] + + The generated coefficient list is valid input to ``polyval`` and + ``polyroots``:: + + >>> nprint(polyval(findpoly(phi, 2), phi), 1) + -2.0e-16 + >>> for r in polyroots(findpoly(phi, 2)): + ... print(r) + ... + -0.618033988749895 + 1.61803398874989 + + Numbers of the form `m + n \sqrt p` for integers `(m, n, p)` are + solutions to quadratic equations. 
As we find here, `1+\sqrt 2` + is a root of the polynomial `x^2 - 2x - 1`:: + + >>> findpoly(1+sqrt(2), 2) + [1, -2, -1] + >>> findroot(lambda x: x**2 - 2*x - 1, 1) + 2.4142135623731 + + Despite only containing square roots, the following number results + in a polynomial of degree 4:: + + >>> findpoly(sqrt(2)+sqrt(3), 4) + [1, 0, -10, 0, 1] + + In fact, `x^4 - 10x^2 + 1` is the *minimal polynomial* of + `r = \sqrt 2 + \sqrt 3`, meaning that a rational polynomial of + lower degree having `r` as a root does not exist. Given sufficient + precision, :func:`~mpmath.findpoly` will usually find the correct + minimal polynomial of a given algebraic number. + + **Non-algebraic numbers** + + If :func:`~mpmath.findpoly` fails to find a polynomial with given + coefficient size and tolerance constraints, that means no such + polynomial exists. + + We can verify that `\pi` is not an algebraic number of degree 3 with + coefficients less than 1000:: + + >>> mp.dps = 15 + >>> findpoly(pi, 3) + >>> + + It is always possible to find an algebraic approximation of a number + using one (or several) of the following methods: + + 1. Increasing the permitted degree + 2. Allowing larger coefficients + 3. Reducing the tolerance + + One example of each method is shown below:: + + >>> mp.dps = 15 + >>> findpoly(pi, 4) + [95, -545, 863, -183, -298] + >>> findpoly(pi, 3, maxcoeff=10000) + [836, -1734, -2658, -457] + >>> findpoly(pi, 3, tol=1e-7) + [-4, 22, -29, -2] + + It is unknown whether Euler's constant is transcendental (or even + irrational). We can use :func:`~mpmath.findpoly` to check that if is + an algebraic number, its minimal polynomial must have degree + at least 7 and a coefficient of magnitude at least 1000000:: + + >>> mp.dps = 200 + >>> findpoly(euler, 6, maxcoeff=10**6, tol=1e-100, maxsteps=1000) + >>> + + Note that the high precision and strict tolerance is necessary + for such high-degree runs, since otherwise unwanted low-accuracy + approximations will be detected. 
It may also be necessary to set + maxsteps high to prevent a premature exit (before the coefficient + bound has been reached). Running with ``verbose=True`` to get an + idea what is happening can be useful. + """ + x = ctx.mpf(x) + if n < 1: + raise ValueError("n cannot be less than 1") + if x == 0: + return [1, 0] + xs = [ctx.mpf(1)] + for i in range(1,n+1): + xs.append(x**i) + a = ctx.pslq(xs, **kwargs) + if a is not None: + return a[::-1] + +def fracgcd(p, q): + x, y = p, q + while y: + x, y = y, x % y + if x != 1: + p //= x + q //= x + if q == 1: + return p + return p, q + +def pslqstring(r, constants): + q = r[0] + r = r[1:] + s = [] + for i in range(len(r)): + p = r[i] + if p: + z = fracgcd(-p,q) + cs = constants[i][1] + if cs == '1': + cs = '' + else: + cs = '*' + cs + if isinstance(z, int_types): + if z > 0: term = str(z) + cs + else: term = ("(%s)" % z) + cs + else: + term = ("(%s/%s)" % z) + cs + s.append(term) + s = ' + '.join(s) + if '+' in s or '*' in s: + s = '(' + s + ')' + return s or '0' + +def prodstring(r, constants): + q = r[0] + r = r[1:] + num = [] + den = [] + for i in range(len(r)): + p = r[i] + if p: + z = fracgcd(-p,q) + cs = constants[i][1] + if isinstance(z, int_types): + if abs(z) == 1: t = cs + else: t = '%s**%s' % (cs, abs(z)) + ([num,den][z<0]).append(t) + else: + t = '%s**(%s/%s)' % (cs, abs(z[0]), z[1]) + ([num,den][z[0]<0]).append(t) + num = '*'.join(num) + den = '*'.join(den) + if num and den: return "(%s)/(%s)" % (num, den) + if num: return num + if den: return "1/(%s)" % den + +def quadraticstring(ctx,t,a,b,c): + if c < 0: + a,b,c = -a,-b,-c + u1 = (-b+ctx.sqrt(b**2-4*a*c))/(2*c) + u2 = (-b-ctx.sqrt(b**2-4*a*c))/(2*c) + if abs(u1-t) < abs(u2-t): + if b: s = '((%s+sqrt(%s))/%s)' % (-b,b**2-4*a*c,2*c) + else: s = '(sqrt(%s)/%s)' % (-4*a*c,2*c) + else: + if b: s = '((%s-sqrt(%s))/%s)' % (-b,b**2-4*a*c,2*c) + else: s = '(-sqrt(%s)/%s)' % (-4*a*c,2*c) + return s + +# Transformation y = f(x,c), with inverse function x = f(y,c) +# The 
third entry indicates whether the transformation is +# redundant when c = 1 +transforms = [ + (lambda ctx,x,c: x*c, '$y/$c', 0), + (lambda ctx,x,c: x/c, '$c*$y', 1), + (lambda ctx,x,c: c/x, '$c/$y', 0), + (lambda ctx,x,c: (x*c)**2, 'sqrt($y)/$c', 0), + (lambda ctx,x,c: (x/c)**2, '$c*sqrt($y)', 1), + (lambda ctx,x,c: (c/x)**2, '$c/sqrt($y)', 0), + (lambda ctx,x,c: c*x**2, 'sqrt($y)/sqrt($c)', 1), + (lambda ctx,x,c: x**2/c, 'sqrt($c)*sqrt($y)', 1), + (lambda ctx,x,c: c/x**2, 'sqrt($c)/sqrt($y)', 1), + (lambda ctx,x,c: ctx.sqrt(x*c), '$y**2/$c', 0), + (lambda ctx,x,c: ctx.sqrt(x/c), '$c*$y**2', 1), + (lambda ctx,x,c: ctx.sqrt(c/x), '$c/$y**2', 0), + (lambda ctx,x,c: c*ctx.sqrt(x), '$y**2/$c**2', 1), + (lambda ctx,x,c: ctx.sqrt(x)/c, '$c**2*$y**2', 1), + (lambda ctx,x,c: c/ctx.sqrt(x), '$c**2/$y**2', 1), + (lambda ctx,x,c: ctx.exp(x*c), 'log($y)/$c', 0), + (lambda ctx,x,c: ctx.exp(x/c), '$c*log($y)', 1), + (lambda ctx,x,c: ctx.exp(c/x), '$c/log($y)', 0), + (lambda ctx,x,c: c*ctx.exp(x), 'log($y/$c)', 1), + (lambda ctx,x,c: ctx.exp(x)/c, 'log($c*$y)', 1), + (lambda ctx,x,c: c/ctx.exp(x), 'log($c/$y)', 0), + (lambda ctx,x,c: ctx.ln(x*c), 'exp($y)/$c', 0), + (lambda ctx,x,c: ctx.ln(x/c), '$c*exp($y)', 1), + (lambda ctx,x,c: ctx.ln(c/x), '$c/exp($y)', 0), + (lambda ctx,x,c: c*ctx.ln(x), 'exp($y/$c)', 1), + (lambda ctx,x,c: ctx.ln(x)/c, 'exp($c*$y)', 1), + (lambda ctx,x,c: c/ctx.ln(x), 'exp($c/$y)', 0), +] + +def identify(ctx, x, constants=[], tol=None, maxcoeff=1000, full=False, + verbose=False): + r""" + Given a real number `x`, ``identify(x)`` attempts to find an exact + formula for `x`. This formula is returned as a string. If no match + is found, ``None`` is returned. With ``full=True``, a list of + matching formulas is returned. 
+ + As a simple example, :func:`~mpmath.identify` will find an algebraic + formula for the golden ratio:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> identify(phi) + '((1+sqrt(5))/2)' + + :func:`~mpmath.identify` can identify simple algebraic numbers and simple + combinations of given base constants, as well as certain basic + transformations thereof. More specifically, :func:`~mpmath.identify` + looks for the following: + + 1. Fractions + 2. Quadratic algebraic numbers + 3. Rational linear combinations of the base constants + 4. Any of the above after first transforming `x` into `f(x)` where + `f(x)` is `1/x`, `\sqrt x`, `x^2`, `\log x` or `\exp x`, either + directly or with `x` or `f(x)` multiplied or divided by one of + the base constants + 5. Products of fractional powers of the base constants and + small integers + + Base constants can be given as a list of strings representing mpmath + expressions (:func:`~mpmath.identify` will ``eval`` the strings to numerical + values and use the original strings for the output), or as a dict of + formula:value pairs. + + In order not to produce spurious results, :func:`~mpmath.identify` should + be used with high precision; preferably 50 digits or more. + + **Examples** + + Simple identifications can be performed safely at standard + precision. Here the default recognition of rational, algebraic, + and exp/log of algebraic numbers is demonstrated:: + + >>> mp.dps = 15 + >>> identify(0.22222222222222222) + '(2/9)' + >>> identify(1.9662210973805663) + 'sqrt(((24+sqrt(48))/8))' + >>> identify(4.1132503787829275) + 'exp((sqrt(8)/2))' + >>> identify(0.881373587019543) + 'log(((2+sqrt(8))/2))' + + By default, :func:`~mpmath.identify` does not recognize `\pi`. At standard + precision it finds a not too useful approximation. 
At slightly + increased precision, this approximation is no longer accurate + enough and :func:`~mpmath.identify` more correctly returns ``None``:: + + >>> identify(pi) + '(2**(176/117)*3**(20/117)*5**(35/39))/(7**(92/117))' + >>> mp.dps = 30 + >>> identify(pi) + >>> + + Numbers such as `\pi`, and simple combinations of user-defined + constants, can be identified if they are provided explicitly:: + + >>> identify(3*pi-2*e, ['pi', 'e']) + '(3*pi + (-2)*e)' + + Here is an example using a dict of constants. Note that the + constants need not be "atomic"; :func:`~mpmath.identify` can just + as well express the given number in terms of expressions + given by formulas:: + + >>> identify(pi+e, {'a':pi+2, 'b':2*e}) + '((-2) + 1*a + (1/2)*b)' + + Next, we attempt some identifications with a set of base constants. + It is necessary to increase the precision a bit. + + >>> mp.dps = 50 + >>> base = ['sqrt(2)','pi','log(2)'] + >>> identify(0.25, base) + '(1/4)' + >>> identify(3*pi + 2*sqrt(2) + 5*log(2)/7, base) + '(2*sqrt(2) + 3*pi + (5/7)*log(2))' + >>> identify(exp(pi+2), base) + 'exp((2 + 1*pi))' + >>> identify(1/(3+sqrt(2)), base) + '((3/7) + (-1/7)*sqrt(2))' + >>> identify(sqrt(2)/(3*pi+4), base) + 'sqrt(2)/(4 + 3*pi)' + >>> identify(5**(mpf(1)/3)*pi*log(2)**2, base) + '5**(1/3)*pi*log(2)**2' + + An example of an erroneous solution being found when too low + precision is used:: + + >>> mp.dps = 15 + >>> identify(1/(3*pi-4*e+sqrt(8)), ['pi', 'e', 'sqrt(2)']) + '((11/25) + (-158/75)*pi + (76/75)*e + (44/15)*sqrt(2))' + >>> mp.dps = 50 + >>> identify(1/(3*pi-4*e+sqrt(8)), ['pi', 'e', 'sqrt(2)']) + '1/(3*pi + (-4)*e + 2*sqrt(2))' + + **Finding approximate solutions** + + The tolerance ``tol`` defaults to 3/4 of the working precision. + Lowering the tolerance is useful for finding approximate matches. 
+ We can for example try to generate approximations for pi:: + + >>> mp.dps = 15 + >>> identify(pi, tol=1e-2) + '(22/7)' + >>> identify(pi, tol=1e-3) + '(355/113)' + >>> identify(pi, tol=1e-10) + '(5**(339/269))/(2**(64/269)*3**(13/269)*7**(92/269))' + + With ``full=True``, and by supplying a few base constants, + ``identify`` can generate almost endless lists of approximations + for any number (the output below has been truncated to show only + the first few):: + + >>> for p in identify(pi, ['e', 'catalan'], tol=1e-5, full=True): + ... print(p) + ... # doctest: +ELLIPSIS + e/log((6 + (-4/3)*e)) + (3**3*5*e*catalan**2)/(2*7**2) + sqrt(((-13) + 1*e + 22*catalan)) + log(((-6) + 24*e + 4*catalan)/e) + exp(catalan*((-1/5) + (8/15)*e)) + catalan*(6 + (-6)*e + 15*catalan) + sqrt((5 + 26*e + (-3)*catalan))/e + e*sqrt(((-27) + 2*e + 25*catalan)) + log(((-1) + (-11)*e + 59*catalan)) + ((3/20) + (21/20)*e + (3/20)*catalan) + ... + + The numerical values are roughly as close to `\pi` as permitted by the + specified tolerance: + + >>> e/log(6-4*e/3) + 3.14157719846001 + >>> 135*e*catalan**2/98 + 3.14166950419369 + >>> sqrt(e-13+22*catalan) + 3.14158000062992 + >>> log(24*e-6+4*catalan)-1 + 3.14158791577159 + + **Symbolic processing** + + The output formula can be evaluated as a Python expression. + Note however that if fractions (like '2/3') are present in + the formula, Python's :func:`~mpmath.eval()` may erroneously perform + integer division. Note also that the output is not necessarily + in the algebraically simplest form:: + + >>> identify(sqrt(2)) + '(sqrt(8)/2)' + + As a solution to both problems, consider using SymPy's + :func:`~mpmath.sympify` to convert the formula into a symbolic expression. 
+ SymPy can be used to pretty-print or further simplify the formula + symbolically:: + + >>> from sympy import sympify # doctest: +SKIP + >>> sympify(identify(sqrt(2))) # doctest: +SKIP + 2**(1/2) + + Sometimes :func:`~mpmath.identify` can simplify an expression further than + a symbolic algorithm:: + + >>> from sympy import simplify # doctest: +SKIP + >>> x = sympify('-1/(-3/2+(1/2)*5**(1/2))*(3/2-1/2*5**(1/2))**(1/2)') # doctest: +SKIP + >>> x # doctest: +SKIP + (3/2 - 5**(1/2)/2)**(-1/2) + >>> x = simplify(x) # doctest: +SKIP + >>> x # doctest: +SKIP + 2/(6 - 2*5**(1/2))**(1/2) + >>> mp.dps = 30 # doctest: +SKIP + >>> x = sympify(identify(x.evalf(30))) # doctest: +SKIP + >>> x # doctest: +SKIP + 1/2 + 5**(1/2)/2 + + (In fact, this functionality is available directly in SymPy as the + function :func:`~mpmath.nsimplify`, which is essentially a wrapper for + :func:`~mpmath.identify`.) + + **Miscellaneous issues and limitations** + + The input `x` must be a real number. All base constants must be + positive real numbers and must not be rationals or rational linear + combinations of each other. + + The worst-case computation time grows quickly with the number of + base constants. Already with 3 or 4 base constants, + :func:`~mpmath.identify` may require several seconds to finish. To search + for relations among a large number of constants, you should + consider using :func:`~mpmath.pslq` directly. + + The extended transformations are applied to x, not the constants + separately. As a result, ``identify`` will for example be able to + recognize ``exp(2*pi+3)`` with ``pi`` given as a base constant, but + not ``2*exp(pi)+3``. It will be able to recognize the latter if + ``exp(pi)`` is given explicitly as a base constant. 
+ + """ + + solutions = [] + + def addsolution(s): + if verbose: print("Found: ", s) + solutions.append(s) + + x = ctx.mpf(x) + + # Further along, x will be assumed positive + if x == 0: + if full: return ['0'] + else: return '0' + if x < 0: + sol = ctx.identify(-x, constants, tol, maxcoeff, full, verbose) + if sol is None: + return sol + if full: + return ["-(%s)"%s for s in sol] + else: + return "-(%s)" % sol + + if tol: + tol = ctx.mpf(tol) + else: + tol = ctx.eps**0.7 + M = maxcoeff + + if constants: + if isinstance(constants, dict): + constants = [(ctx.mpf(v), name) for (name, v) in sorted(constants.items())] + else: + namespace = dict((name, getattr(ctx,name)) for name in dir(ctx)) + constants = [(eval(p, namespace), p) for p in constants] + else: + constants = [] + + # We always want to find at least rational terms + if 1 not in [value for (name, value) in constants]: + constants = [(ctx.mpf(1), '1')] + constants + + # PSLQ with simple algebraic and functional transformations + for ft, ftn, red in transforms: + for c, cn in constants: + if red and cn == '1': + continue + t = ft(ctx,x,c) + # Prevent exponential transforms from wreaking havoc + if abs(t) > M**2 or abs(t) < tol: + continue + # Linear combination of base constants + r = ctx.pslq([t] + [a[0] for a in constants], tol, M) + s = None + if r is not None and max(abs(uw) for uw in r) <= M and r[0]: + s = pslqstring(r, constants) + # Quadratic algebraic numbers + else: + q = ctx.pslq([ctx.one, t, t**2], tol, M) + if q is not None and len(q) == 3 and q[2]: + aa, bb, cc = q + if max(abs(aa),abs(bb),abs(cc)) <= M: + s = quadraticstring(ctx,t,aa,bb,cc) + if s: + if cn == '1' and ('/$c' in ftn): + s = ftn.replace('$y', s).replace('/$c', '') + else: + s = ftn.replace('$y', s).replace('$c', cn) + addsolution(s) + if not full: return solutions[0] + + if verbose: + print(".") + + # Check for a direct multiplicative formula + if x != 1: + # Allow fractional powers of fractions + ilogs = [2,3,5,7] + # Watch out 
for existing fractional powers of fractions + logs = [] + for a, s in constants: + if not sum(bool(ctx.findpoly(ctx.ln(a)/ctx.ln(i),1)) for i in ilogs): + logs.append((ctx.ln(a), s)) + logs = [(ctx.ln(i),str(i)) for i in ilogs] + logs + r = ctx.pslq([ctx.ln(x)] + [a[0] for a in logs], tol, M) + if r is not None and max(abs(uw) for uw in r) <= M and r[0]: + addsolution(prodstring(r, logs)) + if not full: return solutions[0] + + if full: + return sorted(solutions, key=len) + else: + return None + +IdentificationMethods.pslq = pslq +IdentificationMethods.findpoly = findpoly +IdentificationMethods.identify = identify + + +if __name__ == '__main__': + import doctest + doctest.testmod() diff --git a/.venv/lib/python3.11/site-packages/mpmath/libmp/__init__.py b/.venv/lib/python3.11/site-packages/mpmath/libmp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..1573114afc4fbce73f2ba9d2ddc99882c00027c0 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/libmp/__init__.py @@ -0,0 +1,77 @@ +from .libmpf import (prec_to_dps, dps_to_prec, repr_dps, + round_down, round_up, round_floor, round_ceiling, round_nearest, + to_pickable, from_pickable, ComplexResult, + fzero, fnzero, fone, fnone, ftwo, ften, fhalf, fnan, finf, fninf, + math_float_inf, round_int, normalize, normalize1, + from_man_exp, from_int, to_man_exp, to_int, mpf_ceil, mpf_floor, + mpf_nint, mpf_frac, + from_float, from_npfloat, from_Decimal, to_float, from_rational, to_rational, to_fixed, + mpf_rand, mpf_eq, mpf_hash, mpf_cmp, mpf_lt, mpf_le, mpf_gt, mpf_ge, + mpf_pos, mpf_neg, mpf_abs, mpf_sign, mpf_add, mpf_sub, mpf_sum, + mpf_mul, mpf_mul_int, mpf_shift, mpf_frexp, + mpf_div, mpf_rdiv_int, mpf_mod, mpf_pow_int, + mpf_perturb, + to_digits_exp, to_str, str_to_man_exp, from_str, from_bstr, to_bstr, + mpf_sqrt, mpf_hypot) + +from .libmpc import (mpc_one, mpc_zero, mpc_two, mpc_half, + mpc_is_inf, mpc_is_infnan, mpc_to_str, mpc_to_complex, mpc_hash, + mpc_conjugate, 
mpc_is_nonzero, mpc_add, mpc_add_mpf, + mpc_sub, mpc_sub_mpf, mpc_pos, mpc_neg, mpc_shift, mpc_abs, + mpc_arg, mpc_floor, mpc_ceil, mpc_nint, mpc_frac, mpc_mul, mpc_square, + mpc_mul_mpf, mpc_mul_imag_mpf, mpc_mul_int, + mpc_div, mpc_div_mpf, mpc_reciprocal, mpc_mpf_div, + complex_int_pow, mpc_pow, mpc_pow_mpf, mpc_pow_int, + mpc_sqrt, mpc_nthroot, mpc_cbrt, mpc_exp, mpc_log, mpc_cos, mpc_sin, + mpc_tan, mpc_cos_pi, mpc_sin_pi, mpc_cosh, mpc_sinh, mpc_tanh, + mpc_atan, mpc_acos, mpc_asin, mpc_asinh, mpc_acosh, mpc_atanh, + mpc_fibonacci, mpf_expj, mpf_expjpi, mpc_expj, mpc_expjpi, + mpc_cos_sin, mpc_cos_sin_pi) + +from .libelefun import (ln2_fixed, mpf_ln2, ln10_fixed, mpf_ln10, + pi_fixed, mpf_pi, e_fixed, mpf_e, phi_fixed, mpf_phi, + degree_fixed, mpf_degree, + mpf_pow, mpf_nthroot, mpf_cbrt, log_int_fixed, agm_fixed, + mpf_log, mpf_log_hypot, mpf_exp, mpf_cos_sin, mpf_cos, mpf_sin, mpf_tan, + mpf_cos_sin_pi, mpf_cos_pi, mpf_sin_pi, mpf_cosh_sinh, + mpf_cosh, mpf_sinh, mpf_tanh, mpf_atan, mpf_atan2, mpf_asin, + mpf_acos, mpf_asinh, mpf_acosh, mpf_atanh, mpf_fibonacci) + +from .libhyper import (NoConvergence, make_hyp_summator, + mpf_erf, mpf_erfc, mpf_ei, mpc_ei, mpf_e1, mpc_e1, mpf_expint, + mpf_ci_si, mpf_ci, mpf_si, mpc_ci, mpc_si, mpf_besseljn, + mpc_besseljn, mpf_agm, mpf_agm1, mpc_agm, mpc_agm1, + mpf_ellipk, mpc_ellipk, mpf_ellipe, mpc_ellipe) + +from .gammazeta import (catalan_fixed, mpf_catalan, + khinchin_fixed, mpf_khinchin, glaisher_fixed, mpf_glaisher, + apery_fixed, mpf_apery, euler_fixed, mpf_euler, mertens_fixed, + mpf_mertens, twinprime_fixed, mpf_twinprime, + mpf_bernoulli, bernfrac, mpf_gamma_int, + mpf_factorial, mpc_factorial, mpf_gamma, mpc_gamma, + mpf_loggamma, mpc_loggamma, mpf_rgamma, mpc_rgamma, + mpf_harmonic, mpc_harmonic, mpf_psi0, mpc_psi0, + mpf_psi, mpc_psi, mpf_zeta_int, mpf_zeta, mpc_zeta, + mpf_altzeta, mpc_altzeta, mpf_zetasum, mpc_zetasum) + +from .libmpi import (mpi_str, + mpi_from_str, mpi_to_str, + mpi_eq, mpi_ne, + 
mpi_lt, mpi_le, mpi_gt, mpi_ge, + mpi_add, mpi_sub, mpi_delta, mpi_mid, + mpi_pos, mpi_neg, mpi_abs, mpi_mul, mpi_div, mpi_exp, + mpi_log, mpi_sqrt, mpi_pow_int, mpi_pow, mpi_cos_sin, + mpi_cos, mpi_sin, mpi_tan, mpi_cot, + mpi_atan, mpi_atan2, + mpci_pos, mpci_neg, mpci_add, mpci_sub, mpci_mul, mpci_div, mpci_pow, + mpci_abs, mpci_pow, mpci_exp, mpci_log, mpci_cos, mpci_sin, + mpi_gamma, mpci_gamma, mpi_loggamma, mpci_loggamma, + mpi_rgamma, mpci_rgamma, mpi_factorial, mpci_factorial) + +from .libintmath import (trailing, bitcount, numeral, bin_to_radix, + isqrt, isqrt_small, isqrt_fast, sqrt_fixed, sqrtrem, ifib, ifac, + list_primes, isprime, moebius, gcd, eulernum, stirling1, stirling2) + +from .backend import (gmpy, sage, BACKEND, STRICT, MPZ, MPZ_TYPE, + MPZ_ZERO, MPZ_ONE, MPZ_TWO, MPZ_THREE, MPZ_FIVE, int_types, + HASH_MODULUS, HASH_BITS) diff --git a/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..df177e56bc1f80a351ef247f4aecde561ac826bf Binary files /dev/null and b/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/__init__.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/backend.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/backend.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e6ae6613ee4c2242afc97983f0e6cf058d50385f Binary files /dev/null and b/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/backend.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/gammazeta.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/gammazeta.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4a68b14d9299ebd0e7e529bbb431a625bccef78f Binary files 
/dev/null and b/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/gammazeta.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/libelefun.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/libelefun.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b75a4da796e03dba23dbaa8f7640a26fc54952fc Binary files /dev/null and b/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/libelefun.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/libhyper.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/libhyper.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..69faff7252818c170d6ae8f3e7809f1932d524af Binary files /dev/null and b/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/libhyper.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/libintmath.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/libintmath.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3c7e7a643856bc9c59874291517f116a572860bd Binary files /dev/null and b/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/libintmath.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/libmpc.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/libmpc.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..e813186bc78053eb29a168cef469033e742d4ca6 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/libmpc.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/libmpf.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/libmpf.cpython-311.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..385be3e9e55ec20b5fca0d25388fccbbe9ce6739 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/libmpf.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/libmpi.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/libmpi.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b73dd276827c1bc0e0c0f6a03d793cd22e48e932 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/mpmath/libmp/__pycache__/libmpi.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/libmp/backend.py b/.venv/lib/python3.11/site-packages/mpmath/libmp/backend.py new file mode 100644 index 0000000000000000000000000000000000000000..5610221290a05078f21f09df3c1a76b0e4ccdc02 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/libmp/backend.py @@ -0,0 +1,115 @@ +import os +import sys + +#----------------------------------------------------------------------------# +# Support GMPY for high-speed large integer arithmetic. # +# # +# To allow an external module to handle arithmetic, we need to make sure # +# that all high-precision variables are declared of the correct type. MPZ # +# is the constructor for the high-precision type. It defaults to Python's # +# long type but can be assinged another type, typically gmpy.mpz. # +# # +# MPZ must be used for the mantissa component of an mpf and must be used # +# for internal fixed-point operations. # +# # +# Side-effects # +# 1) "is" cannot be used to test for special values. Must use "==". # +# 2) There are bugs in GMPY prior to v1.02 so we must use v1.03 or later. 
# +#----------------------------------------------------------------------------# + +# So we can import it from this module +gmpy = None +sage = None +sage_utils = None + +if sys.version_info[0] < 3: + python3 = False +else: + python3 = True + +BACKEND = 'python' + +if not python3: + MPZ = long + xrange = xrange + basestring = basestring + + def exec_(_code_, _globs_=None, _locs_=None): + """Execute code in a namespace.""" + if _globs_ is None: + frame = sys._getframe(1) + _globs_ = frame.f_globals + if _locs_ is None: + _locs_ = frame.f_locals + del frame + elif _locs_ is None: + _locs_ = _globs_ + exec("""exec _code_ in _globs_, _locs_""") +else: + MPZ = int + xrange = range + basestring = str + + import builtins + exec_ = getattr(builtins, "exec") + +# Define constants for calculating hash on Python 3.2. +if sys.version_info >= (3, 2): + HASH_MODULUS = sys.hash_info.modulus + if sys.hash_info.width == 32: + HASH_BITS = 31 + else: + HASH_BITS = 61 +else: + HASH_MODULUS = None + HASH_BITS = None + +if 'MPMATH_NOGMPY' not in os.environ: + try: + try: + import gmpy2 as gmpy + except ImportError: + try: + import gmpy + except ImportError: + raise ImportError + if gmpy.version() >= '1.03': + BACKEND = 'gmpy' + MPZ = gmpy.mpz + except: + pass + +if ('MPMATH_NOSAGE' not in os.environ and 'SAGE_ROOT' in os.environ or + 'MPMATH_SAGE' in os.environ): + try: + import sage.all + import sage.libs.mpmath.utils as _sage_utils + sage = sage.all + sage_utils = _sage_utils + BACKEND = 'sage' + MPZ = sage.Integer + except: + pass + +if 'MPMATH_STRICT' in os.environ: + STRICT = True +else: + STRICT = False + +MPZ_TYPE = type(MPZ(0)) +MPZ_ZERO = MPZ(0) +MPZ_ONE = MPZ(1) +MPZ_TWO = MPZ(2) +MPZ_THREE = MPZ(3) +MPZ_FIVE = MPZ(5) + +try: + if BACKEND == 'python': + int_types = (int, long) + else: + int_types = (int, long, MPZ_TYPE) +except NameError: + if BACKEND == 'python': + int_types = (int,) + else: + int_types = (int, MPZ_TYPE) diff --git 
a/.venv/lib/python3.11/site-packages/mpmath/libmp/gammazeta.py b/.venv/lib/python3.11/site-packages/mpmath/libmp/gammazeta.py new file mode 100644 index 0000000000000000000000000000000000000000..3b05cc63c5f00e6c76d8383853dba06f15e46030 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/libmp/gammazeta.py @@ -0,0 +1,2167 @@ +""" +----------------------------------------------------------------------- +This module implements gamma- and zeta-related functions: + +* Bernoulli numbers +* Factorials +* The gamma function +* Polygamma functions +* Harmonic numbers +* The Riemann zeta function +* Constants related to these functions + +----------------------------------------------------------------------- +""" + +import math +import sys + +from .backend import xrange +from .backend import MPZ, MPZ_ZERO, MPZ_ONE, MPZ_THREE, gmpy + +from .libintmath import list_primes, ifac, ifac2, moebius + +from .libmpf import (\ + round_floor, round_ceiling, round_down, round_up, + round_nearest, round_fast, + lshift, sqrt_fixed, isqrt_fast, + fzero, fone, fnone, fhalf, ftwo, finf, fninf, fnan, + from_int, to_int, to_fixed, from_man_exp, from_rational, + mpf_pos, mpf_neg, mpf_abs, mpf_add, mpf_sub, + mpf_mul, mpf_mul_int, mpf_div, mpf_sqrt, mpf_pow_int, + mpf_rdiv_int, + mpf_perturb, mpf_le, mpf_lt, mpf_gt, mpf_shift, + negative_rnd, reciprocal_rnd, + bitcount, to_float, mpf_floor, mpf_sign, ComplexResult +) + +from .libelefun import (\ + constant_memo, + def_mpf_constant, + mpf_pi, pi_fixed, ln2_fixed, log_int_fixed, mpf_ln2, + mpf_exp, mpf_log, mpf_pow, mpf_cosh, + mpf_cos_sin, mpf_cosh_sinh, mpf_cos_sin_pi, mpf_cos_pi, mpf_sin_pi, + ln_sqrt2pi_fixed, mpf_ln_sqrt2pi, sqrtpi_fixed, mpf_sqrtpi, + cos_sin_fixed, exp_fixed +) + +from .libmpc import (\ + mpc_zero, mpc_one, mpc_half, mpc_two, + mpc_abs, mpc_shift, mpc_pos, mpc_neg, + mpc_add, mpc_sub, mpc_mul, mpc_div, + mpc_add_mpf, mpc_mul_mpf, mpc_div_mpf, mpc_mpf_div, + mpc_mul_int, mpc_pow_int, + mpc_log, mpc_exp, mpc_pow, + 
mpc_cos_pi, mpc_sin_pi, + mpc_reciprocal, mpc_square, + mpc_sub_mpf +) + + + +# Catalan's constant is computed using Lupas's rapidly convergent series +# (listed on http://mathworld.wolfram.com/CatalansConstant.html) +# oo +# ___ n-1 8n 2 3 2 +# 1 \ (-1) 2 (40n - 24n + 3) [(2n)!] (n!) +# K = --- ) ----------------------------------------- +# 64 /___ 3 2 +# n (2n-1) [(4n)!] +# n = 1 + +@constant_memo +def catalan_fixed(prec): + prec = prec + 20 + a = one = MPZ_ONE << prec + s, t, n = 0, 1, 1 + while t: + a *= 32 * n**3 * (2*n-1) + a //= (3-16*n+16*n**2)**2 + t = a * (-1)**(n-1) * (40*n**2-24*n+3) // (n**3 * (2*n-1)) + s += t + n += 1 + return s >> (20 + 6) + +# Khinchin's constant is relatively difficult to compute. Here +# we use the rational zeta series + +# oo 2*n-1 +# ___ ___ +# \ ` zeta(2*n)-1 \ ` (-1)^(k+1) +# log(K)*log(2) = ) ------------ ) ---------- +# /___. n /___. k +# n = 1 k = 1 + +# which adds half a digit per term. The essential trick for achieving +# reasonable efficiency is to recycle both the values of the zeta +# function (essentially Bernoulli numbers) and the partial terms of +# the inner sum. + +# An alternative might be to use K = 2*exp[1/log(2) X] where + +# / 1 1 [ pi*x*(1-x^2) ] +# X = | ------ log [ ------------ ]. +# / 0 x(1+x) [ sin(pi*x) ] + +# and integrate numerically. In practice, this seems to be slightly +# slower than the zeta series at high precision. 
+ +@constant_memo +def khinchin_fixed(prec): + wp = int(prec + prec**0.5 + 15) + s = MPZ_ZERO + fac = from_int(4) + t = ONE = MPZ_ONE << wp + pi = mpf_pi(wp) + pipow = twopi2 = mpf_shift(mpf_mul(pi, pi, wp), 2) + n = 1 + while 1: + zeta2n = mpf_abs(mpf_bernoulli(2*n, wp)) + zeta2n = mpf_mul(zeta2n, pipow, wp) + zeta2n = mpf_div(zeta2n, fac, wp) + zeta2n = to_fixed(zeta2n, wp) + term = (((zeta2n - ONE) * t) // n) >> wp + if term < 100: + break + #if not n % 10: + # print n, math.log(int(abs(term))) + s += term + t += ONE//(2*n+1) - ONE//(2*n) + n += 1 + fac = mpf_mul_int(fac, (2*n)*(2*n-1), wp) + pipow = mpf_mul(pipow, twopi2, wp) + s = (s << wp) // ln2_fixed(wp) + K = mpf_exp(from_man_exp(s, -wp), wp) + K = to_fixed(K, prec) + return K + + +# Glaisher's constant is defined as A = exp(1/2 - zeta'(-1)). +# One way to compute it would be to perform direct numerical +# differentiation, but computing arbitrary Riemann zeta function +# values at high precision is expensive. We instead use the formula + +# A = exp((6 (-zeta'(2))/pi^2 + log 2 pi + gamma)/12) + +# and compute zeta'(2) from the series representation + +# oo +# ___ +# \ log k +# -zeta'(2) = ) ----- +# /___ 2 +# k +# k = 2 + +# This series converges exceptionally slowly, but can be accelerated +# using Euler-Maclaurin formula. The important insight is that the +# E-M integral can be done in closed form and that the high order +# are given by + +# n / \ +# d | log x | a + b log x +# --- | ----- | = ----------- +# n | 2 | 2 + n +# dx \ x / x + +# where a and b are integers given by a simple recurrence. Note +# that just one logarithm is needed. However, lots of integer +# logarithms are required for the initial summation. + +# This algorithm could possibly be turned into a faster algorithm +# for general evaluation of zeta(s) or zeta'(s); this should be +# looked into. 
+ +@constant_memo +def glaisher_fixed(prec): + wp = prec + 30 + # Number of direct terms to sum before applying the Euler-Maclaurin + # formula to the tail. TODO: choose more intelligently + N = int(0.33*prec + 5) + ONE = MPZ_ONE << wp + # Euler-Maclaurin, step 1: sum log(k)/k**2 for k from 2 to N-1 + s = MPZ_ZERO + for k in range(2, N): + #print k, N + s += log_int_fixed(k, wp) // k**2 + logN = log_int_fixed(N, wp) + #logN = to_fixed(mpf_log(from_int(N), wp+20), wp) + # E-M step 2: integral of log(x)/x**2 from N to inf + s += (ONE + logN) // N + # E-M step 3: endpoint correction term f(N)/2 + s += logN // (N**2 * 2) + # E-M step 4: the series of derivatives + pN = N**3 + a = 1 + b = -2 + j = 3 + fac = from_int(2) + k = 1 + while 1: + # D(2*k-1) * B(2*k) / fac(2*k) [D(n) = nth derivative] + D = ((a << wp) + b*logN) // pN + D = from_man_exp(D, -wp) + B = mpf_bernoulli(2*k, wp) + term = mpf_mul(B, D, wp) + term = mpf_div(term, fac, wp) + term = to_fixed(term, wp) + if abs(term) < 100: + break + #if not k % 10: + # print k, math.log(int(abs(term)), 10) + s -= term + # Advance derivative twice + a, b, pN, j = b-a*j, -j*b, pN*N, j+1 + a, b, pN, j = b-a*j, -j*b, pN*N, j+1 + k += 1 + fac = mpf_mul_int(fac, (2*k)*(2*k-1), wp) + # A = exp((6*s/pi**2 + log(2*pi) + euler)/12) + pi = pi_fixed(wp) + s *= 6 + s = (s << wp) // (pi**2 >> wp) + s += euler_fixed(wp) + s += to_fixed(mpf_log(from_man_exp(2*pi, -wp), wp), wp) + s //= 12 + A = mpf_exp(from_man_exp(s, -wp), wp) + return to_fixed(A, prec) + +# Apery's constant can be computed using the very rapidly convergent +# series +# oo +# ___ 2 10 +# \ n 205 n + 250 n + 77 (n!) +# zeta(3) = ) (-1) ------------------- ---------- +# /___ 64 5 +# n = 0 ((2n+1)!) 
+ +@constant_memo +def apery_fixed(prec): + prec += 20 + d = MPZ_ONE << prec + term = MPZ(77) << prec + n = 1 + s = MPZ_ZERO + while term: + s += term + d *= (n**10) + d //= (((2*n+1)**5) * (2*n)**5) + term = (-1)**n * (205*(n**2) + 250*n + 77) * d + n += 1 + return s >> (20 + 6) + +""" +Euler's constant (gamma) is computed using the Brent-McMillan formula, +gamma ~= I(n)/J(n) - log(n), where + + I(n) = sum_{k=0,1,2,...} (n**k / k!)**2 * H(k) + J(n) = sum_{k=0,1,2,...} (n**k / k!)**2 + H(k) = 1 + 1/2 + 1/3 + ... + 1/k + +The error is bounded by O(exp(-4n)). Choosing n to be a power +of two, 2**p, the logarithm becomes particularly easy to calculate.[1] + +We use the formulation of Algorithm 3.9 in [2] to make the summation +more efficient. + +Reference: +[1] Xavier Gourdon & Pascal Sebah, The Euler constant: gamma +http://numbers.computation.free.fr/Constants/Gamma/gamma.pdf + +[2] [BorweinBailey]_ +""" + +@constant_memo +def euler_fixed(prec): + extra = 30 + prec += extra + # choose p such that exp(-4*(2**p)) < 2**-n + p = int(math.log((prec/4) * math.log(2), 2)) + 1 + n = 2**p + A = U = -p*ln2_fixed(prec) + B = V = MPZ_ONE << prec + k = 1 + while 1: + B = B*n**2//k**2 + A = (A*n**2//k + B)//k + U += A + V += B + if max(abs(A), abs(B)) < 100: + break + k += 1 + return (U<<(prec-extra))//V + +# Use zeta accelerated formulas for the Mertens and twin +# prime constants; see +# http://mathworld.wolfram.com/MertensConstant.html +# http://mathworld.wolfram.com/TwinPrimesConstant.html + +@constant_memo +def mertens_fixed(prec): + wp = prec + 20 + m = 2 + s = mpf_euler(wp) + while 1: + t = mpf_zeta_int(m, wp) + if t == fone: + break + t = mpf_log(t, wp) + t = mpf_mul_int(t, moebius(m), wp) + t = mpf_div(t, from_int(m), wp) + s = mpf_add(s, t) + m += 1 + return to_fixed(s, prec) + +@constant_memo +def twinprime_fixed(prec): + def I(n): + return sum(moebius(d)<<(n//d) for d in xrange(1,n+1) if not n%d)//n + wp = 2*prec + 30 + res = fone + primes = [from_rational(1,p,wp) for 
p in [2,3,5,7]] + ppowers = [mpf_mul(p,p,wp) for p in primes] + n = 2 + while 1: + a = mpf_zeta_int(n, wp) + for i in range(4): + a = mpf_mul(a, mpf_sub(fone, ppowers[i]), wp) + ppowers[i] = mpf_mul(ppowers[i], primes[i], wp) + a = mpf_pow_int(a, -I(n), wp) + if mpf_pos(a, prec+10, 'n') == fone: + break + #from libmpf import to_str + #print n, to_str(mpf_sub(fone, a), 6) + res = mpf_mul(res, a, wp) + n += 1 + res = mpf_mul(res, from_int(3*15*35), wp) + res = mpf_div(res, from_int(4*16*36), wp) + return to_fixed(res, prec) + + +mpf_euler = def_mpf_constant(euler_fixed) +mpf_apery = def_mpf_constant(apery_fixed) +mpf_khinchin = def_mpf_constant(khinchin_fixed) +mpf_glaisher = def_mpf_constant(glaisher_fixed) +mpf_catalan = def_mpf_constant(catalan_fixed) +mpf_mertens = def_mpf_constant(mertens_fixed) +mpf_twinprime = def_mpf_constant(twinprime_fixed) + + +#-----------------------------------------------------------------------# +# # +# Bernoulli numbers # +# # +#-----------------------------------------------------------------------# + +MAX_BERNOULLI_CACHE = 3000 + + +r""" +Small Bernoulli numbers and factorials are used in numerous summations, +so it is critical for speed that sequential computation is fast and that +values are cached up to a fairly high threshold. + +On the other hand, we also want to support fast computation of isolated +large numbers. Currently, no such acceleration is provided for integer +factorials (though it is for large floating-point factorials, which are +computed via gamma if the precision is low enough). + +For sequential computation of Bernoulli numbers, we use Ramanujan's formula + + / n + 3 \ + B = (A(n) - S(n)) / | | + n \ n / + +where A(n) = (n+3)/3 when n = 0 or 2 (mod 6), A(n) = -(n+3)/6 +when n = 4 (mod 6), and + + [n/6] + ___ + \ / n + 3 \ + S(n) = ) | | * B + /___ \ n - 6*k / n-6*k + k = 1 + +For isolated large Bernoulli numbers, we use the Riemann zeta function +to calculate a numerical value for B_n. 
The von Staudt-Clausen theorem +can then be used to optionally find the exact value of the +numerator and denominator. +""" + +bernoulli_cache = {} +f3 = from_int(3) +f6 = from_int(6) + +def bernoulli_size(n): + """Accurately estimate the size of B_n (even n > 2 only)""" + lgn = math.log(n,2) + return int(2.326 + 0.5*lgn + n*(lgn - 4.094)) + +BERNOULLI_PREC_CUTOFF = bernoulli_size(MAX_BERNOULLI_CACHE) + +def mpf_bernoulli(n, prec, rnd=None): + """Computation of Bernoulli numbers (numerically)""" + if n < 2: + if n < 0: + raise ValueError("Bernoulli numbers only defined for n >= 0") + if n == 0: + return fone + if n == 1: + return mpf_neg(fhalf) + # For odd n > 1, the Bernoulli numbers are zero + if n & 1: + return fzero + # If precision is extremely high, we can save time by computing + # the Bernoulli number at a lower precision that is sufficient to + # obtain the exact fraction, round to the exact fraction, and + # convert the fraction back to an mpf value at the original precision + if prec > BERNOULLI_PREC_CUTOFF and prec > bernoulli_size(n)*1.1 + 1000: + p, q = bernfrac(n) + return from_rational(p, q, prec, rnd or round_floor) + if n > MAX_BERNOULLI_CACHE: + return mpf_bernoulli_huge(n, prec, rnd) + wp = prec + 30 + # Reuse nearby precisions + wp += 32 - (prec & 31) + cached = bernoulli_cache.get(wp) + if cached: + numbers, state = cached + if n in numbers: + if not rnd: + return numbers[n] + return mpf_pos(numbers[n], prec, rnd) + m, bin, bin1 = state + if n - m > 10: + return mpf_bernoulli_huge(n, prec, rnd) + else: + if n > 10: + return mpf_bernoulli_huge(n, prec, rnd) + numbers = {0:fone} + m, bin, bin1 = state = [2, MPZ(10), MPZ_ONE] + bernoulli_cache[wp] = (numbers, state) + while m <= n: + #print m + case = m % 6 + # Accurately estimate size of B_m so we can use + # fixed point math without using too much precision + szbm = bernoulli_size(m) + s = 0 + sexp = max(0, szbm) - wp + if m < 6: + a = MPZ_ZERO + else: + a = bin1 + for j in xrange(1, m//6+1): + 
usign, uman, uexp, ubc = u = numbers[m-6*j] + if usign: + uman = -uman + s += lshift(a*uman, uexp-sexp) + # Update inner binomial coefficient + j6 = 6*j + a *= ((m-5-j6)*(m-4-j6)*(m-3-j6)*(m-2-j6)*(m-1-j6)*(m-j6)) + a //= ((4+j6)*(5+j6)*(6+j6)*(7+j6)*(8+j6)*(9+j6)) + if case == 0: b = mpf_rdiv_int(m+3, f3, wp) + if case == 2: b = mpf_rdiv_int(m+3, f3, wp) + if case == 4: b = mpf_rdiv_int(-m-3, f6, wp) + s = from_man_exp(s, sexp, wp) + b = mpf_div(mpf_sub(b, s, wp), from_int(bin), wp) + numbers[m] = b + m += 2 + # Update outer binomial coefficient + bin = bin * ((m+2)*(m+3)) // (m*(m-1)) + if m > 6: + bin1 = bin1 * ((2+m)*(3+m)) // ((m-7)*(m-6)) + state[:] = [m, bin, bin1] + return numbers[n] + +def mpf_bernoulli_huge(n, prec, rnd=None): + wp = prec + 10 + piprec = wp + int(math.log(n,2)) + v = mpf_gamma_int(n+1, wp) + v = mpf_mul(v, mpf_zeta_int(n, wp), wp) + v = mpf_mul(v, mpf_pow_int(mpf_pi(piprec), -n, wp)) + v = mpf_shift(v, 1-n) + if not n & 3: + v = mpf_neg(v) + return mpf_pos(v, prec, rnd or round_fast) + +def bernfrac(n): + r""" + Returns a tuple of integers `(p, q)` such that `p/q = B_n` exactly, + where `B_n` denotes the `n`-th Bernoulli number. The fraction is + always reduced to lowest terms. Note that for `n > 1` and `n` odd, + `B_n = 0`, and `(0, 1)` is returned. + + **Examples** + + The first few Bernoulli numbers are exactly:: + + >>> from mpmath import * + >>> for n in range(15): + ... p, q = bernfrac(n) + ... print("%s %s/%s" % (n, p, q)) + ... + 0 1/1 + 1 -1/2 + 2 1/6 + 3 0/1 + 4 -1/30 + 5 0/1 + 6 1/42 + 7 0/1 + 8 -1/30 + 9 0/1 + 10 5/66 + 11 0/1 + 12 -691/2730 + 13 0/1 + 14 7/6 + + This function works for arbitrarily large `n`:: + + >>> p, q = bernfrac(10**4) + >>> print(q) + 2338224387510 + >>> print(len(str(p))) + 27692 + >>> mp.dps = 15 + >>> print(mpf(p) / q) + -9.04942396360948e+27677 + >>> print(bernoulli(10**4)) + -9.04942396360948e+27677 + + .. 
note :: + + :func:`~mpmath.bernoulli` computes a floating-point approximation + directly, without computing the exact fraction first. + This is much faster for large `n`. + + **Algorithm** + + :func:`~mpmath.bernfrac` works by computing the value of `B_n` numerically + and then using the von Staudt-Clausen theorem [1] to reconstruct + the exact fraction. For large `n`, this is significantly faster than + computing `B_1, B_2, \ldots, B_2` recursively with exact arithmetic. + The implementation has been tested for `n = 10^m` up to `m = 6`. + + In practice, :func:`~mpmath.bernfrac` appears to be about three times + slower than the specialized program calcbn.exe [2] + + **References** + + 1. MathWorld, von Staudt-Clausen Theorem: + http://mathworld.wolfram.com/vonStaudt-ClausenTheorem.html + + 2. The Bernoulli Number Page: + http://www.bernoulli.org/ + + """ + n = int(n) + if n < 3: + return [(1, 1), (-1, 2), (1, 6)][n] + if n & 1: + return (0, 1) + q = 1 + for k in list_primes(n+1): + if not (n % (k-1)): + q *= k + prec = bernoulli_size(n) + int(math.log(q,2)) + 20 + b = mpf_bernoulli(n, prec) + p = mpf_mul(b, from_int(q)) + pint = to_int(p, round_nearest) + return (pint, q) + + +#-----------------------------------------------------------------------# +# # +# Polygamma functions # +# # +#-----------------------------------------------------------------------# + +r""" +For all polygamma (psi) functions, we use the Euler-Maclaurin summation +formula. It looks slightly different in the m = 0 and m > 0 cases. + +For m = 0, we have + oo + ___ B + (0) 1 \ 2 k -2 k + psi (z) ~ log z + --- - ) ------ z + 2 z /___ (2 k)! + k = 1 + +Experiment shows that the minimum term of the asymptotic series +reaches 2^(-p) when Re(z) > 0.11*p. So we simply use the recurrence +for psi (equivalent, in fact, to summing to the first few terms +directly before applying E-M) to obtain z large enough. 
+ +Since, very crudely, log z ~= 1 for Re(z) > 1, we can use +fixed-point arithmetic (if z is extremely large, log(z) itself +is a sufficient approximation, so we can stop there already). + +For Re(z) << 0, we could use recurrence, but this is of course +inefficient for large negative z, so there we use the +reflection formula instead. + +For m > 0, we have + + N - 1 + ___ + ~~~(m) [ \ 1 ] 1 1 + psi (z) ~ [ ) -------- ] + ---------- + -------- + + [ /___ m+1 ] m+1 m + k = 1 (z+k) ] 2 (z+N) m (z+N) + + oo + ___ B + \ 2 k (m+1) (m+2) ... (m+2k-1) + + ) ------ ------------------------ + /___ (2 k)! m + 2 k + k = 1 (z+N) + +where ~~~ denotes the function rescaled by 1/((-1)^(m+1) m!). + +Here again N is chosen to make z+N large enough for the minimum +term in the last series to become smaller than eps. + +TODO: the current estimation of N for m > 0 is *very suboptimal*. + +TODO: implement the reflection formula for m > 0, Re(z) << 0. +It is generally a combination of multiple cotangents. Need to +figure out a reasonably simple way to generate these formulas +on the fly. + +TODO: maybe use exact algorithms to compute psi for integral +and certain rational arguments, as this can be much more +efficient. (On the other hand, the availability of these +special values provides a convenient way to test the general +algorithm.) +""" + +# Harmonic numbers are just shifted digamma functions +# We should calculate these exactly when x is an integer +# and when doing so is faster. 
+ +def mpf_harmonic(x, prec, rnd): + if x in (fzero, fnan, finf): + return x + a = mpf_psi0(mpf_add(fone, x, prec+5), prec) + return mpf_add(a, mpf_euler(prec+5, rnd), prec, rnd) + +def mpc_harmonic(z, prec, rnd): + if z[1] == fzero: + return (mpf_harmonic(z[0], prec, rnd), fzero) + a = mpc_psi0(mpc_add_mpf(z, fone, prec+5), prec) + return mpc_add_mpf(a, mpf_euler(prec+5, rnd), prec, rnd) + +def mpf_psi0(x, prec, rnd=round_fast): + """ + Computation of the digamma function (psi function of order 0) + of a real argument. + """ + sign, man, exp, bc = x + wp = prec + 10 + if not man: + if x == finf: return x + if x == fninf or x == fnan: return fnan + if x == fzero or (exp >= 0 and sign): + raise ValueError("polygamma pole") + # Near 0 -- fixed-point arithmetic becomes bad + if exp+bc < -5: + v = mpf_psi0(mpf_add(x, fone, prec, rnd), prec, rnd) + return mpf_sub(v, mpf_div(fone, x, wp, rnd), prec, rnd) + # Reflection formula + if sign and exp+bc > 3: + c, s = mpf_cos_sin_pi(x, wp) + q = mpf_mul(mpf_div(c, s, wp), mpf_pi(wp), wp) + p = mpf_psi0(mpf_sub(fone, x, wp), wp) + return mpf_sub(p, q, prec, rnd) + # The logarithmic term is accurate enough + if (not sign) and bc + exp > wp: + return mpf_log(mpf_sub(x, fone, wp), prec, rnd) + # Initial recurrence to obtain a large enough x + m = to_int(x) + n = int(0.11*wp) + 2 + s = MPZ_ZERO + x = to_fixed(x, wp) + one = MPZ_ONE << wp + if m < n: + for k in xrange(m, n): + s -= (one << wp) // x + x += one + x -= one + # Logarithmic term + s += to_fixed(mpf_log(from_man_exp(x, -wp, wp), wp), wp) + # Endpoint term in Euler-Maclaurin expansion + s += (one << wp) // (2*x) + # Euler-Maclaurin remainder sum + x2 = (x*x) >> wp + t = one + prev = 0 + k = 1 + while 1: + t = (t*x2) >> wp + bsign, bman, bexp, bbc = mpf_bernoulli(2*k, wp) + offset = (bexp + 2*wp) + if offset >= 0: term = (bman << offset) // (t*(2*k)) + else: term = (bman >> (-offset)) // (t*(2*k)) + if k & 1: s -= term + else: s += term + if k > 2 and term >= prev: + break + 
prev = term + k += 1 + return from_man_exp(s, -wp, wp, rnd) + +def mpc_psi0(z, prec, rnd=round_fast): + """ + Computation of the digamma function (psi function of order 0) + of a complex argument. + """ + re, im = z + # Fall back to the real case + if im == fzero: + return (mpf_psi0(re, prec, rnd), fzero) + wp = prec + 20 + sign, man, exp, bc = re + # Reflection formula + if sign and exp+bc > 3: + c = mpc_cos_pi(z, wp) + s = mpc_sin_pi(z, wp) + q = mpc_mul_mpf(mpc_div(c, s, wp), mpf_pi(wp), wp) + p = mpc_psi0(mpc_sub(mpc_one, z, wp), wp) + return mpc_sub(p, q, prec, rnd) + # Just the logarithmic term + if (not sign) and bc + exp > wp: + return mpc_log(mpc_sub(z, mpc_one, wp), prec, rnd) + # Initial recurrence to obtain a large enough z + w = to_int(re) + n = int(0.11*wp) + 2 + s = mpc_zero + if w < n: + for k in xrange(w, n): + s = mpc_sub(s, mpc_reciprocal(z, wp), wp) + z = mpc_add_mpf(z, fone, wp) + z = mpc_sub(z, mpc_one, wp) + # Logarithmic and endpoint term + s = mpc_add(s, mpc_log(z, wp), wp) + s = mpc_add(s, mpc_div(mpc_half, z, wp), wp) + # Euler-Maclaurin remainder sum + z2 = mpc_square(z, wp) + t = mpc_one + prev = mpc_zero + szprev = fzero + k = 1 + eps = mpf_shift(fone, -wp+2) + while 1: + t = mpc_mul(t, z2, wp) + bern = mpf_bernoulli(2*k, wp) + term = mpc_mpf_div(bern, mpc_mul_int(t, 2*k, wp), wp) + s = mpc_sub(s, term, wp) + szterm = mpc_abs(term, 10) + if k > 2 and (mpf_le(szterm, eps) or mpf_le(szprev, szterm)): + break + prev = term + szprev = szterm + k += 1 + return s + +# Currently unoptimized +def mpf_psi(m, x, prec, rnd=round_fast): + """ + Computation of the polygamma function of arbitrary integer order + m >= 0, for a real argument x. + """ + if m == 0: + return mpf_psi0(x, prec, rnd=round_fast) + return mpc_psi(m, (x, fzero), prec, rnd)[0] + +def mpc_psi(m, z, prec, rnd=round_fast): + """ + Computation of the polygamma function of arbitrary integer order + m >= 0, for a complex argument z. 
+ """ + if m == 0: + return mpc_psi0(z, prec, rnd) + re, im = z + wp = prec + 20 + sign, man, exp, bc = re + if not im[1]: + if im in (finf, fninf, fnan): + return (fnan, fnan) + if not man: + if re == finf and im == fzero: + return (fzero, fzero) + if re == fnan: + return (fnan, fnan) + # Recurrence + w = to_int(re) + n = int(0.4*wp + 4*m) + s = mpc_zero + if w < n: + for k in xrange(w, n): + t = mpc_pow_int(z, -m-1, wp) + s = mpc_add(s, t, wp) + z = mpc_add_mpf(z, fone, wp) + zm = mpc_pow_int(z, -m, wp) + z2 = mpc_pow_int(z, -2, wp) + # 1/m*(z+N)^m + integral_term = mpc_div_mpf(zm, from_int(m), wp) + s = mpc_add(s, integral_term, wp) + # 1/2*(z+N)^(-(m+1)) + s = mpc_add(s, mpc_mul_mpf(mpc_div(zm, z, wp), fhalf, wp), wp) + a = m + 1 + b = 2 + k = 1 + # Important: we want to sum up to the *relative* error, + # not the absolute error, because psi^(m)(z) might be tiny + magn = mpc_abs(s, 10) + magn = magn[2]+magn[3] + eps = mpf_shift(fone, magn-wp+2) + while 1: + zm = mpc_mul(zm, z2, wp) + bern = mpf_bernoulli(2*k, wp) + scal = mpf_mul_int(bern, a, wp) + scal = mpf_div(scal, from_int(b), wp) + term = mpc_mul_mpf(zm, scal, wp) + s = mpc_add(s, term, wp) + szterm = mpc_abs(term, 10) + if k > 2 and mpf_le(szterm, eps): + break + #print k, to_str(szterm, 10), to_str(eps, 10) + a *= (m+2*k)*(m+2*k+1) + b *= (2*k+1)*(2*k+2) + k += 1 + # Scale and sign factor + v = mpc_mul_mpf(s, mpf_gamma(from_int(m+1), wp), prec, rnd) + if not (m & 1): + v = mpf_neg(v[0]), mpf_neg(v[1]) + return v + + +#-----------------------------------------------------------------------# +# # +# Riemann zeta function # +# # +#-----------------------------------------------------------------------# + +r""" +We use zeta(s) = eta(s) / (1 - 2**(1-s)) and Borwein's approximation + + n-1 + ___ k + -1 \ (-1) (d_k - d_n) + eta(s) ~= ---- ) ------------------ + d_n /___ s + k = 0 (k + 1) +where + k + ___ i + \ (n + i - 1)! 4 + d_k = n ) ---------------. + /___ (n - i)! (2i)! 
+ i = 0 + +If s = a + b*I, the absolute error for eta(s) is bounded by + + 3 (1 + 2|b|) + ------------ * exp(|b| pi/2) + n + (3+sqrt(8)) + +Disregarding the linear term, we have approximately, + + log(err) ~= log(exp(1.58*|b|)) - log(5.8**n) + log(err) ~= 1.58*|b| - log(5.8)*n + log(err) ~= 1.58*|b| - 1.76*n + log2(err) ~= 2.28*|b| - 2.54*n + +So for p bits, we should choose n > (p + 2.28*|b|) / 2.54. + +References: +----------- + +Peter Borwein, "An Efficient Algorithm for the Riemann Zeta Function" +http://www.cecm.sfu.ca/personal/pborwein/PAPERS/P117.ps + +http://en.wikipedia.org/wiki/Dirichlet_eta_function +""" + +borwein_cache = {} + +def borwein_coefficients(n): + if n in borwein_cache: + return borwein_cache[n] + ds = [MPZ_ZERO] * (n+1) + d = MPZ_ONE + s = ds[0] = MPZ_ONE + for i in range(1, n+1): + d = d * 4 * (n+i-1) * (n-i+1) + d //= ((2*i) * ((2*i)-1)) + s += d + ds[i] = s + borwein_cache[n] = ds + return ds + +ZETA_INT_CACHE_MAX_PREC = 1000 +zeta_int_cache = {} + +def mpf_zeta_int(s, prec, rnd=round_fast): + """ + Optimized computation of zeta(s) for an integer s. + """ + wp = prec + 20 + s = int(s) + if s in zeta_int_cache and zeta_int_cache[s][0] >= wp: + return mpf_pos(zeta_int_cache[s][1], prec, rnd) + if s < 2: + if s == 1: + raise ValueError("zeta(1) pole") + if not s: + return mpf_neg(fhalf) + return mpf_div(mpf_bernoulli(-s+1, wp), from_int(s-1), prec, rnd) + # 2^-s term vanishes? + if s >= wp: + return mpf_perturb(fone, 0, prec, rnd) + # 5^-s term vanishes? + elif s >= wp*0.431: + t = one = 1 << wp + t += 1 << (wp - s) + t += one // (MPZ_THREE ** s) + t += 1 << max(0, wp - s*2) + return from_man_exp(t, -wp, prec, rnd) + else: + # Fast enough to sum directly? 
+ # Even better, we use the Euler product (idea stolen from pari) + m = (float(wp)/(s-1) + 1) + if m < 30: + needed_terms = int(2.0**m + 1) + if needed_terms < int(wp/2.54 + 5) / 10: + t = fone + for k in list_primes(needed_terms): + #print k, needed_terms + powprec = int(wp - s*math.log(k,2)) + if powprec < 2: + break + a = mpf_sub(fone, mpf_pow_int(from_int(k), -s, powprec), wp) + t = mpf_mul(t, a, wp) + return mpf_div(fone, t, wp) + # Use Borwein's algorithm + n = int(wp/2.54 + 5) + d = borwein_coefficients(n) + t = MPZ_ZERO + s = MPZ(s) + for k in xrange(n): + t += (((-1)**k * (d[k] - d[n])) << wp) // (k+1)**s + t = (t << wp) // (-d[n]) + t = (t << wp) // ((1 << wp) - (1 << (wp+1-s))) + if (s in zeta_int_cache and zeta_int_cache[s][0] < wp) or (s not in zeta_int_cache): + zeta_int_cache[s] = (wp, from_man_exp(t, -wp-wp)) + return from_man_exp(t, -wp-wp, prec, rnd) + +def mpf_zeta(s, prec, rnd=round_fast, alt=0): + sign, man, exp, bc = s + if not man: + if s == fzero: + if alt: + return fhalf + else: + return mpf_neg(fhalf) + if s == finf: + return fone + return fnan + wp = prec + 20 + # First term vanishes? + if (not sign) and (exp + bc > (math.log(wp,2) + 2)): + return mpf_perturb(fone, alt, prec, rnd) + # Optimize for integer arguments + elif exp >= 0: + if alt: + if s == fone: + return mpf_ln2(prec, rnd) + z = mpf_zeta_int(to_int(s), wp, negative_rnd[rnd]) + q = mpf_sub(fone, mpf_pow(ftwo, mpf_sub(fone, s, wp), wp), wp) + return mpf_mul(z, q, prec, rnd) + else: + return mpf_zeta_int(to_int(s), prec, rnd) + # Negative: use the reflection formula + # Borwein only proves the accuracy bound for x >= 1/2. However, based on + # tests, the accuracy without reflection is quite good even some distance + # to the left of 1/2. XXX: verify this. + if sign: + # XXX: could use the separate refl. 
formula for Dirichlet eta + if alt: + q = mpf_sub(fone, mpf_pow(ftwo, mpf_sub(fone, s, wp), wp), wp) + return mpf_mul(mpf_zeta(s, wp), q, prec, rnd) + # XXX: -1 should be done exactly + y = mpf_sub(fone, s, 10*wp) + a = mpf_gamma(y, wp) + b = mpf_zeta(y, wp) + c = mpf_sin_pi(mpf_shift(s, -1), wp) + wp2 = wp + max(0,exp+bc) + pi = mpf_pi(wp+wp2) + d = mpf_div(mpf_pow(mpf_shift(pi, 1), s, wp2), pi, wp2) + return mpf_mul(a,mpf_mul(b,mpf_mul(c,d,wp),wp),prec,rnd) + + # Near pole + r = mpf_sub(fone, s, wp) + asign, aman, aexp, abc = mpf_abs(r) + pole_dist = -2*(aexp+abc) + if pole_dist > wp: + if alt: + return mpf_ln2(prec, rnd) + else: + q = mpf_neg(mpf_div(fone, r, wp)) + return mpf_add(q, mpf_euler(wp), prec, rnd) + else: + wp += max(0, pole_dist) + + t = MPZ_ZERO + #wp += 16 - (prec & 15) + # Use Borwein's algorithm + n = int(wp/2.54 + 5) + d = borwein_coefficients(n) + t = MPZ_ZERO + sf = to_fixed(s, wp) + ln2 = ln2_fixed(wp) + for k in xrange(n): + u = (-sf*log_int_fixed(k+1, wp, ln2)) >> wp + #esign, eman, eexp, ebc = mpf_exp(u, wp) + #offset = eexp + wp + #if offset >= 0: + # w = ((d[k] - d[n]) * eman) << offset + #else: + # w = ((d[k] - d[n]) * eman) >> (-offset) + eman = exp_fixed(u, wp, ln2) + w = (d[k] - d[n]) * eman + if k & 1: + t -= w + else: + t += w + t = t // (-d[n]) + t = from_man_exp(t, -wp, wp) + if alt: + return mpf_pos(t, prec, rnd) + else: + q = mpf_sub(fone, mpf_pow(ftwo, mpf_sub(fone, s, wp), wp), wp) + return mpf_div(t, q, prec, rnd) + +def mpc_zeta(s, prec, rnd=round_fast, alt=0, force=False): + re, im = s + if im == fzero: + return mpf_zeta(re, prec, rnd, alt), fzero + + # slow for large s + if (not force) and mpf_gt(mpc_abs(s, 10), from_int(prec)): + raise NotImplementedError + + wp = prec + 20 + + # Near pole + r = mpc_sub(mpc_one, s, wp) + asign, aman, aexp, abc = mpc_abs(r, 10) + pole_dist = -2*(aexp+abc) + if pole_dist > wp: + if alt: + q = mpf_ln2(wp) + y = mpf_mul(q, mpf_euler(wp), wp) + g = mpf_shift(mpf_mul(q, q, wp), -1) + g = 
mpf_sub(y, g) + z = mpc_mul_mpf(r, mpf_neg(g), wp) + z = mpc_add_mpf(z, q, wp) + return mpc_pos(z, prec, rnd) + else: + q = mpc_neg(mpc_div(mpc_one, r, wp)) + q = mpc_add_mpf(q, mpf_euler(wp), wp) + return mpc_pos(q, prec, rnd) + else: + wp += max(0, pole_dist) + + # Reflection formula. To be rigorous, we should reflect to the left of + # re = 1/2 (see comments for mpf_zeta), but this leads to unnecessary + # slowdown for interesting values of s + if mpf_lt(re, fzero): + # XXX: could use the separate refl. formula for Dirichlet eta + if alt: + q = mpc_sub(mpc_one, mpc_pow(mpc_two, mpc_sub(mpc_one, s, wp), + wp), wp) + return mpc_mul(mpc_zeta(s, wp), q, prec, rnd) + # XXX: -1 should be done exactly + y = mpc_sub(mpc_one, s, 10*wp) + a = mpc_gamma(y, wp) + b = mpc_zeta(y, wp) + c = mpc_sin_pi(mpc_shift(s, -1), wp) + rsign, rman, rexp, rbc = re + isign, iman, iexp, ibc = im + mag = max(rexp+rbc, iexp+ibc) + wp2 = wp + max(0, mag) + pi = mpf_pi(wp+wp2) + pi2 = (mpf_shift(pi, 1), fzero) + d = mpc_div_mpf(mpc_pow(pi2, s, wp2), pi, wp2) + return mpc_mul(a,mpc_mul(b,mpc_mul(c,d,wp),wp),prec,rnd) + n = int(wp/2.54 + 5) + n += int(0.9*abs(to_int(im))) + d = borwein_coefficients(n) + ref = to_fixed(re, wp) + imf = to_fixed(im, wp) + tre = MPZ_ZERO + tim = MPZ_ZERO + one = MPZ_ONE << wp + one_2wp = MPZ_ONE << (2*wp) + critical_line = re == fhalf + ln2 = ln2_fixed(wp) + pi2 = pi_fixed(wp-1) + wp2 = wp+wp + for k in xrange(n): + log = log_int_fixed(k+1, wp, ln2) + # A square root is much cheaper than an exp + if critical_line: + w = one_2wp // isqrt_fast((k+1) << wp2) + else: + w = exp_fixed((-ref*log) >> wp, wp) + if k & 1: + w *= (d[n] - d[k]) + else: + w *= (d[k] - d[n]) + wre, wim = cos_sin_fixed((-imf*log)>>wp, wp, pi2) + tre += (w * wre) >> wp + tim += (w * wim) >> wp + tre //= (-d[n]) + tim //= (-d[n]) + tre = from_man_exp(tre, -wp, wp) + tim = from_man_exp(tim, -wp, wp) + if alt: + return mpc_pos((tre, tim), prec, rnd) + else: + q = mpc_sub(mpc_one, mpc_pow(mpc_two, r, 
wp), wp) + return mpc_div((tre, tim), q, prec, rnd) + +def mpf_altzeta(s, prec, rnd=round_fast): + return mpf_zeta(s, prec, rnd, 1) + +def mpc_altzeta(s, prec, rnd=round_fast): + return mpc_zeta(s, prec, rnd, 1) + +# Not optimized currently +mpf_zetasum = None + + +def pow_fixed(x, n, wp): + if n == 1: + return x + y = MPZ_ONE << wp + while n: + if n & 1: + y = (y*x) >> wp + n -= 1 + x = (x*x) >> wp + n //= 2 + return y + +# TODO: optimize / cleanup interface / unify with list_primes +sieve_cache = [] +primes_cache = [] +mult_cache = [] + +def primesieve(n): + global sieve_cache, primes_cache, mult_cache + if n < len(sieve_cache): + sieve = sieve_cache#[:n+1] + primes = primes_cache[:primes_cache.index(max(sieve))+1] + mult = mult_cache#[:n+1] + return sieve, primes, mult + sieve = [0] * (n+1) + mult = [0] * (n+1) + primes = list_primes(n) + for p in primes: + #sieve[p::p] = p + for k in xrange(p,n+1,p): + sieve[k] = p + for i, p in enumerate(sieve): + if i >= 2: + m = 1 + n = i // p + while not n % p: + n //= p + m += 1 + mult[i] = m + sieve_cache = sieve + primes_cache = primes + mult_cache = mult + return sieve, primes, mult + +def zetasum_sieved(critical_line, sre, sim, a, n, wp): + if a < 1: + raise ValueError("a cannot be less than 1") + sieve, primes, mult = primesieve(a+n) + basic_powers = {} + one = MPZ_ONE << wp + one_2wp = MPZ_ONE << (2*wp) + wp2 = wp+wp + ln2 = ln2_fixed(wp) + pi2 = pi_fixed(wp-1) + for p in primes: + if p*2 > a+n: + break + log = log_int_fixed(p, wp, ln2) + cos, sin = cos_sin_fixed((-sim*log)>>wp, wp, pi2) + if critical_line: + u = one_2wp // isqrt_fast(p<>wp, wp) + pre = (u*cos) >> wp + pim = (u*sin) >> wp + basic_powers[p] = [(pre, pim)] + tre, tim = pre, pim + for m in range(1,int(math.log(a+n,p)+0.01)+1): + tre, tim = ((pre*tre-pim*tim)>>wp), ((pim*tre+pre*tim)>>wp) + basic_powers[p].append((tre,tim)) + xre = MPZ_ZERO + xim = MPZ_ZERO + if a == 1: + xre += one + aa = max(a,2) + for k in xrange(aa, a+n+1): + p = sieve[k] + if p in 
basic_powers: + m = mult[k] + tre, tim = basic_powers[p][m-1] + while 1: + k //= p**m + if k == 1: + break + p = sieve[k] + m = mult[k] + pre, pim = basic_powers[p][m-1] + tre, tim = ((pre*tre-pim*tim)>>wp), ((pim*tre+pre*tim)>>wp) + else: + log = log_int_fixed(k, wp, ln2) + cos, sin = cos_sin_fixed((-sim*log)>>wp, wp, pi2) + if critical_line: + u = one_2wp // isqrt_fast(k<>wp, wp) + tre = (u*cos) >> wp + tim = (u*sin) >> wp + xre += tre + xim += tim + return xre, xim + +# Set to something large to disable +ZETASUM_SIEVE_CUTOFF = 10 + +def mpc_zetasum(s, a, n, derivatives, reflect, prec): + """ + Fast version of mp._zetasum, assuming s = complex, a = integer. + """ + + wp = prec + 10 + derivatives = list(derivatives) + have_derivatives = derivatives != [0] + have_one_derivative = len(derivatives) == 1 + + # parse s + sre, sim = s + critical_line = (sre == fhalf) + sre = to_fixed(sre, wp) + sim = to_fixed(sim, wp) + + if a > 0 and n > ZETASUM_SIEVE_CUTOFF and not have_derivatives \ + and not reflect and (n < 4e7 or sys.maxsize > 2**32): + re, im = zetasum_sieved(critical_line, sre, sim, a, n, wp) + xs = [(from_man_exp(re, -wp, prec, 'n'), from_man_exp(im, -wp, prec, 'n'))] + return xs, [] + + maxd = max(derivatives) + if not have_one_derivative: + derivatives = range(maxd+1) + + # x_d = 0, y_d = 0 + xre = [MPZ_ZERO for d in derivatives] + xim = [MPZ_ZERO for d in derivatives] + if reflect: + yre = [MPZ_ZERO for d in derivatives] + yim = [MPZ_ZERO for d in derivatives] + else: + yre = yim = [] + + one = MPZ_ONE << wp + one_2wp = MPZ_ONE << (2*wp) + + ln2 = ln2_fixed(wp) + pi2 = pi_fixed(wp-1) + wp2 = wp+wp + + for w in xrange(a, a+n+1): + log = log_int_fixed(w, wp, ln2) + cos, sin = cos_sin_fixed((-sim*log)>>wp, wp, pi2) + if critical_line: + u = one_2wp // isqrt_fast(w<>wp, wp) + xterm_re = (u * cos) >> wp + xterm_im = (u * sin) >> wp + if reflect: + reciprocal = (one_2wp // (u*w)) + yterm_re = (reciprocal * cos) >> wp + yterm_im = (reciprocal * sin) >> wp + + if 
have_derivatives: + if have_one_derivative: + log = pow_fixed(log, maxd, wp) + xre[0] += (xterm_re * log) >> wp + xim[0] += (xterm_im * log) >> wp + if reflect: + yre[0] += (yterm_re * log) >> wp + yim[0] += (yterm_im * log) >> wp + else: + t = MPZ_ONE << wp + for d in derivatives: + xre[d] += (xterm_re * t) >> wp + xim[d] += (xterm_im * t) >> wp + if reflect: + yre[d] += (yterm_re * t) >> wp + yim[d] += (yterm_im * t) >> wp + t = (t * log) >> wp + else: + xre[0] += xterm_re + xim[0] += xterm_im + if reflect: + yre[0] += yterm_re + yim[0] += yterm_im + if have_derivatives: + if have_one_derivative: + if maxd % 2: + xre[0] = -xre[0] + xim[0] = -xim[0] + if reflect: + yre[0] = -yre[0] + yim[0] = -yim[0] + else: + xre = [(-1)**d * xre[d] for d in derivatives] + xim = [(-1)**d * xim[d] for d in derivatives] + if reflect: + yre = [(-1)**d * yre[d] for d in derivatives] + yim = [(-1)**d * yim[d] for d in derivatives] + xs = [(from_man_exp(xa, -wp, prec, 'n'), from_man_exp(xb, -wp, prec, 'n')) + for (xa, xb) in zip(xre, xim)] + ys = [(from_man_exp(ya, -wp, prec, 'n'), from_man_exp(yb, -wp, prec, 'n')) + for (ya, yb) in zip(yre, yim)] + return xs, ys + + +#-----------------------------------------------------------------------# +# # +# The gamma function (NEW IMPLEMENTATION) # +# # +#-----------------------------------------------------------------------# + +# Higher means faster, but more precomputation time +MAX_GAMMA_TAYLOR_PREC = 5000 +# Need to derive higher bounds for Taylor series to go higher +assert MAX_GAMMA_TAYLOR_PREC < 15000 + +# Use Stirling's series if abs(x) > beta*prec +# Important: must be large enough for convergence! +GAMMA_STIRLING_BETA = 0.2 + +SMALL_FACTORIAL_CACHE_SIZE = 150 + +gamma_taylor_cache = {} +gamma_stirling_cache = {} + +small_factorial_cache = [from_int(ifac(n)) for \ + n in range(SMALL_FACTORIAL_CACHE_SIZE+1)] + +def zeta_array(N, prec): + """ + zeta(n) = A * pi**n / n! 
+ B

    where A is a rational number (A = Bernoulli number
    for n even) and B is an infinite sum over powers of exp(2*pi).
    (B = 0 for n even).

    TODO: this is currently only used for gamma, but could
    be very useful elsewhere.
    """
    extra = 30
    wp = prec+extra
    # zeta_values[n] holds zeta(n) as a wp-bit fixed-point number
    zeta_values = [MPZ_ZERO] * (N+2)
    pi = pi_fixed(wp)
    # STEP 1:
    one = MPZ_ONE << wp
    # zeta(0) = -1/2
    zeta_values[0] = -one//2
    f_2pi = mpf_shift(mpf_pi(wp),1)
    exp_2pi_k = exp_2pi = mpf_exp(f_2pi, wp)
    # Compute exponential series
    # Store values of 1/(exp(2*pi*k)-1),
    # exp(2*pi*k)/(exp(2*pi*k)-1)**2, 1/(exp(2*pi*k)-1)**2
    # pi*k*exp(2*pi*k)/(exp(2*pi*k)-1)**2
    exps3 = []
    k = 1
    while 1:
        # Terms shrink like exp(-2*pi*k), roughly 9 bits per k; stop
        # once a term no longer contributes at the working precision
        tp = wp - 9*k
        if tp < 1:
            break
        # 1/(exp(2*pi*k)-1)
        q1 = mpf_div(fone, mpf_sub(exp_2pi_k, fone, tp), tp)
        # pi*k*exp(2*pi*k)/(exp(2*pi*k)-1)**2
        q2 = mpf_mul(exp_2pi_k, mpf_mul(q1,q1,tp), tp)
        q1 = to_fixed(q1, wp)
        q2 = to_fixed(q2, wp)
        q2 = (k * q2 * pi) >> wp
        exps3.append((q1, q2))
        # Multiply for next round
        exp_2pi_k = mpf_mul(exp_2pi_k, exp_2pi, wp)
        k += 1
    # Exponential sum (the "B" part, odd n only)
    for n in xrange(3, N+1, 2):
        s = MPZ_ZERO
        k = 1
        for e1, e2 in exps3:
            if n%4 == 3:
                t = e1 // k**n
            else:
                U = (n-1)//4
                t = (e1 + e2//U) // k**n
            if not t:
                break
            s += t
            k += 1
        zeta_values[n] = -2*s
    # Even zeta values: zeta(2n) = |B_2n| (2*pi)^(2n) / (2*(2n)!)
    B = [mpf_abs(mpf_bernoulli(k,wp)) for k in xrange(N+2)]
    pi_pow = fpi = mpf_pow_int(mpf_shift(mpf_pi(wp), 1), 2, wp)
    pi_pow = mpf_div(pi_pow, from_int(4), wp)
    for n in xrange(2,N+2,2):
        z = mpf_mul(B[n], pi_pow, wp)
        zeta_values[n] = to_fixed(z, wp)
        pi_pow = mpf_mul(pi_pow, fpi, wp)
        pi_pow = mpf_div(pi_pow, from_int((n+1)*(n+2)), wp)
    # Zeta sum: combine even values into the remaining odd ones
    reciprocal_pi = (one << wp) // pi
    for n in xrange(3, N+1, 4):
        U = (n-3)//4
        s = zeta_values[4*U+4]*(4*U+7)//4
        for k in xrange(1, U+1):
            s -= (zeta_values[4*k] * zeta_values[4*U+4-4*k]) >> wp
        zeta_values[n] += (2*s*reciprocal_pi) >> wp
    for n in xrange(5, N+1, 4):
        U = (n-1)//4
        s = zeta_values[4*U+2]*(2*U+1)
        for k in xrange(1, 2*U+1):
            s += ((-1)**k*2*k* zeta_values[2*k] * zeta_values[4*U+2-2*k])>>wp
        zeta_values[n] += ((s*reciprocal_pi)>>wp)//(2*U)
    # Drop the guard bits before returning
    return [x>>extra for x in zeta_values]

def gamma_taylor_coefficients(inprec):
    """
    Gives the Taylor coefficients of 1/gamma(1+x) as
    a list of fixed-point numbers. Enough coefficients are returned
    to ensure that the series converges to the given precision
    when x is in [0.5, 1.5].

    Returns (coeffs, prec) where prec >= inprec is the fixed-point
    precision the coefficients are stored at.
    """
    # Reuse nearby cache values (small case): round the precision up to
    # a bucket boundary so nearby requests share one cache entry
    if inprec < 400:
        prec = inprec + (10-(inprec%10))
    elif inprec < 1000:
        prec = inprec + (30-(inprec%30))
    else:
        prec = inprec
    if prec in gamma_taylor_cache:
        return gamma_taylor_cache[prec], prec

    # Experimentally determined bounds
    if prec < 1000:
        N = int(prec**0.76 + 2)
    else:
        # Valid to at least 15000 bits
        N = int(prec**0.787 + 2)

    # Reuse higher precision values (shift them down to prec)
    for cprec in gamma_taylor_cache:
        if cprec > prec:
            coeffs = [x>>(cprec-prec) for x in gamma_taylor_cache[cprec][-N:]]
            if inprec < 1000:
                gamma_taylor_cache[prec] = coeffs
            return coeffs, prec

    # Cache at a higher precision (large case)
    if prec > 1000:
        prec = int(prec * 1.2)

    wp = prec + 20
    A = [0] * N
    A[0] = MPZ_ZERO
    A[1] = MPZ_ONE << wp
    A[2] = euler_fixed(wp)
    # SLOW, reference implementation
    #zeta_values = [0,0]+[to_fixed(mpf_zeta_int(k,wp),wp) for k in xrange(2,N)]
    zeta_values = zeta_array(N, wp)
    # Recurrence for the Maclaurin coefficients of 1/gamma(1+x),
    # driven by the zeta values
    for k in xrange(3, N):
        a = (-A[2]*A[k-1])>>wp
        for j in xrange(2,k):
            a += ((-1)**j * zeta_values[j] * A[k-j]) >> wp
        a //= (1-k)
        A[k] = a
    A = [a>>20 for a in A]
    # Reverse so the list can be consumed by Horner evaluation,
    # and drop the zero constant term
    A = A[::-1]
    A = A[:-1]
    gamma_taylor_cache[prec] = A
    #return A, prec
    # Recurse so the bucketed-precision caching logic above picks the entry up
    return gamma_taylor_coefficients(inprec)

def gamma_fixed_taylor(xmpf, x, wp, prec, rnd, type):
    """
    Evaluate the gamma function (variant selected by *type*, same codes
    as mpf_gamma) for the fixed-point argument x ~= xmpf using the
    Taylor series of 1/gamma(1+x) after shifting x near [0.5, 1.5].
    """
    # Determine nearest multiple of N/2
    #n = int(x >> (wp-1))
    #steps = (n-1)>>1
    nearest_int = ((x >> (wp-1)) + MPZ_ONE) >> 1
    one = MPZ_ONE << wp
    coeffs, cwp = gamma_taylor_coefficients(wp)
    if nearest_int > 0:
        # Shift down into the convergence interval, accumulating the
        # product r = (x-1)(x-2)... from the recurrence G(x) = (x-1)*G(x-1)
        r = one
+ for i in xrange(nearest_int-1): + x -= one + r = (r*x) >> wp + x -= one + p = MPZ_ZERO + for c in coeffs: + p = c + ((x*p)>>wp) + p >>= (cwp-wp) + if type == 0: + return from_man_exp((r<> wp + x += one + p = MPZ_ZERO + for c in coeffs: + p = c + ((x*p)>>wp) + p >>= (cwp-wp) + if wp - bitcount(abs(x)) > 10: + # pass very close to 0, so do floating-point multiply + g = mpf_add(xmpf, from_int(-nearest_int)) # exact + r = from_man_exp(p*r,-wp-wp) + r = mpf_mul(r, g, wp) + if type == 0: + return mpf_div(fone, r, prec, rnd) + if type == 2: + return mpf_pos(r, prec, rnd) + if type == 3: + return mpf_log(mpf_abs(mpf_div(fone, r, wp)), prec, rnd) + else: + r = from_man_exp(x*p*r,-3*wp) + if type == 0: return mpf_div(fone, r, prec, rnd) + if type == 2: return mpf_pos(r, prec, rnd) + if type == 3: return mpf_neg(mpf_log(mpf_abs(r), prec, rnd)) + +def stirling_coefficient(n): + if n in gamma_stirling_cache: + return gamma_stirling_cache[n] + p, q = bernfrac(n) + q *= MPZ(n*(n-1)) + gamma_stirling_cache[n] = p, q, bitcount(abs(p)), bitcount(q) + return gamma_stirling_cache[n] + +def real_stirling_series(x, prec): + """ + Sums the rational part of Stirling's expansion, + + log(sqrt(2*pi)) - z + 1/(12*z) - 1/(360*z^3) + ... 
+ + """ + t = (MPZ_ONE<<(prec+prec)) // x # t = 1/x + u = (t*t)>>prec # u = 1/x**2 + s = ln_sqrt2pi_fixed(prec) - x + # Add initial terms of Stirling's series + s += t//12; t = (t*u)>>prec + s -= t//360; t = (t*u)>>prec + s += t//1260; t = (t*u)>>prec + s -= t//1680; t = (t*u)>>prec + if not t: return s + s += t//1188; t = (t*u)>>prec + s -= 691*t//360360; t = (t*u)>>prec + s += t//156; t = (t*u)>>prec + if not t: return s + s -= 3617*t//122400; t = (t*u)>>prec + s += 43867*t//244188; t = (t*u)>>prec + s -= 174611*t//125400; t = (t*u)>>prec + if not t: return s + k = 22 + # From here on, the coefficients are growing, so we + # have to keep t at a roughly constant size + usize = bitcount(abs(u)) + tsize = bitcount(abs(t)) + texp = 0 + while 1: + p, q, pb, qb = stirling_coefficient(k) + term_mag = tsize + pb + texp + shift = -texp + m = pb - term_mag + if m > 0 and shift < m: + p >>= m + shift -= m + m = tsize - term_mag + if m > 0 and shift < m: + w = t >> m + shift -= m + else: + w = t + term = (t*p//q) >> shift + if not term: + break + s += term + t = (t*u) >> usize + texp -= (prec - usize) + k += 2 + return s + +def complex_stirling_series(x, y, prec): + # t = 1/z + _m = (x*x + y*y) >> prec + tre = (x << prec) // _m + tim = (-y << prec) // _m + # u = 1/z**2 + ure = (tre*tre - tim*tim) >> prec + uim = tim*tre >> (prec-1) + # s = log(sqrt(2*pi)) - z + sre = ln_sqrt2pi_fixed(prec) - x + sim = -y + + # Add initial terms of Stirling's series + sre += tre//12; sim += tim//12; + tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec) + sre -= tre//360; sim -= tim//360; + tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec) + sre += tre//1260; sim += tim//1260; + tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec) + sre -= tre//1680; sim -= tim//1680; + tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec) + if abs(tre) + abs(tim) < 5: return sre, sim + sre += tre//1188; sim += tim//1188; + tre, tim = ((tre*ure-tim*uim)>>prec), 
((tre*uim+tim*ure)>>prec)
    sre -= 691*tre//360360; sim -= 691*tim//360360;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    sre += tre//156; sim += tim//156;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    if abs(tre) + abs(tim) < 5: return sre, sim
    sre -= 3617*tre//122400; sim -= 3617*tim//122400;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    sre += 43867*tre//244188; sim += 43867*tim//244188;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    sre -= 174611*tre//125400; sim -= 174611*tim//125400;
    tre, tim = ((tre*ure-tim*uim)>>prec), ((tre*uim+tim*ure)>>prec)
    if abs(tre) + abs(tim) < 5: return sre, sim

    k = 22
    # From here on, the coefficients are growing, so we
    # have to keep t at a roughly constant size
    usize = bitcount(max(abs(ure), abs(uim)))
    tsize = bitcount(max(abs(tre), abs(tim)))
    texp = 0
    while 1:
        p, q, pb, qb = stirling_coefficient(k)
        term_mag = tsize + pb + texp
        shift = -texp
        m = pb - term_mag
        if m > 0 and shift < m:
            p >>= m
            shift -= m
        m = tsize - term_mag
        if m > 0 and shift < m:
            wre = tre >> m
            wim = tim >> m
            shift -= m
        else:
            wre = tre
            wim = tim
        # NOTE(review): wre/wim are computed but the terms use tre/tim,
        # matching the real_stirling_series code above — confirm upstream.
        termre = (tre*p//q) >> shift
        termim = (tim*p//q) >> shift
        if abs(termre) + abs(termim) < 5:
            break
        sre += termre
        sim += termim
        tre, tim = ((tre*ure - tim*uim)>>usize), \
            ((tre*uim + tim*ure)>>usize)
        texp -= (prec - usize)
        k += 2
    return sre, sim


def mpf_gamma(x, prec, rnd='d', type=0):
    """
    This function implements multipurpose evaluation of the gamma
    function, G(x), as well as the following versions of the same:

    type = 0 -- G(x) [standard gamma function]
    type = 1 -- G(x+1) = x*G(x) = x! [factorial]
    type = 2 -- 1/G(x) [reciprocal gamma function]
    type = 3 -- log(|G(x)|) [log-gamma function, real part]
    """

    # Special values
    sign, man, exp, bc = x
    if not man:
        if x == fzero:
            if type == 1: return fone
            if type == 2: return fzero
            raise ValueError("gamma function pole")
        if x == finf:
            if type == 2: return fzero
            return finf
        return fnan

    # First of all, for log gamma, numbers can be well beyond the fixed-point
    # range, so we must take care of huge numbers before e.g. trying
    # to convert x to the nearest integer
    if type == 3:
        wp = prec+20
        if exp+bc > wp and not sign:
            # Leading asymptotic term: log G(x) ~ x*log(x) - x
            return mpf_sub(mpf_mul(x, mpf_log(x, wp), wp), x, prec, rnd)

    # We strongly want to special-case small integers
    is_integer = exp >= 0
    if is_integer:
        # Poles
        if sign:
            if type == 2:
                return fzero
            raise ValueError("gamma function pole")
        # n = x
        n = man << exp
        if n < SMALL_FACTORIAL_CACHE_SIZE:
            if type == 0:
                return mpf_pos(small_factorial_cache[n-1], prec, rnd)
            if type == 1:
                return mpf_pos(small_factorial_cache[n], prec, rnd)
            if type == 2:
                return mpf_div(fone, small_factorial_cache[n-1], prec, rnd)
            if type == 3:
                return mpf_log(small_factorial_cache[n-1], prec, rnd)
    else:
        # floor(abs(x))
        n = int(man >> (-exp))

    # Estimate size and precision
    # Estimate log(gamma(|x|),2) as x*log(x,2)
    mag = exp + bc
    gamma_size = n*mag

    if type == 3:
        wp = prec + 20
    else:
        wp = prec + bitcount(gamma_size) + 20

    # Very close to 0, pole: use the leading terms of the Laurent/Taylor
    # expansions around x = 0
    if mag < -wp:
        if type == 0:
            return mpf_sub(mpf_div(fone,x, wp),mpf_shift(fone,-wp),prec,rnd)
        if type == 1: return mpf_sub(fone, x, prec, rnd)
        if type == 2: return mpf_add(x, mpf_shift(fone,mag-wp), prec, rnd)
        if type == 3: return mpf_neg(mpf_log(mpf_abs(x), prec, rnd))

    # From now on, we assume having a gamma function
    if type == 1:
        return mpf_gamma(mpf_add(x, fone), prec, rnd, 0)

    # Special case integers (those not small enough to be caught above,
    # but still small enough for an exact factorial to be faster
    # than an approximate algorithm), and half-integers
    if exp >= -1:
        if is_integer:
            if gamma_size < 10*wp:
                if type == 0:
                    return from_int(ifac(n-1), prec, rnd)
                if type == 2:
                    return from_rational(MPZ_ONE, ifac(n-1), prec, rnd)
                if type == 3:
                    return mpf_log(from_int(ifac(n-1)), prec, rnd)
        # half-integer: exact via sqrt(pi) and double factorials
        if n < 100 or gamma_size < 10*wp:
            if sign:
                w = sqrtpi_fixed(wp)
                if n % 2: f = ifac2(2*n+1)
                else: f = -ifac2(2*n+1)
                if type == 0:
                    return mpf_shift(from_rational(w, f, prec, rnd), -wp+n+1)
                if type == 2:
                    return mpf_shift(from_rational(f, w, prec, rnd), wp-n-1)
                if type == 3:
                    return mpf_log(mpf_shift(from_rational(w, abs(f),
                        prec, rnd), -wp+n+1), prec, rnd)
            elif n == 0:
                if type == 0: return mpf_sqrtpi(prec, rnd)
                if type == 2: return mpf_div(fone, mpf_sqrtpi(wp), prec, rnd)
                if type == 3: return mpf_log(mpf_sqrtpi(wp), prec, rnd)
            else:
                w = sqrtpi_fixed(wp)
                w = from_man_exp(w * ifac2(2*n-1), -wp-n)
                if type == 0: return mpf_pos(w, prec, rnd)
                if type == 2: return mpf_div(fone, w, prec, rnd)
                if type == 3: return mpf_log(mpf_abs(w), prec, rnd)

    # Convert to fixed point
    offset = exp + wp
    if offset >= 0: absxman = man << offset
    else: absxman = man >> (-offset)

    # For log gamma, provide accurate evaluation for x = 1+eps and 2+eps,
    # where log G(x) ~ 0 and massive cancellation would otherwise occur
    if type == 3 and not sign:
        one = MPZ_ONE << wp
        one_dist = abs(absxman-one)
        two_dist = abs(absxman-2*one)
        cancellation = (wp - bitcount(min(one_dist, two_dist)))
        if cancellation > 10:
            xsub1 = mpf_sub(fone, x)
            xsub2 = mpf_sub(ftwo, x)
            xsub1mag = xsub1[2]+xsub1[3]
            xsub2mag = xsub2[2]+xsub2[3]
            if xsub1mag < -wp:
                # log G(1+eps) ~ -euler*eps
                return mpf_mul(mpf_euler(wp), mpf_sub(fone, x), prec, rnd)
            if xsub2mag < -wp:
                # log G(2+eps) ~ (1-euler)*eps
                return mpf_mul(mpf_sub(fone, mpf_euler(wp)),
                    mpf_sub(x, ftwo), prec, rnd)
            # Proceed but increase precision
            wp += max(-xsub1mag, -xsub2mag)
            offset = exp + wp
            if offset >= 0: absxman = man << offset
            else: absxman = man
>> (-offset)

    # Use Taylor series if appropriate
    n_for_stirling = int(GAMMA_STIRLING_BETA*wp)
    if n < max(100, n_for_stirling) and wp < MAX_GAMMA_TAYLOR_PREC:
        if sign:
            absxman = -absxman
        return gamma_fixed_taylor(x, absxman, wp, prec, rnd, type)

    # Use Stirling's series
    # First ensure that |x| is large enough for rapid convergence
    xorig = x

    # Argument reduction: push |x| up past n_for_stirling using
    # G(x) = G(x+d) / (x*(x+1)*...*(x+d-1)); r accumulates the product
    r = 0
    if n < n_for_stirling:
        r = one = MPZ_ONE << wp
        d = n_for_stirling - n
        for k in xrange(d):
            r = (r * absxman) >> wp
            absxman += one
        x = xabs = from_man_exp(absxman, -wp)
        if sign:
            x = mpf_neg(x)
    else:
        xabs = mpf_abs(x)

    # Asymptotic series: w = log G(|x|) via Stirling
    y = real_stirling_series(absxman, wp)
    u = to_fixed(mpf_log(xabs, wp), wp)
    u = ((absxman - (MPZ_ONE<<(wp-1))) * u) >> wp
    y += u
    w = from_man_exp(y, -wp)

    # Compute final value
    if sign:
        # Reflection formula: G(x)*G(1-x) = pi/sin(pi*x)
        A = mpf_mul(mpf_sin_pi(xorig, wp), xorig, wp)
        B = mpf_neg(mpf_pi(wp))
        if type == 0 or type == 2:
            A = mpf_mul(A, mpf_exp(w, wp))
            if r:
                B = mpf_mul(B, from_man_exp(r, -wp), wp)
            if type == 0:
                return mpf_div(B, A, prec, rnd)
            if type == 2:
                return mpf_div(A, B, prec, rnd)
        if type == 3:
            if r:
                B = mpf_mul(B, from_man_exp(r, -wp), wp)
            A = mpf_add(mpf_log(mpf_abs(A), wp), w, wp)
            return mpf_sub(mpf_log(mpf_abs(B), wp), A, prec, rnd)
    else:
        if type == 0:
            if r:
                return mpf_div(mpf_exp(w, wp),
                    from_man_exp(r, -wp), prec, rnd)
            return mpf_exp(w, prec, rnd)
        if type == 2:
            if r:
                return mpf_div(from_man_exp(r, -wp),
                    mpf_exp(w, wp), prec, rnd)
            return mpf_exp(mpf_neg(w), prec, rnd)
        if type == 3:
            if r:
                return mpf_sub(w, mpf_log(from_man_exp(r,-wp), wp), prec, rnd)
            return mpf_pos(w, prec, rnd)


def mpc_gamma(z, prec, rnd='d', type=0):
    """
    Complex version of mpf_gamma; z is a raw mpc value (pair of mpf
    tuples) and *type* uses the same codes (0=gamma, 1=factorial,
    2=1/gamma, 3=log-gamma with correct branch).
    """
    a, b = z
    asign, aman, aexp, abc = a
    bsign, bman, bexp, bbc = b

    if b == fzero:
        # Imaginary part on negative half-axis for log-gamma function
        if type == 3 and asign:
            re = mpf_gamma(a, prec, rnd, 3)
            n = (-aman) >> (-aexp)
            im = mpf_mul_int(mpf_pi(prec+10), n, prec, rnd)
            return re, im
        return mpf_gamma(a, prec, rnd, type), fzero

    # Some kind of complex inf/nan
    if (not aman and aexp) or (not bman and bexp):
        return (fnan, fnan)

    # Initial working precision
    wp = prec + 20

    amag = aexp+abc
    bmag = bexp+bbc
    if aman:
        mag = max(amag, bmag)
    else:
        mag = bmag

    # Close to 0
    if mag < -8:
        if mag < -wp:
            # 1/gamma(z) = z + euler*z^2 + O(z^3)
            v = mpc_add(z, mpc_mul_mpf(mpc_mul(z,z,wp),mpf_euler(wp),wp), wp)
            if type == 0: return mpc_reciprocal(v, prec, rnd)
            if type == 1: return mpc_div(z, v, prec, rnd)
            if type == 2: return mpc_pos(v, prec, rnd)
            if type == 3: return mpc_log(mpc_reciprocal(v, prec), prec, rnd)
        elif type != 1:
            wp += (-mag)

    # Handle huge log-gamma values; must do this before converting to
    # a fixed-point value. TODO: determine a precise cutoff of validity
    # depending on amag and bmag
    if type == 3 and mag > wp and ((not asign) or (bmag >= amag)):
        return mpc_sub(mpc_mul(z, mpc_log(z, wp), wp), z, prec, rnd)

    # From now on, we assume having a gamma function
    if type == 1:
        return mpc_gamma((mpf_add(a, fone), b), prec, rnd, 0)

    an = abs(to_int(a))
    bn = abs(to_int(b))
    absn = max(an, bn)
    gamma_size = absn*mag
    if type == 3:
        pass
    else:
        wp += bitcount(gamma_size)

    # Reflect to the right half-plane. Note that Stirling's expansion
    # is valid in the left half-plane too, as long as we're not too close
    # to the real axis, but in order to use this argument reduction
    # in the negative direction must be implemented.
    #need_reflection = asign and ((bmag < 0) or (amag-bmag > 4))
    need_reflection = asign
    zorig = z
    if need_reflection:
        z = mpc_neg(z)
        asign, aman, aexp, abc = a = z[0]
        bsign, bman, bexp, bbc = b = z[1]

    # Imaginary part very small compared to real one?
    yfinal = 0
    balance_prec = 0
    if bmag < -10:
        # Check z ~= 1 and z ~= 2 for loggamma
        if type == 3:
            zsub1 = mpc_sub_mpf(z, fone)
            if zsub1[0] == fzero:
                cancel1 = -bmag
            else:
                cancel1 = -max(zsub1[0][2]+zsub1[0][3], bmag)
            if cancel1 > wp:
                # log G(1+eps) ~ (pi*eps)^2/12 - euler*eps
                pi = mpf_pi(wp)
                x = mpc_mul_mpf(zsub1, pi, wp)
                x = mpc_mul(x, x, wp)
                x = mpc_div_mpf(x, from_int(12), wp)
                y = mpc_mul_mpf(zsub1, mpf_neg(mpf_euler(wp)), wp)
                yfinal = mpc_add(x, y, wp)
                if not need_reflection:
                    return mpc_pos(yfinal, prec, rnd)
            elif cancel1 > 0:
                wp += cancel1
            zsub2 = mpc_sub_mpf(z, ftwo)
            if zsub2[0] == fzero:
                cancel2 = -bmag
            else:
                cancel2 = -max(zsub2[0][2]+zsub2[0][3], bmag)
            if cancel2 > wp:
                # log G(2+eps) ~ (pi^2-6)*eps^2/12 + (1-euler)*eps
                pi = mpf_pi(wp)
                t = mpf_sub(mpf_mul(pi, pi), from_int(6))
                x = mpc_mul_mpf(mpc_mul(zsub2, zsub2, wp), t, wp)
                x = mpc_div_mpf(x, from_int(12), wp)
                y = mpc_mul_mpf(zsub2, mpf_sub(fone, mpf_euler(wp)), wp)
                yfinal = mpc_add(x, y, wp)
                if not need_reflection:
                    return mpc_pos(yfinal, prec, rnd)
            elif cancel2 > 0:
                wp += cancel2
        if bmag < -wp:
            # Compute directly from the real gamma function,
            # using a first-order expansion in the imaginary direction
            pp = 2*(wp+10)
            aabs = mpf_abs(a)
            eps = mpf_shift(fone, amag-wp)
            x1 = mpf_gamma(aabs, pp, type=type)
            x2 = mpf_gamma(mpf_add(aabs, eps), pp, type=type)
            # numerical derivative of the selected real function
            xprime = mpf_div(mpf_sub(x2, x1, pp), eps, pp)
            y = mpf_mul(b, xprime, prec, rnd)
            yfinal = (x1, y)
            # Note: we still need to use the reflection formula for
            # near-poles, and the correct branch of the log-gamma function
            if not need_reflection:
                return mpc_pos(yfinal, prec, rnd)
        else:
            balance_prec += (-bmag)

    wp += balance_prec
    n_for_stirling = int(GAMMA_STIRLING_BETA*wp)
    need_reduction = absn < n_for_stirling

    afix = to_fixed(a, wp)
    bfix = to_fixed(b, wp)

    r = 0
    if not yfinal:
        zprered = z
        # Argument reduction: push |z| up to the Stirling region,
        # accumulating the product r = z*(z+1)*...*(z+d-1)
        if absn < n_for_stirling:
            absn = complex(an, bn)
            d = int((1 + n_for_stirling**2 - bn**2)**0.5 - an)
            rre = one = MPZ_ONE << wp
            rim = MPZ_ZERO
            for k in xrange(d):
                rre, rim = ((afix*rre-bfix*rim)>>wp), ((afix*rim + bfix*rre)>>wp)
                afix += one
            r = from_man_exp(rre, -wp), from_man_exp(rim, -wp)
            a = from_man_exp(afix, -wp)
            z = a, b

        yre, yim = complex_stirling_series(afix, bfix, wp)
        # (z-1/2)*log(z) + S
        lre, lim = mpc_log(z, wp)
        lre = to_fixed(lre, wp)
        lim = to_fixed(lim, wp)
        yre = ((lre*afix - lim*bfix)>>wp) - (lre>>1) + yre
        yim = ((lre*bfix + lim*afix)>>wp) - (lim>>1) + yim
        y = from_man_exp(yre, -wp), from_man_exp(yim, -wp)

        if r and type == 3:
            # If re(z) > 0 and abs(z) <= 4, the branches of loggamma(z)
            # and log(gamma(z)) coincide. Otherwise, use the zeroth order
            # Stirling expansion to compute the correct imaginary part.
            y = mpc_sub(y, mpc_log(r, wp), wp)
            zfa = to_float(zprered[0])
            zfb = to_float(zprered[1])
            zfabs = math.hypot(zfa,zfb)
            #if not (zfa > 0.0 and zfabs <= 4):
            yfb = to_float(y[1])
            u = math.atan2(zfb, zfa)
            # estimate im(loggamma(z)) in double precision to pick the
            # correct multiple of 2*pi
            if zfabs <= 0.5:
                gi = 0.577216*zfb - u
            else:
                gi = -zfb - 0.5*u + zfa*u + zfb*math.log(zfabs)
            n = int(math.floor((gi-yfb)/(2*math.pi)+0.5))
            y = (y[0], mpf_add(y[1], mpf_mul_int(mpf_pi(wp), 2*n, wp), wp))

    if need_reflection:
        if type == 0 or type == 2:
            A = mpc_mul(mpc_sin_pi(zorig, wp), zorig, wp)
            B = (mpf_neg(mpf_pi(wp)), fzero)
            if yfinal:
                if type == 2:
                    A = mpc_div(A, yfinal, wp)
                else:
                    A = mpc_mul(A, yfinal, wp)
            else:
                A = mpc_mul(A, mpc_exp(y, wp), wp)
            if r:
                B = mpc_mul(B, r, wp)
            if type == 0: return mpc_div(B, A, prec, rnd)
            if type == 2: return mpc_div(A, B, prec, rnd)

        # Reflection formula for the log-gamma function with correct branch
        # http://functions.wolfram.com/GammaBetaErf/LogGamma/16/01/01/0006/
        # LogGamma[z] == -LogGamma[-z] - Log[-z] +
        #     Sign[Im[z]] Floor[Re[z]] Pi I + Log[Pi] -
        #     Log[Sin[Pi (z - Floor[Re[z]])]] -
        #     Pi I (1 - Abs[Sign[Im[z]]]) Abs[Floor[Re[z]]]
        if type == 3:
            if yfinal:
                s1 = mpc_neg(yfinal)
            else:
                s1 = mpc_neg(y)
            # s -= log(-z)
            s1 = mpc_sub(s1, mpc_log(mpc_neg(zorig), wp), wp)
            # floor(re(z))
            rezfloor = mpf_floor(zorig[0])
            imzsign = mpf_sign(zorig[1])
            pi = mpf_pi(wp)
            t = mpf_mul(pi, rezfloor)
            t = mpf_mul_int(t, imzsign, wp)
            s1 = (s1[0], mpf_add(s1[1], t, wp))
            s1 = mpc_add_mpf(s1, mpf_log(pi, wp), wp)
            t = mpc_sin_pi(mpc_sub_mpf(zorig, rezfloor), wp)
            t = mpc_log(t, wp)
            s1 = mpc_sub(s1, t, wp)
            # Note: may actually be unused, because we fall back
            # to the mpf_ function for real arguments
            if not imzsign:
                t = mpf_mul(pi, mpf_floor(rezfloor), wp)
                s1 = (s1[0], mpf_sub(s1[1], t, wp))
            return mpc_pos(s1, prec, rnd)
    else:
        if type == 0:
            if r:
                return mpc_div(mpc_exp(y, wp), r, prec, rnd)
            return mpc_exp(y, prec, rnd)
        if type == 2:
            if r:
                return mpc_div(r, mpc_exp(y, wp), prec, rnd)
            return mpc_exp(mpc_neg(y), prec, rnd)
        if type == 3:
            return mpc_pos(y, prec, rnd)

def mpf_factorial(x, prec, rnd='d'):
    """Real factorial x! = G(x+1) (mpf_gamma with type=1)."""
    return mpf_gamma(x, prec, rnd, 1)

def mpc_factorial(x, prec, rnd='d'):
    """Complex factorial x! = G(x+1) (mpc_gamma with type=1)."""
    return mpc_gamma(x, prec, rnd, 1)

def mpf_rgamma(x, prec, rnd='d'):
    """Real reciprocal gamma 1/G(x) (mpf_gamma with type=2)."""
    return mpf_gamma(x, prec, rnd, 2)

def mpc_rgamma(x, prec, rnd='d'):
    """Complex reciprocal gamma 1/G(x) (mpc_gamma with type=2)."""
    return mpc_gamma(x, prec, rnd, 2)

def mpf_loggamma(x, prec, rnd='d'):
    """Real log-gamma; raises ComplexResult for negative x, where the
    true log-gamma is complex."""
    sign, man, exp, bc = x
    if sign:
        raise ComplexResult
    return mpf_gamma(x, prec, rnd, 3)

def mpc_loggamma(z, prec, rnd='d'):
    """Complex log-gamma with the correct branch; the negative real
    half-axis (where the branch cut lies) is handled explicitly."""
    a, b = z
    asign, aman, aexp, abc = a
    bsign, bman, bexp, bbc = b
    if b == fzero and asign:
        # On the negative real axis: real part from mpf_gamma, imaginary
        # part is floor(x)*pi from the branch of the logarithm
        re = mpf_gamma(a, prec, rnd, 3)
        n = (-aman) >> (-aexp)
        im = mpf_mul_int(mpf_pi(prec+10), n, prec, rnd)
        return re, im
    return mpc_gamma(z, prec, rnd, 3)

def mpf_gamma_int(n, prec, rnd=round_fast):
    """Gamma of a (positive) Python int n, i.e. (n-1)!, using the small
    factorial cache when possible."""
    if n < SMALL_FACTORIAL_CACHE_SIZE:
        return mpf_pos(small_factorial_cache[n-1], prec, rnd)
    return mpf_gamma(from_int(n), prec, rnd)
diff --git a/.venv/lib/python3.11/site-packages/mpmath/libmp/libhyper.py b/.venv/lib/python3.11/site-packages/mpmath/libmp/libhyper.py
new file mode 100644
index 0000000000000000000000000000000000000000..04f52d59710be77819066aea5c1cf4b0883f72d7
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/mpmath/libmp/libhyper.py
@@ -0,0 +1,1150 @@
"""
This module implements computation of hypergeometric and related
functions. In particular, it provides code for generic summation
of hypergeometric series. Optimized versions for various special
cases are also provided.
+""" + +import operator +import math + +from .backend import MPZ_ZERO, MPZ_ONE, BACKEND, xrange, exec_ + +from .libintmath import gcd + +from .libmpf import (\ + ComplexResult, round_fast, round_nearest, + negative_rnd, bitcount, to_fixed, from_man_exp, from_int, to_int, + from_rational, + fzero, fone, fnone, ftwo, finf, fninf, fnan, + mpf_sign, mpf_add, mpf_abs, mpf_pos, + mpf_cmp, mpf_lt, mpf_le, mpf_gt, mpf_min_max, + mpf_perturb, mpf_neg, mpf_shift, mpf_sub, mpf_mul, mpf_div, + sqrt_fixed, mpf_sqrt, mpf_rdiv_int, mpf_pow_int, + to_rational, +) + +from .libelefun import (\ + mpf_pi, mpf_exp, mpf_log, pi_fixed, mpf_cos_sin, mpf_cos, mpf_sin, + mpf_sqrt, agm_fixed, +) + +from .libmpc import (\ + mpc_one, mpc_sub, mpc_mul_mpf, mpc_mul, mpc_neg, complex_int_pow, + mpc_div, mpc_add_mpf, mpc_sub_mpf, + mpc_log, mpc_add, mpc_pos, mpc_shift, + mpc_is_infnan, mpc_zero, mpc_sqrt, mpc_abs, + mpc_mpf_div, mpc_square, mpc_exp +) + +from .libintmath import ifac +from .gammazeta import mpf_gamma_int, mpf_euler, euler_fixed + +class NoConvergence(Exception): + pass + + +#-----------------------------------------------------------------------# +# # +# Generic hypergeometric series # +# # +#-----------------------------------------------------------------------# + +""" +TODO: + +1. proper mpq parsing +2. imaginary z special-cased (also: rational, integer?) +3. more clever handling of series that don't converge because of stupid + upwards rounding +4. checking for cancellation + +""" + +def make_hyp_summator(key): + """ + Returns a function that sums a generalized hypergeometric series, + for given parameter types (integer, rational, real, complex). 
+ + """ + p, q, param_types, ztype = key + + pstring = "".join(param_types) + fname = "hypsum_%i_%i_%s_%s_%s" % (p, q, pstring[:p], pstring[p:], ztype) + #print "generating hypsum", fname + + have_complex_param = 'C' in param_types + have_complex_arg = ztype == 'C' + have_complex = have_complex_param or have_complex_arg + + source = [] + add = source.append + + aint = [] + arat = [] + bint = [] + brat = [] + areal = [] + breal = [] + acomplex = [] + bcomplex = [] + + #add("wp = prec + 40") + add("MAX = kwargs.get('maxterms', wp*100)") + add("HIGH = MPZ_ONE<= 0:") + add(" ZRE = xm << offset") + add("else:") + add(" ZRE = xm >> (-offset)") + if have_complex_arg: + add("offset = ye + wp") + add("if offset >= 0:") + add(" ZIM = ym << offset") + add("else:") + add(" ZIM = ym >> (-offset)") + + for i, flag in enumerate(param_types): + W = ["A", "B"][i >= p] + if flag == 'Z': + ([aint,bint][i >= p]).append(i) + add("%sINT_%i = coeffs[%i]" % (W, i, i)) + elif flag == 'Q': + ([arat,brat][i >= p]).append(i) + add("%sP_%i, %sQ_%i = coeffs[%i]._mpq_" % (W, i, W, i, i)) + elif flag == 'R': + ([areal,breal][i >= p]).append(i) + add("xsign, xm, xe, xbc = coeffs[%i]._mpf_" % i) + add("if xsign: xm = -xm") + add("offset = xe + wp") + add("if offset >= 0:") + add(" %sREAL_%i = xm << offset" % (W, i)) + add("else:") + add(" %sREAL_%i = xm >> (-offset)" % (W, i)) + elif flag == 'C': + ([acomplex,bcomplex][i >= p]).append(i) + add("__re, __im = coeffs[%i]._mpc_" % i) + add("xsign, xm, xe, xbc = __re") + add("if xsign: xm = -xm") + add("ysign, ym, ye, ybc = __im") + add("if ysign: ym = -ym") + + add("offset = xe + wp") + add("if offset >= 0:") + add(" %sCRE_%i = xm << offset" % (W, i)) + add("else:") + add(" %sCRE_%i = xm >> (-offset)" % (W, i)) + add("offset = ye + wp") + add("if offset >= 0:") + add(" %sCIM_%i = ym << offset" % (W, i)) + add("else:") + add(" %sCIM_%i = ym >> (-offset)" % (W, i)) + else: + raise ValueError + + l_areal = len(areal) + l_breal = len(breal) + 
cancellable_real = min(l_areal, l_breal) + noncancellable_real_num = areal[cancellable_real:] + noncancellable_real_den = breal[cancellable_real:] + + # LOOP + add("for n in xrange(1,10**8):") + + add(" if n in magnitude_check:") + add(" p_mag = bitcount(abs(PRE))") + if have_complex: + add(" p_mag = max(p_mag, bitcount(abs(PIM)))") + add(" magnitude_check[n] = wp-p_mag") + + # Real factors + multiplier = " * ".join(["AINT_#".replace("#", str(i)) for i in aint] + \ + ["AP_#".replace("#", str(i)) for i in arat] + \ + ["BQ_#".replace("#", str(i)) for i in brat]) + + divisor = " * ".join(["BINT_#".replace("#", str(i)) for i in bint] + \ + ["BP_#".replace("#", str(i)) for i in brat] + \ + ["AQ_#".replace("#", str(i)) for i in arat] + ["n"]) + + if multiplier: + add(" mul = " + multiplier) + add(" div = " + divisor) + + # Check for singular terms + add(" if not div:") + if multiplier: + add(" if not mul:") + add(" break") + add(" raise ZeroDivisionError") + + # Update product + if have_complex: + + # TODO: when there are several real parameters and just a few complex + # (maybe just the complex argument), we only need to do about + # half as many ops if we accumulate the real factor in a single real variable + for k in range(cancellable_real): add(" PRE = PRE * AREAL_%i // BREAL_%i" % (areal[k], breal[k])) + for i in noncancellable_real_num: add(" PRE = (PRE * AREAL_#) >> wp".replace("#", str(i))) + for i in noncancellable_real_den: add(" PRE = (PRE << wp) // BREAL_#".replace("#", str(i))) + for k in range(cancellable_real): add(" PIM = PIM * AREAL_%i // BREAL_%i" % (areal[k], breal[k])) + for i in noncancellable_real_num: add(" PIM = (PIM * AREAL_#) >> wp".replace("#", str(i))) + for i in noncancellable_real_den: add(" PIM = (PIM << wp) // BREAL_#".replace("#", str(i))) + + if multiplier: + if have_complex_arg: + add(" PRE, PIM = (mul*(PRE*ZRE-PIM*ZIM))//div, (mul*(PIM*ZRE+PRE*ZIM))//div") + add(" PRE >>= wp") + add(" PIM >>= wp") + else: + add(" PRE = ((mul * PRE * 
ZRE) >> wp) // div") + add(" PIM = ((mul * PIM * ZRE) >> wp) // div") + else: + if have_complex_arg: + add(" PRE, PIM = (PRE*ZRE-PIM*ZIM)//div, (PIM*ZRE+PRE*ZIM)//div") + add(" PRE >>= wp") + add(" PIM >>= wp") + else: + add(" PRE = ((PRE * ZRE) >> wp) // div") + add(" PIM = ((PIM * ZRE) >> wp) // div") + + for i in acomplex: + add(" PRE, PIM = PRE*ACRE_#-PIM*ACIM_#, PIM*ACRE_#+PRE*ACIM_#".replace("#", str(i))) + add(" PRE >>= wp") + add(" PIM >>= wp") + + for i in bcomplex: + add(" mag = BCRE_#*BCRE_#+BCIM_#*BCIM_#".replace("#", str(i))) + add(" re = PRE*BCRE_# + PIM*BCIM_#".replace("#", str(i))) + add(" im = PIM*BCRE_# - PRE*BCIM_#".replace("#", str(i))) + add(" PRE = (re << wp) // mag".replace("#", str(i))) + add(" PIM = (im << wp) // mag".replace("#", str(i))) + + else: + for k in range(cancellable_real): add(" PRE = PRE * AREAL_%i // BREAL_%i" % (areal[k], breal[k])) + for i in noncancellable_real_num: add(" PRE = (PRE * AREAL_#) >> wp".replace("#", str(i))) + for i in noncancellable_real_den: add(" PRE = (PRE << wp) // BREAL_#".replace("#", str(i))) + if multiplier: + add(" PRE = ((PRE * mul * ZRE) >> wp) // div") + else: + add(" PRE = ((PRE * ZRE) >> wp) // div") + + # Add product to sum + if have_complex: + add(" SRE += PRE") + add(" SIM += PIM") + add(" if (HIGH > PRE > LOW) and (HIGH > PIM > LOW):") + add(" break") + else: + add(" SRE += PRE") + add(" if HIGH > PRE > LOW:") + add(" break") + + #add(" from mpmath import nprint, log, ldexp") + #add(" nprint([n, log(abs(PRE),2), ldexp(PRE,-wp)])") + + add(" if n > MAX:") + add(" raise NoConvergence('Hypergeometric series converges too slowly. 
Try increasing maxterms.')") + + # +1 all parameters for next loop + for i in aint: add(" AINT_# += 1".replace("#", str(i))) + for i in bint: add(" BINT_# += 1".replace("#", str(i))) + for i in arat: add(" AP_# += AQ_#".replace("#", str(i))) + for i in brat: add(" BP_# += BQ_#".replace("#", str(i))) + for i in areal: add(" AREAL_# += one".replace("#", str(i))) + for i in breal: add(" BREAL_# += one".replace("#", str(i))) + for i in acomplex: add(" ACRE_# += one".replace("#", str(i))) + for i in bcomplex: add(" BCRE_# += one".replace("#", str(i))) + + if have_complex: + add("a = from_man_exp(SRE, -wp, prec, 'n')") + add("b = from_man_exp(SIM, -wp, prec, 'n')") + + add("if SRE:") + add(" if SIM:") + add(" magn = max(a[2]+a[3], b[2]+b[3])") + add(" else:") + add(" magn = a[2]+a[3]") + add("elif SIM:") + add(" magn = b[2]+b[3]") + add("else:") + add(" magn = -wp+1") + + add("return (a, b), True, magn") + else: + add("a = from_man_exp(SRE, -wp, prec, 'n')") + + add("if SRE:") + add(" magn = a[2]+a[3]") + add("else:") + add(" magn = -wp+1") + + add("return a, False, magn") + + source = "\n".join((" " + line) for line in source) + source = ("def %s(coeffs, z, prec, wp, epsshift, magnitude_check, **kwargs):\n" % fname) + source + + namespace = {} + + exec_(source, globals(), namespace) + + #print source + return source, namespace[fname] + + +if BACKEND == 'sage': + + def make_hyp_summator(key): + """ + Returns a function that sums a generalized hypergeometric series, + for given parameter types (integer, rational, real, complex). 
+ """ + from sage.libs.mpmath.ext_main import hypsum_internal + p, q, param_types, ztype = key + def _hypsum(coeffs, z, prec, wp, epsshift, magnitude_check, **kwargs): + return hypsum_internal(p, q, param_types, ztype, coeffs, z, + prec, wp, epsshift, magnitude_check, kwargs) + + return "(none)", _hypsum + + +#-----------------------------------------------------------------------# +# # +# Error functions # +# # +#-----------------------------------------------------------------------# + +# TODO: mpf_erf should call mpf_erfc when appropriate (currently +# only the converse delegation is implemented) + +def mpf_erf(x, prec, rnd=round_fast): + sign, man, exp, bc = x + if not man: + if x == fzero: return fzero + if x == finf: return fone + if x== fninf: return fnone + return fnan + size = exp + bc + lg = math.log + # The approximation erf(x) = 1 is accurate to > x^2 * log(e,2) bits + if size > 3 and 2*(size-1) + 0.528766 > lg(prec,2): + if sign: + return mpf_perturb(fnone, 0, prec, rnd) + else: + return mpf_perturb(fone, 1, prec, rnd) + # erf(x) ~ 2*x/sqrt(pi) close to 0 + if size < -prec: + # 2*x + x = mpf_shift(x,1) + c = mpf_sqrt(mpf_pi(prec+20), prec+20) + # TODO: interval rounding + return mpf_div(x, c, prec, rnd) + wp = prec + abs(size) + 25 + # Taylor series for erf, fixed-point summation + t = abs(to_fixed(x, wp)) + t2 = (t*t) >> wp + s, term, k = t, 12345, 1 + while term: + t = ((t * t2) >> wp) // k + term = t // (2*k+1) + if k & 1: + s -= term + else: + s += term + k += 1 + s = (s << (wp+1)) // sqrt_fixed(pi_fixed(wp), wp) + if sign: + s = -s + return from_man_exp(s, -wp, prec, rnd) + +# If possible, we use the asymptotic series for erfc. +# This is an alternating divergent asymptotic series, so +# the error is at most equal to the first omitted term. 
+# Here we check if the smallest term is small enough +# for a given x and precision +def erfc_check_series(x, prec): + n = to_int(x) + if n**2 * 1.44 > prec: + return True + return False + +def mpf_erfc(x, prec, rnd=round_fast): + sign, man, exp, bc = x + if not man: + if x == fzero: return fone + if x == finf: return fzero + if x == fninf: return ftwo + return fnan + wp = prec + 20 + mag = bc+exp + # Preserve full accuracy when exponent grows huge + wp += max(0, 2*mag) + regular_erf = sign or mag < 2 + if regular_erf or not erfc_check_series(x, wp): + if regular_erf: + return mpf_sub(fone, mpf_erf(x, prec+10, negative_rnd[rnd]), prec, rnd) + # 1-erf(x) ~ exp(-x^2), increase prec to deal with cancellation + n = to_int(x)+1 + return mpf_sub(fone, mpf_erf(x, prec + int(n**2*1.44) + 10), prec, rnd) + s = term = MPZ_ONE << wp + term_prev = 0 + t = (2 * to_fixed(x, wp) ** 2) >> wp + k = 1 + while 1: + term = ((term * (2*k - 1)) << wp) // t + if k > 4 and term > term_prev or not term: + break + if k & 1: + s -= term + else: + s += term + term_prev = term + #print k, to_str(from_man_exp(term, -wp, 50), 10) + k += 1 + s = (s << wp) // sqrt_fixed(pi_fixed(wp), wp) + s = from_man_exp(s, -wp, wp) + z = mpf_exp(mpf_neg(mpf_mul(x,x,wp),wp),wp) + y = mpf_div(mpf_mul(z, s, wp), x, prec, rnd) + return y + + +#-----------------------------------------------------------------------# +# # +# Exponential integrals # +# # +#-----------------------------------------------------------------------# + +def ei_taylor(x, prec): + s = t = x + k = 2 + while t: + t = ((t*x) >> prec) // k + s += t // k + k += 1 + return s + +def complex_ei_taylor(zre, zim, prec): + _abs = abs + sre = tre = zre + sim = tim = zim + k = 2 + while _abs(tre) + _abs(tim) > 5: + tre, tim = ((tre*zre-tim*zim)//k)>>prec, ((tre*zim+tim*zre)//k)>>prec + sre += tre // k + sim += tim // k + k += 1 + return sre, sim + +def ei_asymptotic(x, prec): + one = MPZ_ONE << prec + x = t = ((one << prec) // x) + s = one + x + k = 2 + 
while t: + t = (k*t*x) >> prec + s += t + k += 1 + return s + +def complex_ei_asymptotic(zre, zim, prec): + _abs = abs + one = MPZ_ONE << prec + M = (zim*zim + zre*zre) >> prec + # 1 / z + xre = tre = (zre << prec) // M + xim = tim = ((-zim) << prec) // M + sre = one + xre + sim = xim + k = 2 + while _abs(tre) + _abs(tim) > 1000: + #print tre, tim + tre, tim = ((tre*xre-tim*xim)*k)>>prec, ((tre*xim+tim*xre)*k)>>prec + sre += tre + sim += tim + k += 1 + if k > prec: + raise NoConvergence + return sre, sim + +def mpf_ei(x, prec, rnd=round_fast, e1=False): + if e1: + x = mpf_neg(x) + sign, man, exp, bc = x + if e1 and not sign: + if x == fzero: + return finf + raise ComplexResult("E1(x) for x < 0") + if man: + xabs = 0, man, exp, bc + xmag = exp+bc + wp = prec + 20 + can_use_asymp = xmag > wp + if not can_use_asymp: + if exp >= 0: + xabsint = man << exp + else: + xabsint = man >> (-exp) + can_use_asymp = xabsint > int(wp*0.693) + 10 + if can_use_asymp: + if xmag > wp: + v = fone + else: + v = from_man_exp(ei_asymptotic(to_fixed(x, wp), wp), -wp) + v = mpf_mul(v, mpf_exp(x, wp), wp) + v = mpf_div(v, x, prec, rnd) + else: + wp += 2*int(to_int(xabs)) + u = to_fixed(x, wp) + v = ei_taylor(u, wp) + euler_fixed(wp) + t1 = from_man_exp(v,-wp) + t2 = mpf_log(xabs,wp) + v = mpf_add(t1, t2, prec, rnd) + else: + if x == fzero: v = fninf + elif x == finf: v = finf + elif x == fninf: v = fzero + else: v = fnan + if e1: + v = mpf_neg(v) + return v + +def mpc_ei(z, prec, rnd=round_fast, e1=False): + if e1: + z = mpc_neg(z) + a, b = z + asign, aman, aexp, abc = a + bsign, bman, bexp, bbc = b + if b == fzero: + if e1: + x = mpf_neg(mpf_ei(a, prec, rnd)) + if not asign: + y = mpf_neg(mpf_pi(prec, rnd)) + else: + y = fzero + return x, y + else: + return mpf_ei(a, prec, rnd), fzero + if a != fzero: + if not aman or not bman: + return (fnan, fnan) + wp = prec + 40 + amag = aexp+abc + bmag = bexp+bbc + zmag = max(amag, bmag) + can_use_asymp = zmag > wp + if not can_use_asymp: + zabsint = 
abs(to_int(a)) + abs(to_int(b)) + can_use_asymp = zabsint > int(wp*0.693) + 20 + try: + if can_use_asymp: + if zmag > wp: + v = fone, fzero + else: + zre = to_fixed(a, wp) + zim = to_fixed(b, wp) + vre, vim = complex_ei_asymptotic(zre, zim, wp) + v = from_man_exp(vre, -wp), from_man_exp(vim, -wp) + v = mpc_mul(v, mpc_exp(z, wp), wp) + v = mpc_div(v, z, wp) + if e1: + v = mpc_neg(v, prec, rnd) + else: + x, y = v + if bsign: + v = mpf_pos(x, prec, rnd), mpf_sub(y, mpf_pi(wp), prec, rnd) + else: + v = mpf_pos(x, prec, rnd), mpf_add(y, mpf_pi(wp), prec, rnd) + return v + except NoConvergence: + pass + #wp += 2*max(0,zmag) + wp += 2*int(to_int(mpc_abs(z, 5))) + zre = to_fixed(a, wp) + zim = to_fixed(b, wp) + vre, vim = complex_ei_taylor(zre, zim, wp) + vre += euler_fixed(wp) + v = from_man_exp(vre,-wp), from_man_exp(vim,-wp) + if e1: + u = mpc_log(mpc_neg(z),wp) + else: + u = mpc_log(z,wp) + v = mpc_add(v, u, prec, rnd) + if e1: + v = mpc_neg(v) + return v + +def mpf_e1(x, prec, rnd=round_fast): + return mpf_ei(x, prec, rnd, True) + +def mpc_e1(x, prec, rnd=round_fast): + return mpc_ei(x, prec, rnd, True) + +def mpf_expint(n, x, prec, rnd=round_fast, gamma=False): + """ + E_n(x), n an integer, x real + + With gamma=True, computes Gamma(n,x) (upper incomplete gamma function) + + Returns (real, None) if real, otherwise (real, imag) + The imaginary part is an optional branch cut term + + """ + sign, man, exp, bc = x + if not man: + if gamma: + if x == fzero: + # Actually gamma function pole + if n <= 0: + return finf, None + return mpf_gamma_int(n, prec, rnd), None + if x == finf: + return fzero, None + # TODO: could return finite imaginary value at -inf + return fnan, fnan + else: + if x == fzero: + if n > 1: + return from_rational(1, n-1, prec, rnd), None + else: + return finf, None + if x == finf: + return fzero, None + return fnan, fnan + n_orig = n + if gamma: + n = 1-n + wp = prec + 20 + xmag = exp + bc + # Beware of near-poles + if xmag < -10: + raise 
NotImplementedError + nmag = bitcount(abs(n)) + have_imag = n > 0 and sign + negx = mpf_neg(x) + # Skip series if direct convergence + if n == 0 or 2*nmag - xmag < -wp: + if gamma: + v = mpf_exp(negx, wp) + re = mpf_mul(v, mpf_pow_int(x, n_orig-1, wp), prec, rnd) + else: + v = mpf_exp(negx, wp) + re = mpf_div(v, x, prec, rnd) + else: + # Finite number of terms, or... + can_use_asymptotic_series = -3*wp < n <= 0 + # ...large enough? + if not can_use_asymptotic_series: + xi = abs(to_int(x)) + m = min(max(1, xi-n), 2*wp) + siz = -n*nmag + (m+n)*bitcount(abs(m+n)) - m*xmag - (144*m//100) + tol = -wp-10 + can_use_asymptotic_series = siz < tol + if can_use_asymptotic_series: + r = ((-MPZ_ONE) << (wp+wp)) // to_fixed(x, wp) + m = n + t = r*m + s = MPZ_ONE << wp + while m and t: + s += t + m += 1 + t = (m*r*t) >> wp + v = mpf_exp(negx, wp) + if gamma: + # ~ exp(-x) * x^(n-1) * (1 + ...) + v = mpf_mul(v, mpf_pow_int(x, n_orig-1, wp), wp) + else: + # ~ exp(-x)/x * (1 + ...) + v = mpf_div(v, x, wp) + re = mpf_mul(v, from_man_exp(s, -wp), prec, rnd) + elif n == 1: + re = mpf_neg(mpf_ei(negx, prec, rnd)) + elif n > 0 and n < 3*wp: + T1 = mpf_neg(mpf_ei(negx, wp)) + if gamma: + if n_orig & 1: + T1 = mpf_neg(T1) + else: + T1 = mpf_mul(T1, mpf_pow_int(negx, n-1, wp), wp) + r = t = to_fixed(x, wp) + facs = [1] * (n-1) + for k in range(1,n-1): + facs[k] = facs[k-1] * k + facs = facs[::-1] + s = facs[0] << wp + for k in range(1, n-1): + if k & 1: + s -= facs[k] * t + else: + s += facs[k] * t + t = (t*r) >> wp + T2 = from_man_exp(s, -wp, wp) + T2 = mpf_mul(T2, mpf_exp(negx, wp)) + if gamma: + T2 = mpf_mul(T2, mpf_pow_int(x, n_orig, wp), wp) + R = mpf_add(T1, T2) + re = mpf_div(R, from_int(ifac(n-1)), prec, rnd) + else: + raise NotImplementedError + if have_imag: + M = from_int(-ifac(n-1)) + if gamma: + im = mpf_div(mpf_pi(wp), M, prec, rnd) + if n_orig & 1: + im = mpf_neg(im) + else: + im = mpf_div(mpf_mul(mpf_pi(wp), mpf_pow_int(negx, n_orig-1, wp), wp), M, prec, rnd) + return re, im 
+ else: + return re, None + +def mpf_ci_si_taylor(x, wp, which=0): + """ + 0 - Ci(x) - (euler+log(x)) + 1 - Si(x) + """ + x = to_fixed(x, wp) + x2 = -(x*x) >> wp + if which == 0: + s, t, k = 0, (MPZ_ONE<>wp + s += t//k + k += 2 + return from_man_exp(s, -wp) + +def mpc_ci_si_taylor(re, im, wp, which=0): + # The following code is only designed for small arguments, + # and not too small arguments (for relative accuracy) + if re[1]: + mag = re[2]+re[3] + elif im[1]: + mag = im[2]+im[3] + if im[1]: + mag = max(mag, im[2]+im[3]) + if mag > 2 or mag < -wp: + raise NotImplementedError + wp += (2-mag) + zre = to_fixed(re, wp) + zim = to_fixed(im, wp) + z2re = (zim*zim-zre*zre)>>wp + z2im = (-2*zre*zim)>>wp + tre = zre + tim = zim + one = MPZ_ONE< 2: + f = k*(k-1) + tre, tim = ((tre*z2re-tim*z2im)//f)>>wp, ((tre*z2im+tim*z2re)//f)>>wp + sre += tre//k + sim += tim//k + k += 2 + return from_man_exp(sre, -wp), from_man_exp(sim, -wp) + +def mpf_ci_si(x, prec, rnd=round_fast, which=2): + """ + Calculation of Ci(x), Si(x) for real x. + + which = 0 -- returns (Ci(x), -) + which = 1 -- returns (Si(x), -) + which = 2 -- returns (Ci(x), Si(x)) + + Note: if x < 0, Ci(x) needs an additional imaginary term, pi*i. 
+ """ + wp = prec + 20 + sign, man, exp, bc = x + ci, si = None, None + if not man: + if x == fzero: + return (fninf, fzero) + if x == fnan: + return (x, x) + ci = fzero + if which != 0: + if x == finf: + si = mpf_shift(mpf_pi(prec, rnd), -1) + if x == fninf: + si = mpf_neg(mpf_shift(mpf_pi(prec, negative_rnd[rnd]), -1)) + return (ci, si) + # For small x: Ci(x) ~ euler + log(x), Si(x) ~ x + mag = exp+bc + if mag < -wp: + if which != 0: + si = mpf_perturb(x, 1-sign, prec, rnd) + if which != 1: + y = mpf_euler(wp) + xabs = mpf_abs(x) + ci = mpf_add(y, mpf_log(xabs, wp), prec, rnd) + return ci, si + # For huge x: Ci(x) ~ sin(x)/x, Si(x) ~ pi/2 + elif mag > wp: + if which != 0: + if sign: + si = mpf_neg(mpf_pi(prec, negative_rnd[rnd])) + else: + si = mpf_pi(prec, rnd) + si = mpf_shift(si, -1) + if which != 1: + ci = mpf_div(mpf_sin(x, wp), x, prec, rnd) + return ci, si + else: + wp += abs(mag) + # Use an asymptotic series? The smallest value of n!/x^n + # occurs for n ~ x, where the magnitude is ~ exp(-x). 
+ asymptotic = mag-1 > math.log(wp, 2) + # Case 1: convergent series near 0 + if not asymptotic: + if which != 0: + si = mpf_pos(mpf_ci_si_taylor(x, wp, 1), prec, rnd) + if which != 1: + ci = mpf_ci_si_taylor(x, wp, 0) + ci = mpf_add(ci, mpf_euler(wp), wp) + ci = mpf_add(ci, mpf_log(mpf_abs(x), wp), prec, rnd) + return ci, si + x = mpf_abs(x) + # Case 2: asymptotic series for x >> 1 + xf = to_fixed(x, wp) + xr = (MPZ_ONE<<(2*wp)) // xf # 1/x + s1 = (MPZ_ONE << wp) + s2 = xr + t = xr + k = 2 + while t: + t = -t + t = (t*xr*k)>>wp + k += 1 + s1 += t + t = (t*xr*k)>>wp + k += 1 + s2 += t + s1 = from_man_exp(s1, -wp) + s2 = from_man_exp(s2, -wp) + s1 = mpf_div(s1, x, wp) + s2 = mpf_div(s2, x, wp) + cos, sin = mpf_cos_sin(x, wp) + # Ci(x) = sin(x)*s1-cos(x)*s2 + # Si(x) = pi/2-cos(x)*s1-sin(x)*s2 + if which != 0: + si = mpf_add(mpf_mul(cos, s1), mpf_mul(sin, s2), wp) + si = mpf_sub(mpf_shift(mpf_pi(wp), -1), si, wp) + if sign: + si = mpf_neg(si) + si = mpf_pos(si, prec, rnd) + if which != 1: + ci = mpf_sub(mpf_mul(sin, s1), mpf_mul(cos, s2), prec, rnd) + return ci, si + +def mpf_ci(x, prec, rnd=round_fast): + if mpf_sign(x) < 0: + raise ComplexResult + return mpf_ci_si(x, prec, rnd, 0)[0] + +def mpf_si(x, prec, rnd=round_fast): + return mpf_ci_si(x, prec, rnd, 1)[1] + +def mpc_ci(z, prec, rnd=round_fast): + re, im = z + if im == fzero: + ci = mpf_ci_si(re, prec, rnd, 0)[0] + if mpf_sign(re) < 0: + return (ci, mpf_pi(prec, rnd)) + return (ci, fzero) + wp = prec + 20 + cre, cim = mpc_ci_si_taylor(re, im, wp, 0) + cre = mpf_add(cre, mpf_euler(wp), wp) + ci = mpc_add((cre, cim), mpc_log(z, wp), prec, rnd) + return ci + +def mpc_si(z, prec, rnd=round_fast): + re, im = z + if im == fzero: + return (mpf_ci_si(re, prec, rnd, 1)[1], fzero) + wp = prec + 20 + z = mpc_ci_si_taylor(re, im, wp, 1) + return mpc_pos(z, prec, rnd) + + +#-----------------------------------------------------------------------# +# # +# Bessel functions # +# # 
+#-----------------------------------------------------------------------# + +# A Bessel function of the first kind of integer order, J_n(x), is +# given by the power series + +# oo +# ___ k 2 k + n +# \ (-1) / x \ +# J_n(x) = ) ----------- | - | +# /___ k! (k + n)! \ 2 / +# k = 0 + +# Simplifying the quotient between two successive terms gives the +# ratio x^2 / (-4*k*(k+n)). Hence, we only need one full-precision +# multiplication and one division by a small integer per term. +# The complex version is very similar, the only difference being +# that the multiplication is actually 4 multiplies. + +# In the general case, we have +# J_v(x) = (x/2)**v / v! * 0F1(v+1, (-1/4)*z**2) + +# TODO: for extremely large x, we could use an asymptotic +# trigonometric approximation. + +# TODO: recompute at higher precision if the fixed-point mantissa +# is very small + +def mpf_besseljn(n, x, prec, rounding=round_fast): + prec += 50 + negate = n < 0 and n & 1 + mag = x[2]+x[3] + n = abs(n) + wp = prec + 20 + n*bitcount(n) + if mag < 0: + wp -= n * mag + x = to_fixed(x, wp) + x2 = (x**2) >> wp + if not n: + s = t = MPZ_ONE << wp + else: + s = t = (x**n // ifac(n)) >> ((n-1)*wp + n) + k = 1 + while t: + t = ((t * x2) // (-4*k*(k+n))) >> wp + s += t + k += 1 + if negate: + s = -s + return from_man_exp(s, -wp, prec, rounding) + +def mpc_besseljn(n, z, prec, rounding=round_fast): + negate = n < 0 and n & 1 + n = abs(n) + origprec = prec + zre, zim = z + mag = max(zre[2]+zre[3], zim[2]+zim[3]) + prec += 20 + n*bitcount(n) + abs(mag) + if mag < 0: + prec -= n * mag + zre = to_fixed(zre, prec) + zim = to_fixed(zim, prec) + z2re = (zre**2 - zim**2) >> prec + z2im = (zre*zim) >> (prec-1) + if not n: + sre = tre = MPZ_ONE << prec + sim = tim = MPZ_ZERO + else: + re, im = complex_int_pow(zre, zim, n) + sre = tre = (re // ifac(n)) >> ((n-1)*prec + n) + sim = tim = (im // ifac(n)) >> ((n-1)*prec + n) + k = 1 + while abs(tre) + abs(tim) > 3: + p = -4*k*(k+n) + tre, tim = tre*z2re - tim*z2im, 
tim*z2re + tre*z2im + tre = (tre // p) >> prec + tim = (tim // p) >> prec + sre += tre + sim += tim + k += 1 + if negate: + sre = -sre + sim = -sim + re = from_man_exp(sre, -prec, origprec, rounding) + im = from_man_exp(sim, -prec, origprec, rounding) + return (re, im) + +def mpf_agm(a, b, prec, rnd=round_fast): + """ + Computes the arithmetic-geometric mean agm(a,b) for + nonnegative mpf values a, b. + """ + asign, aman, aexp, abc = a + bsign, bman, bexp, bbc = b + if asign or bsign: + raise ComplexResult("agm of a negative number") + # Handle inf, nan or zero in either operand + if not (aman and bman): + if a == fnan or b == fnan: + return fnan + if a == finf: + if b == fzero: + return fnan + return finf + if b == finf: + if a == fzero: + return fnan + return finf + # agm(0,x) = agm(x,0) = 0 + return fzero + wp = prec + 20 + amag = aexp+abc + bmag = bexp+bbc + mag_delta = amag - bmag + # Reduce to roughly the same magnitude using floating-point AGM + abs_mag_delta = abs(mag_delta) + if abs_mag_delta > 10: + while abs_mag_delta > 10: + a, b = mpf_shift(mpf_add(a,b,wp),-1), \ + mpf_sqrt(mpf_mul(a,b,wp),wp) + abs_mag_delta //= 2 + asign, aman, aexp, abc = a + bsign, bman, bexp, bbc = b + amag = aexp+abc + bmag = bexp+bbc + mag_delta = amag - bmag + #print to_float(a), to_float(b) + # Use agm(a,b) = agm(x*a,x*b)/x to obtain a, b ~= 1 + min_mag = min(amag,bmag) + max_mag = max(amag,bmag) + n = 0 + # If too small, we lose precision when going to fixed-point + if min_mag < -8: + n = -min_mag + # If too large, we waste time using fixed-point with large numbers + elif max_mag > 20: + n = -max_mag + if n: + a = mpf_shift(a, n) + b = mpf_shift(b, n) + #print to_float(a), to_float(b) + af = to_fixed(a, wp) + bf = to_fixed(b, wp) + g = agm_fixed(af, bf, wp) + return from_man_exp(g, -wp-n, prec, rnd) + +def mpf_agm1(a, prec, rnd=round_fast): + """ + Computes the arithmetic-geometric mean agm(1,a) for a nonnegative + mpf value a. 
+ """ + return mpf_agm(fone, a, prec, rnd) + +def mpc_agm(a, b, prec, rnd=round_fast): + """ + Complex AGM. + + TODO: + * check that convergence works as intended + * optimize + * select a nonarbitrary branch + """ + if mpc_is_infnan(a) or mpc_is_infnan(b): + return fnan, fnan + if mpc_zero in (a, b): + return fzero, fzero + if mpc_neg(a) == b: + return fzero, fzero + wp = prec+20 + eps = mpf_shift(fone, -wp+10) + while 1: + a1 = mpc_shift(mpc_add(a, b, wp), -1) + b1 = mpc_sqrt(mpc_mul(a, b, wp), wp) + a, b = a1, b1 + size = mpf_min_max([mpc_abs(a,10), mpc_abs(b,10)])[1] + err = mpc_abs(mpc_sub(a, b, 10), 10) + if size == fzero or mpf_lt(err, mpf_mul(eps, size)): + return a + +def mpc_agm1(a, prec, rnd=round_fast): + return mpc_agm(mpc_one, a, prec, rnd) + +def mpf_ellipk(x, prec, rnd=round_fast): + if not x[1]: + if x == fzero: + return mpf_shift(mpf_pi(prec, rnd), -1) + if x == fninf: + return fzero + if x == fnan: + return x + if x == fone: + return finf + # TODO: for |x| << 1/2, one could use fall back to + # pi/2 * hyp2f1_rat((1,2),(1,2),(1,1), x) + wp = prec + 15 + # Use K(x) = pi/2/agm(1,a) where a = sqrt(1-x) + # The sqrt raises ComplexResult if x > 0 + a = mpf_sqrt(mpf_sub(fone, x, wp), wp) + v = mpf_agm1(a, wp) + r = mpf_div(mpf_pi(wp), v, prec, rnd) + return mpf_shift(r, -1) + +def mpc_ellipk(z, prec, rnd=round_fast): + re, im = z + if im == fzero: + if re == finf: + return mpc_zero + if mpf_le(re, fone): + return mpf_ellipk(re, prec, rnd), fzero + wp = prec + 15 + a = mpc_sqrt(mpc_sub(mpc_one, z, wp), wp) + v = mpc_agm1(a, wp) + r = mpc_mpf_div(mpf_pi(wp), v, prec, rnd) + return mpc_shift(r, -1) + +def mpf_ellipe(x, prec, rnd=round_fast): + # http://functions.wolfram.com/EllipticIntegrals/ + # EllipticK/20/01/0001/ + # E = (1-m)*(K'(m)*2*m + K(m)) + sign, man, exp, bc = x + if not man: + if x == fzero: + return mpf_shift(mpf_pi(prec, rnd), -1) + if x == fninf: + return finf + if x == fnan: + return x + if x == finf: + raise ComplexResult + if x == fone: 
+ return fone + wp = prec+20 + mag = exp+bc + if mag < -wp: + return mpf_shift(mpf_pi(prec, rnd), -1) + # Compute a finite difference for K' + p = max(mag, 0) - wp + h = mpf_shift(fone, p) + K = mpf_ellipk(x, 2*wp) + Kh = mpf_ellipk(mpf_sub(x, h), 2*wp) + Kdiff = mpf_shift(mpf_sub(K, Kh), -p) + t = mpf_sub(fone, x) + b = mpf_mul(Kdiff, mpf_shift(x,1), wp) + return mpf_mul(t, mpf_add(K, b), prec, rnd) + +def mpc_ellipe(z, prec, rnd=round_fast): + re, im = z + if im == fzero: + if re == finf: + return (fzero, finf) + if mpf_le(re, fone): + return mpf_ellipe(re, prec, rnd), fzero + wp = prec + 15 + mag = mpc_abs(z, 1) + p = max(mag[2]+mag[3], 0) - wp + h = mpf_shift(fone, p) + K = mpc_ellipk(z, 2*wp) + Kh = mpc_ellipk(mpc_add_mpf(z, h, 2*wp), 2*wp) + Kdiff = mpc_shift(mpc_sub(Kh, K, wp), -p) + t = mpc_sub(mpc_one, z, wp) + b = mpc_mul(Kdiff, mpc_shift(z,1), wp) + return mpc_mul(t, mpc_add(K, b, wp), prec, rnd) diff --git a/.venv/lib/python3.11/site-packages/mpmath/libmp/libintmath.py b/.venv/lib/python3.11/site-packages/mpmath/libmp/libintmath.py new file mode 100644 index 0000000000000000000000000000000000000000..7880546e135639208d136488408b102ad41682a2 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/libmp/libintmath.py @@ -0,0 +1,584 @@ +""" +Utility functions for integer math. + +TODO: rename, cleanup, perhaps move the gmpy wrapper code +here from settings.py + +""" + +import math +from bisect import bisect + +from .backend import xrange +from .backend import BACKEND, gmpy, sage, sage_utils, MPZ, MPZ_ONE, MPZ_ZERO + +small_trailing = [0] * 256 +for j in range(1,8): + small_trailing[1<>> giant_steps(50,1000) + [66, 128, 253, 502, 1000] + >>> giant_steps(50,1000,4) + [65, 252, 1000] + + """ + L = [target] + while L[-1] > start*n: + L = L + [L[-1]//n + 2] + return L[::-1] + +def rshift(x, n): + """For an integer x, calculate x >> n with the fastest (floor) + rounding. 
Unlike the plain Python expression (x >> n), n is + allowed to be negative, in which case a left shift is performed.""" + if n >= 0: return x >> n + else: return x << (-n) + +def lshift(x, n): + """For an integer x, calculate x << n. Unlike the plain Python + expression (x << n), n is allowed to be negative, in which case a + right shift with default (floor) rounding is performed.""" + if n >= 0: return x << n + else: return x >> (-n) + +if BACKEND == 'sage': + import operator + rshift = operator.rshift + lshift = operator.lshift + +def python_trailing(n): + """Count the number of trailing zero bits in abs(n).""" + if not n: + return 0 + low_byte = n & 0xff + if low_byte: + return small_trailing[low_byte] + t = 8 + n >>= 8 + while not n & 0xff: + n >>= 8 + t += 8 + return t + small_trailing[n & 0xff] + +if BACKEND == 'gmpy': + if gmpy.version() >= '2': + def gmpy_trailing(n): + """Count the number of trailing zero bits in abs(n) using gmpy.""" + if n: return MPZ(n).bit_scan1() + else: return 0 + else: + def gmpy_trailing(n): + """Count the number of trailing zero bits in abs(n) using gmpy.""" + if n: return MPZ(n).scan1() + else: return 0 + +# Small powers of 2 +powers = [1<<_ for _ in range(300)] + +def python_bitcount(n): + """Calculate bit size of the nonnegative integer n.""" + bc = bisect(powers, n) + if bc != 300: + return bc + bc = int(math.log(n, 2)) - 4 + return bc + bctable[n>>bc] + +def gmpy_bitcount(n): + """Calculate bit size of the nonnegative integer n.""" + if n: return MPZ(n).numdigits(2) + else: return 0 + +#def sage_bitcount(n): +# if n: return MPZ(n).nbits() +# else: return 0 + +def sage_trailing(n): + return MPZ(n).trailing_zero_bits() + +if BACKEND == 'gmpy': + bitcount = gmpy_bitcount + trailing = gmpy_trailing +elif BACKEND == 'sage': + sage_bitcount = sage_utils.bitcount + bitcount = sage_bitcount + trailing = sage_trailing +else: + bitcount = python_bitcount + trailing = python_trailing + +if BACKEND == 'gmpy' and 'bit_length' in 
dir(gmpy): + bitcount = gmpy.bit_length + +# Used to avoid slow function calls as far as possible +trailtable = [trailing(n) for n in range(256)] +bctable = [bitcount(n) for n in range(1024)] + +# TODO: speed up for bases 2, 4, 8, 16, ... + +def bin_to_radix(x, xbits, base, bdigits): + """Changes radix of a fixed-point number; i.e., converts + x * 2**xbits to floor(x * 10**bdigits).""" + return x * (MPZ(base)**bdigits) >> xbits + +stddigits = '0123456789abcdefghijklmnopqrstuvwxyz' + +def small_numeral(n, base=10, digits=stddigits): + """Return the string numeral of a positive integer in an arbitrary + base. Most efficient for small input.""" + if base == 10: + return str(n) + digs = [] + while n: + n, digit = divmod(n, base) + digs.append(digits[digit]) + return "".join(digs[::-1]) + +def numeral_python(n, base=10, size=0, digits=stddigits): + """Represent the integer n as a string of digits in the given base. + Recursive division is used to make this function about 3x faster + than Python's str() for converting integers to decimal strings. + + The 'size' parameters specifies the number of digits in n; this + number is only used to determine splitting points and need not be + exact.""" + if n <= 0: + if not n: + return "0" + return "-" + numeral(-n, base, size, digits) + # Fast enough to do directly + if size < 250: + return small_numeral(n, base, digits) + # Divide in half + half = (size // 2) + (size & 1) + A, B = divmod(n, base**half) + ad = numeral(A, base, half, digits) + bd = numeral(B, base, half, digits).rjust(half, "0") + return ad + bd + +def numeral_gmpy(n, base=10, size=0, digits=stddigits): + """Represent the integer n as a string of digits in the given base. + Recursive division is used to make this function about 3x faster + than Python's str() for converting integers to decimal strings. 
+ + The 'size' parameters specifies the number of digits in n; this + number is only used to determine splitting points and need not be + exact.""" + if n < 0: + return "-" + numeral(-n, base, size, digits) + # gmpy.digits() may cause a segmentation fault when trying to convert + # extremely large values to a string. The size limit may need to be + # adjusted on some platforms, but 1500000 works on Windows and Linux. + if size < 1500000: + return gmpy.digits(n, base) + # Divide in half + half = (size // 2) + (size & 1) + A, B = divmod(n, MPZ(base)**half) + ad = numeral(A, base, half, digits) + bd = numeral(B, base, half, digits).rjust(half, "0") + return ad + bd + +if BACKEND == "gmpy": + numeral = numeral_gmpy +else: + numeral = numeral_python + +_1_800 = 1<<800 +_1_600 = 1<<600 +_1_400 = 1<<400 +_1_200 = 1<<200 +_1_100 = 1<<100 +_1_50 = 1<<50 + +def isqrt_small_python(x): + """ + Correctly (floor) rounded integer square root, using + division. Fast up to ~200 digits. + """ + if not x: + return x + if x < _1_800: + # Exact with IEEE double precision arithmetic + if x < _1_50: + return int(x**0.5) + # Initial estimate can be any integer >= the true root; round up + r = int(x**0.5 * 1.00000000000001) + 1 + else: + bc = bitcount(x) + n = bc//2 + r = int((x>>(2*n-100))**0.5+2)<<(n-50) # +2 is to round up + # The following iteration now precisely computes floor(sqrt(x)) + # See e.g. Crandall & Pomerance, "Prime Numbers: A Computational + # Perspective" + while 1: + y = (r+x//r)>>1 + if y >= r: + return r + r = y + +def isqrt_fast_python(x): + """ + Fast approximate integer square root, computed using division-free + Newton iteration for large x. For random integers the result is almost + always correct (floor(sqrt(x))), but is 1 ulp too small with a roughly + 0.1% probability. If x is very close to an exact square, the answer is + 1 ulp wrong with high probability. 
+ + With 0 guard bits, the largest error over a set of 10^5 random + inputs of size 1-10^5 bits was 3 ulp. The use of 10 guard bits + almost certainly guarantees a max 1 ulp error. + """ + # Use direct division-based iteration if sqrt(x) < 2^400 + # Assume floating-point square root accurate to within 1 ulp, then: + # 0 Newton iterations good to 52 bits + # 1 Newton iterations good to 104 bits + # 2 Newton iterations good to 208 bits + # 3 Newton iterations good to 416 bits + if x < _1_800: + y = int(x**0.5) + if x >= _1_100: + y = (y + x//y) >> 1 + if x >= _1_200: + y = (y + x//y) >> 1 + if x >= _1_400: + y = (y + x//y) >> 1 + return y + bc = bitcount(x) + guard_bits = 10 + x <<= 2*guard_bits + bc += 2*guard_bits + bc += (bc&1) + hbc = bc//2 + startprec = min(50, hbc) + # Newton iteration for 1/sqrt(x), with floating-point starting value + r = int(2.0**(2*startprec) * (x >> (bc-2*startprec)) ** -0.5) + pp = startprec + for p in giant_steps(startprec, hbc): + # r**2, scaled from real size 2**(-bc) to 2**p + r2 = (r*r) >> (2*pp - p) + # x*r**2, scaled from real size ~1.0 to 2**p + xr2 = ((x >> (bc-p)) * r2) >> p + # New value of r, scaled from real size 2**(-bc/2) to 2**p + r = (r * ((3<> (pp+1) + pp = p + # (1/sqrt(x))*x = sqrt(x) + return (r*(x>>hbc)) >> (p+guard_bits) + +def sqrtrem_python(x): + """Correctly rounded integer (floor) square root with remainder.""" + # to check cutoff: + # plot(lambda x: timing(isqrt, 2**int(x)), [0,2000]) + if x < _1_600: + y = isqrt_small_python(x) + return y, x - y*y + y = isqrt_fast_python(x) + 1 + rem = x - y*y + # Correct remainder + while rem < 0: + y -= 1 + rem += (1+2*y) + else: + if rem: + while rem > 2*(1+y): + y += 1 + rem -= (1+2*y) + return y, rem + +def isqrt_python(x): + """Integer square root with correct (floor) rounding.""" + return sqrtrem_python(x)[0] + +def sqrt_fixed(x, prec): + return isqrt_fast(x<= '2': + isqrt_small = isqrt_fast = isqrt = gmpy.isqrt + sqrtrem = gmpy.isqrt_rem + else: + isqrt_small = 
isqrt_fast = isqrt = gmpy.sqrt + sqrtrem = gmpy.sqrtrem +elif BACKEND == 'sage': + isqrt_small = isqrt_fast = isqrt = \ + getattr(sage_utils, "isqrt", lambda n: MPZ(n).isqrt()) + sqrtrem = lambda n: MPZ(n).sqrtrem() +else: + isqrt_small = isqrt_small_python + isqrt_fast = isqrt_fast_python + isqrt = isqrt_python + sqrtrem = sqrtrem_python + + +def ifib(n, _cache={}): + """Computes the nth Fibonacci number as an integer, for + integer n.""" + if n < 0: + return (-1)**(-n+1) * ifib(-n) + if n in _cache: + return _cache[n] + m = n + # Use Dijkstra's logarithmic algorithm + # The following implementation is basically equivalent to + # http://en.literateprograms.org/Fibonacci_numbers_(Scheme) + a, b, p, q = MPZ_ONE, MPZ_ZERO, MPZ_ZERO, MPZ_ONE + while n: + if n & 1: + aq = a*q + a, b = b*q+aq+a*p, b*p+aq + n -= 1 + else: + qq = q*q + p, q = p*p+qq, qq+2*p*q + n >>= 1 + if m < 250: + _cache[m] = b + return b + +MAX_FACTORIAL_CACHE = 1000 + +def ifac(n, memo={0:1, 1:1}): + """Return n factorial (for integers n >= 0 only).""" + f = memo.get(n) + if f: + return f + k = len(memo) + p = memo[k-1] + MAX = MAX_FACTORIAL_CACHE + while k <= n: + p *= k + if k <= MAX: + memo[k] = p + k += 1 + return p + +def ifac2(n, memo_pair=[{0:1}, {1:1}]): + """Return n!! (double factorial), integers n >= 0 only.""" + memo = memo_pair[n&1] + f = memo.get(n) + if f: + return f + k = max(memo) + p = memo[k] + MAX = MAX_FACTORIAL_CACHE + while k < n: + k += 2 + p *= k + if k <= MAX: + memo[k] = p + return p + +if BACKEND == 'gmpy': + ifac = gmpy.fac +elif BACKEND == 'sage': + ifac = lambda n: int(sage.factorial(n)) + ifib = sage.fibonacci + +def list_primes(n): + n = n + 1 + sieve = list(xrange(n)) + sieve[:2] = [0, 0] + for i in xrange(2, int(n**0.5)+1): + if sieve[i]: + for j in xrange(i**2, n, i): + sieve[j] = 0 + return [p for p in sieve if p] + +if BACKEND == 'sage': + # Note: it is *VERY* important for performance that we convert + # the list to Python ints. 
+ def list_primes(n): + return [int(_) for _ in sage.primes(n+1)] + +small_odd_primes = (3,5,7,11,13,17,19,23,29,31,37,41,43,47) +small_odd_primes_set = set(small_odd_primes) + +def isprime(n): + """ + Determines whether n is a prime number. A probabilistic test is + performed if n is very large. No special trick is used for detecting + perfect powers. + + >>> sum(list_primes(100000)) + 454396537 + >>> sum(n*isprime(n) for n in range(100000)) + 454396537 + + """ + n = int(n) + if not n & 1: + return n == 2 + if n < 50: + return n in small_odd_primes_set + for p in small_odd_primes: + if not n % p: + return False + m = n-1 + s = trailing(m) + d = m >> s + def test(a): + x = pow(a,d,n) + if x == 1 or x == m: + return True + for r in xrange(1,s): + x = x**2 % n + if x == m: + return True + return False + # See http://primes.utm.edu/prove/prove2_3.html + if n < 1373653: + witnesses = [2,3] + elif n < 341550071728321: + witnesses = [2,3,5,7,11,13,17] + else: + witnesses = small_odd_primes + for a in witnesses: + if not test(a): + return False + return True + +def moebius(n): + """ + Evaluates the Moebius function which is `mu(n) = (-1)^k` if `n` + is a product of `k` distinct primes and `mu(n) = 0` otherwise. + + TODO: speed up using factorization + """ + n = abs(int(n)) + if n < 2: + return n + factors = [] + for p in xrange(2, n+1): + if not (n % p): + if not (n % p**2): + return 0 + if not sum(p % f for f in factors): + factors.append(p) + return (-1)**len(factors) + +def gcd(*args): + a = 0 + for b in args: + if a: + while b: + a, b = b, a % b + else: + a = b + return a + + +# Comment by Juan Arias de Reyna: +# +# I learn this method to compute EulerE[2n] from van de Lune. 
+# +# We apply the formula EulerE[2n] = (-1)^n 2**(-2n) sum_{j=0}^n a(2n,2j+1) +# +# where the numbers a(n,j) vanish for j > n+1 or j <= -1 and satisfies +# +# a(0,-1) = a(0,0) = 0; a(0,1)= 1; a(0,2) = a(0,3) = 0 +# +# a(n,j) = a(n-1,j) when n+j is even +# a(n,j) = (j-1) a(n-1,j-1) + (j+1) a(n-1,j+1) when n+j is odd +# +# +# But we can use only one array unidimensional a(j) since to compute +# a(n,j) we only need to know a(n-1,k) where k and j are of different parity +# and we have not to conserve the used values. +# +# We cached up the values of Euler numbers to sufficiently high order. +# +# Important Observation: If we pretend to use the numbers +# EulerE[1], EulerE[2], ... , EulerE[n] +# it is convenient to compute first EulerE[n], since the algorithm +# computes first all +# the previous ones, and keeps them in the CACHE + +MAX_EULER_CACHE = 500 + +def eulernum(m, _cache={0:MPZ_ONE}): + r""" + Computes the Euler numbers `E(n)`, which can be defined as + coefficients of the Taylor expansion of `1/cosh x`: + + .. math :: + + \frac{1}{\cosh x} = \sum_{n=0}^\infty \frac{E_n}{n!} x^n + + Example:: + + >>> [int(eulernum(n)) for n in range(11)] + [1, 0, -1, 0, 5, 0, -61, 0, 1385, 0, -50521] + >>> [int(eulernum(n)) for n in range(11)] # test cache + [1, 0, -1, 0, 5, 0, -61, 0, 1385, 0, -50521] + + """ + # for odd m > 1, the Euler numbers are zero + if m & 1: + return MPZ_ZERO + f = _cache.get(m) + if f: + return f + MAX = MAX_EULER_CACHE + n = m + a = [MPZ(_) for _ in [0,0,1,0,0,0]] + for n in range(1, m+1): + for j in range(n+1, -1, -2): + a[j+1] = (j-1)*a[j] + (j+1)*a[j+2] + a.append(0) + suma = 0 + for k in range(n+1, -1, -2): + suma += a[k+1] + if n <= MAX: + _cache[n] = ((-1)**(n//2))*(suma // 2**n) + if n == m: + return ((-1)**(n//2))*suma // 2**n + +def stirling1(n, k): + """ + Stirling number of the first kind. 
+ """ + if n < 0 or k < 0: + raise ValueError + if k >= n: + return MPZ(n == k) + if k < 1: + return MPZ_ZERO + L = [MPZ_ZERO] * (k+1) + L[1] = MPZ_ONE + for m in xrange(2, n+1): + for j in xrange(min(k, m), 0, -1): + L[j] = (m-1) * L[j] + L[j-1] + return (-1)**(n+k) * L[k] + +def stirling2(n, k): + """ + Stirling number of the second kind. + """ + if n < 0 or k < 0: + raise ValueError + if k >= n: + return MPZ(n == k) + if k <= 1: + return MPZ(k == 1) + s = MPZ_ZERO + t = MPZ_ONE + for j in xrange(k+1): + if (k + j) & 1: + s -= t * MPZ(j)**n + else: + s += t * MPZ(j)**n + t = t * (k - j) // (j + 1) + return s // ifac(k) diff --git a/.venv/lib/python3.11/site-packages/mpmath/libmp/libmpc.py b/.venv/lib/python3.11/site-packages/mpmath/libmp/libmpc.py new file mode 100644 index 0000000000000000000000000000000000000000..cc22d0e73674676c8a9249ebc2d48da7f3be8b0d --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/libmp/libmpc.py @@ -0,0 +1,835 @@ +""" +Low-level functions for complex arithmetic. 
+""" + +import sys + +from .backend import MPZ, MPZ_ZERO, MPZ_ONE, MPZ_TWO, BACKEND + +from .libmpf import (\ + round_floor, round_ceiling, round_down, round_up, + round_nearest, round_fast, bitcount, + bctable, normalize, normalize1, reciprocal_rnd, rshift, lshift, giant_steps, + negative_rnd, + to_str, to_fixed, from_man_exp, from_float, to_float, from_int, to_int, + fzero, fone, ftwo, fhalf, finf, fninf, fnan, fnone, + mpf_abs, mpf_pos, mpf_neg, mpf_add, mpf_sub, mpf_mul, + mpf_div, mpf_mul_int, mpf_shift, mpf_sqrt, mpf_hypot, + mpf_rdiv_int, mpf_floor, mpf_ceil, mpf_nint, mpf_frac, + mpf_sign, mpf_hash, + ComplexResult +) + +from .libelefun import (\ + mpf_pi, mpf_exp, mpf_log, mpf_cos_sin, mpf_cosh_sinh, mpf_tan, mpf_pow_int, + mpf_log_hypot, + mpf_cos_sin_pi, mpf_phi, + mpf_cos, mpf_sin, mpf_cos_pi, mpf_sin_pi, + mpf_atan, mpf_atan2, mpf_cosh, mpf_sinh, mpf_tanh, + mpf_asin, mpf_acos, mpf_acosh, mpf_nthroot, mpf_fibonacci +) + +# An mpc value is a (real, imag) tuple +mpc_one = fone, fzero +mpc_zero = fzero, fzero +mpc_two = ftwo, fzero +mpc_half = (fhalf, fzero) + +_infs = (finf, fninf) +_infs_nan = (finf, fninf, fnan) + +def mpc_is_inf(z): + """Check if either real or imaginary part is infinite""" + re, im = z + if re in _infs: return True + if im in _infs: return True + return False + +def mpc_is_infnan(z): + """Check if either real or imaginary part is infinite or nan""" + re, im = z + if re in _infs_nan: return True + if im in _infs_nan: return True + return False + +def mpc_to_str(z, dps, **kwargs): + re, im = z + rs = to_str(re, dps) + if im[0]: + return rs + " - " + to_str(mpf_neg(im), dps, **kwargs) + "j" + else: + return rs + " + " + to_str(im, dps, **kwargs) + "j" + +def mpc_to_complex(z, strict=False, rnd=round_fast): + re, im = z + return complex(to_float(re, strict, rnd), to_float(im, strict, rnd)) + +def mpc_hash(z): + if sys.version_info >= (3, 2): + re, im = z + h = mpf_hash(re) + sys.hash_info.imag * mpf_hash(im) + # Need to reduce either 
module 2^32 or 2^64 + h = h % (2**sys.hash_info.width) + return int(h) + else: + try: + return hash(mpc_to_complex(z, strict=True)) + except OverflowError: + return hash(z) + +def mpc_conjugate(z, prec, rnd=round_fast): + re, im = z + return re, mpf_neg(im, prec, rnd) + +def mpc_is_nonzero(z): + return z != mpc_zero + +def mpc_add(z, w, prec, rnd=round_fast): + a, b = z + c, d = w + return mpf_add(a, c, prec, rnd), mpf_add(b, d, prec, rnd) + +def mpc_add_mpf(z, x, prec, rnd=round_fast): + a, b = z + return mpf_add(a, x, prec, rnd), b + +def mpc_sub(z, w, prec=0, rnd=round_fast): + a, b = z + c, d = w + return mpf_sub(a, c, prec, rnd), mpf_sub(b, d, prec, rnd) + +def mpc_sub_mpf(z, p, prec=0, rnd=round_fast): + a, b = z + return mpf_sub(a, p, prec, rnd), b + +def mpc_pos(z, prec, rnd=round_fast): + a, b = z + return mpf_pos(a, prec, rnd), mpf_pos(b, prec, rnd) + +def mpc_neg(z, prec=None, rnd=round_fast): + a, b = z + return mpf_neg(a, prec, rnd), mpf_neg(b, prec, rnd) + +def mpc_shift(z, n): + a, b = z + return mpf_shift(a, n), mpf_shift(b, n) + +def mpc_abs(z, prec, rnd=round_fast): + """Absolute value of a complex number, |a+bi|. + Returns an mpf value.""" + a, b = z + return mpf_hypot(a, b, prec, rnd) + +def mpc_arg(z, prec, rnd=round_fast): + """Argument of a complex number. Returns an mpf value.""" + a, b = z + return mpf_atan2(b, a, prec, rnd) + +def mpc_floor(z, prec, rnd=round_fast): + a, b = z + return mpf_floor(a, prec, rnd), mpf_floor(b, prec, rnd) + +def mpc_ceil(z, prec, rnd=round_fast): + a, b = z + return mpf_ceil(a, prec, rnd), mpf_ceil(b, prec, rnd) + +def mpc_nint(z, prec, rnd=round_fast): + a, b = z + return mpf_nint(a, prec, rnd), mpf_nint(b, prec, rnd) + +def mpc_frac(z, prec, rnd=round_fast): + a, b = z + return mpf_frac(a, prec, rnd), mpf_frac(b, prec, rnd) + + +def mpc_mul(z, w, prec, rnd=round_fast): + """ + Complex multiplication. + + Returns the real and imaginary part of (a+bi)*(c+di), rounded to + the specified precision. 
The rounding mode applies to the real and + imaginary parts separately. + """ + a, b = z + c, d = w + p = mpf_mul(a, c) + q = mpf_mul(b, d) + r = mpf_mul(a, d) + s = mpf_mul(b, c) + re = mpf_sub(p, q, prec, rnd) + im = mpf_add(r, s, prec, rnd) + return re, im + +def mpc_square(z, prec, rnd=round_fast): + # (a+b*I)**2 == a**2 - b**2 + 2*I*a*b + a, b = z + p = mpf_mul(a,a) + q = mpf_mul(b,b) + r = mpf_mul(a,b, prec, rnd) + re = mpf_sub(p, q, prec, rnd) + im = mpf_shift(r, 1) + return re, im + +def mpc_mul_mpf(z, p, prec, rnd=round_fast): + a, b = z + re = mpf_mul(a, p, prec, rnd) + im = mpf_mul(b, p, prec, rnd) + return re, im + +def mpc_mul_imag_mpf(z, x, prec, rnd=round_fast): + """ + Multiply the mpc value z by I*x where x is an mpf value. + """ + a, b = z + re = mpf_neg(mpf_mul(b, x, prec, rnd)) + im = mpf_mul(a, x, prec, rnd) + return re, im + +def mpc_mul_int(z, n, prec, rnd=round_fast): + a, b = z + re = mpf_mul_int(a, n, prec, rnd) + im = mpf_mul_int(b, n, prec, rnd) + return re, im + +def mpc_div(z, w, prec, rnd=round_fast): + a, b = z + c, d = w + wp = prec + 10 + # mag = c*c + d*d + mag = mpf_add(mpf_mul(c, c), mpf_mul(d, d), wp) + # (a*c+b*d)/mag, (b*c-a*d)/mag + t = mpf_add(mpf_mul(a,c), mpf_mul(b,d), wp) + u = mpf_sub(mpf_mul(b,c), mpf_mul(a,d), wp) + return mpf_div(t,mag,prec,rnd), mpf_div(u,mag,prec,rnd) + +def mpc_div_mpf(z, p, prec, rnd=round_fast): + """Calculate z/p where p is real""" + a, b = z + re = mpf_div(a, p, prec, rnd) + im = mpf_div(b, p, prec, rnd) + return re, im + +def mpc_reciprocal(z, prec, rnd=round_fast): + """Calculate 1/z efficiently""" + a, b = z + m = mpf_add(mpf_mul(a,a),mpf_mul(b,b),prec+10) + re = mpf_div(a, m, prec, rnd) + im = mpf_neg(mpf_div(b, m, prec, rnd)) + return re, im + +def mpc_mpf_div(p, z, prec, rnd=round_fast): + """Calculate p/z where p is real efficiently""" + a, b = z + m = mpf_add(mpf_mul(a,a),mpf_mul(b,b), prec+10) + re = mpf_div(mpf_mul(a,p), m, prec, rnd) + im = mpf_div(mpf_neg(mpf_mul(b,p)), m, prec, 
rnd) + return re, im + +def complex_int_pow(a, b, n): + """Complex integer power: computes (a+b*I)**n exactly for + nonnegative n (a and b must be Python ints).""" + wre = 1 + wim = 0 + while n: + if n & 1: + wre, wim = wre*a - wim*b, wim*a + wre*b + n -= 1 + a, b = a*a - b*b, 2*a*b + n //= 2 + return wre, wim + +def mpc_pow(z, w, prec, rnd=round_fast): + if w[1] == fzero: + return mpc_pow_mpf(z, w[0], prec, rnd) + return mpc_exp(mpc_mul(mpc_log(z, prec+10), w, prec+10), prec, rnd) + +def mpc_pow_mpf(z, p, prec, rnd=round_fast): + psign, pman, pexp, pbc = p + if pexp >= 0: + return mpc_pow_int(z, (-1)**psign * (pman< 0: + aman <<= de + aexp = bexp + else: + bman <<= (-de) + bexp = aexp + re, im = complex_int_pow(aman, bman, n) + re = from_man_exp(re, int(n*aexp), prec, rnd) + im = from_man_exp(im, int(n*bexp), prec, rnd) + return re, im + return mpc_exp(mpc_mul_int(mpc_log(z, prec+10), n, prec+10), prec, rnd) + +def mpc_sqrt(z, prec, rnd=round_fast): + """Complex square root (principal branch). 
+ + We have sqrt(a+bi) = sqrt((r+a)/2) + b/sqrt(2*(r+a))*i where + r = abs(a+bi), when a+bi is not a negative real number.""" + a, b = z + if b == fzero: + if a == fzero: + return (a, b) + # When a+bi is a negative real number, we get a real sqrt times i + if a[0]: + im = mpf_sqrt(mpf_neg(a), prec, rnd) + return (fzero, im) + else: + re = mpf_sqrt(a, prec, rnd) + return (re, fzero) + wp = prec+20 + if not a[0]: # case a positive + t = mpf_add(mpc_abs((a, b), wp), a, wp) # t = abs(a+bi) + a + u = mpf_shift(t, -1) # u = t/2 + re = mpf_sqrt(u, prec, rnd) # re = sqrt(u) + v = mpf_shift(t, 1) # v = 2*t + w = mpf_sqrt(v, wp) # w = sqrt(v) + im = mpf_div(b, w, prec, rnd) # im = b / w + else: # case a negative + t = mpf_sub(mpc_abs((a, b), wp), a, wp) # t = abs(a+bi) - a + u = mpf_shift(t, -1) # u = t/2 + im = mpf_sqrt(u, prec, rnd) # im = sqrt(u) + v = mpf_shift(t, 1) # v = 2*t + w = mpf_sqrt(v, wp) # w = sqrt(v) + re = mpf_div(b, w, prec, rnd) # re = b/w + if b[0]: + re = mpf_neg(re) + im = mpf_neg(im) + return re, im + +def mpc_nthroot_fixed(a, b, n, prec): + # a, b signed integers at fixed precision prec + start = 50 + a1 = int(rshift(a, prec - n*start)) + b1 = int(rshift(b, prec - n*start)) + try: + r = (a1 + 1j * b1)**(1.0/n) + re = r.real + im = r.imag + re = MPZ(int(re)) + im = MPZ(int(im)) + except OverflowError: + a1 = from_int(a1, start) + b1 = from_int(b1, start) + fn = from_int(n) + nth = mpf_rdiv_int(1, fn, start) + re, im = mpc_pow((a1, b1), (nth, fzero), start) + re = to_int(re) + im = to_int(im) + extra = 10 + prevp = start + extra1 = n + for p in giant_steps(start, prec+extra): + # this is slow for large n, unlike int_pow_fixed + re2, im2 = complex_int_pow(re, im, n-1) + re2 = rshift(re2, (n-1)*prevp - p - extra1) + im2 = rshift(im2, (n-1)*prevp - p - extra1) + r4 = (re2*re2 + im2*im2) >> (p + extra1) + ap = rshift(a, prec - p) + bp = rshift(b, prec - p) + rec = (ap * re2 + bp * im2) >> p + imc = (-ap * im2 + bp * re2) >> p + reb = (rec << p) // r4 + imb 
= (imc << p) // r4 + re = (reb + (n-1)*lshift(re, p-prevp))//n + im = (imb + (n-1)*lshift(im, p-prevp))//n + prevp = p + return re, im + +def mpc_nthroot(z, n, prec, rnd=round_fast): + """ + Complex n-th root. + + Use Newton method as in the real case when it is faster, + otherwise use z**(1/n) + """ + a, b = z + if a[0] == 0 and b == fzero: + re = mpf_nthroot(a, n, prec, rnd) + return (re, fzero) + if n < 2: + if n == 0: + return mpc_one + if n == 1: + return mpc_pos((a, b), prec, rnd) + if n == -1: + return mpc_div(mpc_one, (a, b), prec, rnd) + inverse = mpc_nthroot((a, b), -n, prec+5, reciprocal_rnd[rnd]) + return mpc_div(mpc_one, inverse, prec, rnd) + if n <= 20: + prec2 = int(1.2 * (prec + 10)) + asign, aman, aexp, abc = a + bsign, bman, bexp, bbc = b + pf = mpc_abs((a,b), prec) + if pf[-2] + pf[-1] > -10 and pf[-2] + pf[-1] < prec: + af = to_fixed(a, prec2) + bf = to_fixed(b, prec2) + re, im = mpc_nthroot_fixed(af, bf, n, prec2) + extra = 10 + re = from_man_exp(re, -prec2-extra, prec2, rnd) + im = from_man_exp(im, -prec2-extra, prec2, rnd) + return re, im + fn = from_int(n) + prec2 = prec+10 + 10 + nth = mpf_rdiv_int(1, fn, prec2) + re, im = mpc_pow((a, b), (nth, fzero), prec2, rnd) + re = normalize(re[0], re[1], re[2], re[3], prec, rnd) + im = normalize(im[0], im[1], im[2], im[3], prec, rnd) + return re, im + +def mpc_cbrt(z, prec, rnd=round_fast): + """ + Complex cubic root. + """ + return mpc_nthroot(z, 3, prec, rnd) + +def mpc_exp(z, prec, rnd=round_fast): + """ + Complex exponential function. + + We use the direct formula exp(a+bi) = exp(a) * (cos(b) + sin(b)*i) + for the computation. This formula is very nice because it is + pefectly stable; since we just do real multiplications, the only + numerical errors that can creep in are single-ulp rounding errors. + + The formula is efficient since mpmath's real exp is quite fast and + since we can compute cos and sin simultaneously. 
+ + It is no problem if a and b are large; if the implementations of + exp/cos/sin are accurate and efficient for all real numbers, then + so is this function for all complex numbers. + """ + a, b = z + if a == fzero: + return mpf_cos_sin(b, prec, rnd) + if b == fzero: + return mpf_exp(a, prec, rnd), fzero + mag = mpf_exp(a, prec+4, rnd) + c, s = mpf_cos_sin(b, prec+4, rnd) + re = mpf_mul(mag, c, prec, rnd) + im = mpf_mul(mag, s, prec, rnd) + return re, im + +def mpc_log(z, prec, rnd=round_fast): + re = mpf_log_hypot(z[0], z[1], prec, rnd) + im = mpc_arg(z, prec, rnd) + return re, im + +def mpc_cos(z, prec, rnd=round_fast): + """Complex cosine. The formula used is cos(a+bi) = cos(a)*cosh(b) - + sin(a)*sinh(b)*i. + + The same comments apply as for the complex exp: only real + multiplications are pewrormed, so no cancellation errors are + possible. The formula is also efficient since we can compute both + pairs (cos, sin) and (cosh, sinh) in single stwps.""" + a, b = z + if b == fzero: + return mpf_cos(a, prec, rnd), fzero + if a == fzero: + return mpf_cosh(b, prec, rnd), fzero + wp = prec + 6 + c, s = mpf_cos_sin(a, wp) + ch, sh = mpf_cosh_sinh(b, wp) + re = mpf_mul(c, ch, prec, rnd) + im = mpf_mul(s, sh, prec, rnd) + return re, mpf_neg(im) + +def mpc_sin(z, prec, rnd=round_fast): + """Complex sine. We have sin(a+bi) = sin(a)*cosh(b) + + cos(a)*sinh(b)*i. See the docstring for mpc_cos for additional + comments.""" + a, b = z + if b == fzero: + return mpf_sin(a, prec, rnd), fzero + if a == fzero: + return fzero, mpf_sinh(b, prec, rnd) + wp = prec + 6 + c, s = mpf_cos_sin(a, wp) + ch, sh = mpf_cosh_sinh(b, wp) + re = mpf_mul(s, ch, prec, rnd) + im = mpf_mul(c, sh, prec, rnd) + return re, im + +def mpc_tan(z, prec, rnd=round_fast): + """Complex tangent. 
Computed as tan(a+bi) = sin(2a)/M + sinh(2b)/M*i + where M = cos(2a) + cosh(2b).""" + a, b = z + asign, aman, aexp, abc = a + bsign, bman, bexp, bbc = b + if b == fzero: return mpf_tan(a, prec, rnd), fzero + if a == fzero: return fzero, mpf_tanh(b, prec, rnd) + wp = prec + 15 + a = mpf_shift(a, 1) + b = mpf_shift(b, 1) + c, s = mpf_cos_sin(a, wp) + ch, sh = mpf_cosh_sinh(b, wp) + # TODO: handle cancellation when c ~= -1 and ch ~= 1 + mag = mpf_add(c, ch, wp) + re = mpf_div(s, mag, prec, rnd) + im = mpf_div(sh, mag, prec, rnd) + return re, im + +def mpc_cos_pi(z, prec, rnd=round_fast): + a, b = z + if b == fzero: + return mpf_cos_pi(a, prec, rnd), fzero + b = mpf_mul(b, mpf_pi(prec+5), prec+5) + if a == fzero: + return mpf_cosh(b, prec, rnd), fzero + wp = prec + 6 + c, s = mpf_cos_sin_pi(a, wp) + ch, sh = mpf_cosh_sinh(b, wp) + re = mpf_mul(c, ch, prec, rnd) + im = mpf_mul(s, sh, prec, rnd) + return re, mpf_neg(im) + +def mpc_sin_pi(z, prec, rnd=round_fast): + a, b = z + if b == fzero: + return mpf_sin_pi(a, prec, rnd), fzero + b = mpf_mul(b, mpf_pi(prec+5), prec+5) + if a == fzero: + return fzero, mpf_sinh(b, prec, rnd) + wp = prec + 6 + c, s = mpf_cos_sin_pi(a, wp) + ch, sh = mpf_cosh_sinh(b, wp) + re = mpf_mul(s, ch, prec, rnd) + im = mpf_mul(c, sh, prec, rnd) + return re, im + +def mpc_cos_sin(z, prec, rnd=round_fast): + a, b = z + if a == fzero: + ch, sh = mpf_cosh_sinh(b, prec, rnd) + return (ch, fzero), (fzero, sh) + if b == fzero: + c, s = mpf_cos_sin(a, prec, rnd) + return (c, fzero), (s, fzero) + wp = prec + 6 + c, s = mpf_cos_sin(a, wp) + ch, sh = mpf_cosh_sinh(b, wp) + cre = mpf_mul(c, ch, prec, rnd) + cim = mpf_mul(s, sh, prec, rnd) + sre = mpf_mul(s, ch, prec, rnd) + sim = mpf_mul(c, sh, prec, rnd) + return (cre, mpf_neg(cim)), (sre, sim) + +def mpc_cos_sin_pi(z, prec, rnd=round_fast): + a, b = z + if b == fzero: + c, s = mpf_cos_sin_pi(a, prec, rnd) + return (c, fzero), (s, fzero) + b = mpf_mul(b, mpf_pi(prec+5), prec+5) + if a == fzero: + ch, sh = 
mpf_cosh_sinh(b, prec, rnd) + return (ch, fzero), (fzero, sh) + wp = prec + 6 + c, s = mpf_cos_sin_pi(a, wp) + ch, sh = mpf_cosh_sinh(b, wp) + cre = mpf_mul(c, ch, prec, rnd) + cim = mpf_mul(s, sh, prec, rnd) + sre = mpf_mul(s, ch, prec, rnd) + sim = mpf_mul(c, sh, prec, rnd) + return (cre, mpf_neg(cim)), (sre, sim) + +def mpc_cosh(z, prec, rnd=round_fast): + """Complex hyperbolic cosine. Computed as cosh(z) = cos(z*i).""" + a, b = z + return mpc_cos((b, mpf_neg(a)), prec, rnd) + +def mpc_sinh(z, prec, rnd=round_fast): + """Complex hyperbolic sine. Computed as sinh(z) = -i*sin(z*i).""" + a, b = z + b, a = mpc_sin((b, a), prec, rnd) + return a, b + +def mpc_tanh(z, prec, rnd=round_fast): + """Complex hyperbolic tangent. Computed as tanh(z) = -i*tan(z*i).""" + a, b = z + b, a = mpc_tan((b, a), prec, rnd) + return a, b + +# TODO: avoid loss of accuracy +def mpc_atan(z, prec, rnd=round_fast): + a, b = z + # atan(z) = (I/2)*(log(1-I*z) - log(1+I*z)) + # x = 1-I*z = 1 + b - I*a + # y = 1+I*z = 1 - b + I*a + wp = prec + 15 + x = mpf_add(fone, b, wp), mpf_neg(a) + y = mpf_sub(fone, b, wp), a + l1 = mpc_log(x, wp) + l2 = mpc_log(y, wp) + a, b = mpc_sub(l1, l2, prec, rnd) + # (I/2) * (a+b*I) = (-b/2 + a/2*I) + v = mpf_neg(mpf_shift(b,-1)), mpf_shift(a,-1) + # Subtraction at infinity gives correct real part but + # wrong imaginary part (should be zero) + if v[1] == fnan and mpc_is_inf(z): + v = (v[0], fzero) + return v + +beta_crossover = from_float(0.6417) +alpha_crossover = from_float(1.5) + +def acos_asin(z, prec, rnd, n): + """ complex acos for n = 0, asin for n = 1 + The algorithm is described in + T.E. Hull, T.F. Fairgrieve and P.T.P. Tang + 'Implementing the Complex Arcsine and Arcosine Functions + using Exception Handling', + ACM Trans. on Math. Software Vol. 
23 (1997), p299 + The complex acos and asin can be defined as + acos(z) = acos(beta) - I*sign(a)* log(alpha + sqrt(alpha**2 -1)) + asin(z) = asin(beta) + I*sign(a)* log(alpha + sqrt(alpha**2 -1)) + where z = a + I*b + alpha = (1/2)*(r + s); beta = (1/2)*(r - s) = a/alpha + r = sqrt((a+1)**2 + y**2); s = sqrt((a-1)**2 + y**2) + These expressions are rewritten in different ways in different + regions, delimited by two crossovers alpha_crossover and beta_crossover, + and by abs(a) <= 1, in order to improve the numerical accuracy. + """ + a, b = z + wp = prec + 10 + # special cases with real argument + if b == fzero: + am = mpf_sub(fone, mpf_abs(a), wp) + # case abs(a) <= 1 + if not am[0]: + if n == 0: + return mpf_acos(a, prec, rnd), fzero + else: + return mpf_asin(a, prec, rnd), fzero + # cases abs(a) > 1 + else: + # case a < -1 + if a[0]: + pi = mpf_pi(prec, rnd) + c = mpf_acosh(mpf_neg(a), prec, rnd) + if n == 0: + return pi, mpf_neg(c) + else: + return mpf_neg(mpf_shift(pi, -1)), c + # case a > 1 + else: + c = mpf_acosh(a, prec, rnd) + if n == 0: + return fzero, c + else: + pi = mpf_pi(prec, rnd) + return mpf_shift(pi, -1), mpf_neg(c) + asign = bsign = 0 + if a[0]: + a = mpf_neg(a) + asign = 1 + if b[0]: + b = mpf_neg(b) + bsign = 1 + am = mpf_sub(fone, a, wp) + ap = mpf_add(fone, a, wp) + r = mpf_hypot(ap, b, wp) + s = mpf_hypot(am, b, wp) + alpha = mpf_shift(mpf_add(r, s, wp), -1) + beta = mpf_div(a, alpha, wp) + b2 = mpf_mul(b,b, wp) + # case beta <= beta_crossover + if not mpf_sub(beta_crossover, beta, wp)[0]: + if n == 0: + re = mpf_acos(beta, wp) + else: + re = mpf_asin(beta, wp) + else: + # to compute the real part in this region use the identity + # asin(beta) = atan(beta/sqrt(1-beta**2)) + # beta/sqrt(1-beta**2) = (alpha + a) * (alpha - a) + # alpha + a is numerically accurate; alpha - a can have + # cancellations leading to numerical inaccuracies, so rewrite + # it in differente ways according to the region + Ax = mpf_add(alpha, a, wp) + # case a <= 1 + 
if not am[0]: + # c = b*b/(r + (a+1)); d = (s + (1-a)) + # alpha - a = (1/2)*(c + d) + # case n=0: re = atan(sqrt((1/2) * Ax * (c + d))/a) + # case n=1: re = atan(a/sqrt((1/2) * Ax * (c + d))) + c = mpf_div(b2, mpf_add(r, ap, wp), wp) + d = mpf_add(s, am, wp) + re = mpf_shift(mpf_mul(Ax, mpf_add(c, d, wp), wp), -1) + if n == 0: + re = mpf_atan(mpf_div(mpf_sqrt(re, wp), a, wp), wp) + else: + re = mpf_atan(mpf_div(a, mpf_sqrt(re, wp), wp), wp) + else: + # c = Ax/(r + (a+1)); d = Ax/(s - (1-a)) + # alpha - a = (1/2)*(c + d) + # case n = 0: re = atan(b*sqrt(c + d)/2/a) + # case n = 1: re = atan(a/(b*sqrt(c + d)/2) + c = mpf_div(Ax, mpf_add(r, ap, wp), wp) + d = mpf_div(Ax, mpf_sub(s, am, wp), wp) + re = mpf_shift(mpf_add(c, d, wp), -1) + re = mpf_mul(b, mpf_sqrt(re, wp), wp) + if n == 0: + re = mpf_atan(mpf_div(re, a, wp), wp) + else: + re = mpf_atan(mpf_div(a, re, wp), wp) + # to compute alpha + sqrt(alpha**2 - 1), if alpha <= alpha_crossover + # replace it with 1 + Am1 + sqrt(Am1*(alpha+1))) + # where Am1 = alpha -1 + # if alpha <= alpha_crossover: + if not mpf_sub(alpha_crossover, alpha, wp)[0]: + c1 = mpf_div(b2, mpf_add(r, ap, wp), wp) + # case a < 1 + if mpf_neg(am)[0]: + # Am1 = (1/2) * (b*b/(r + (a+1)) + b*b/(s + (1-a)) + c2 = mpf_add(s, am, wp) + c2 = mpf_div(b2, c2, wp) + Am1 = mpf_shift(mpf_add(c1, c2, wp), -1) + else: + # Am1 = (1/2) * (b*b/(r + (a+1)) + (s - (1-a))) + c2 = mpf_sub(s, am, wp) + Am1 = mpf_shift(mpf_add(c1, c2, wp), -1) + # im = log(1 + Am1 + sqrt(Am1*(alpha+1))) + im = mpf_mul(Am1, mpf_add(alpha, fone, wp), wp) + im = mpf_log(mpf_add(fone, mpf_add(Am1, mpf_sqrt(im, wp), wp), wp), wp) + else: + # im = log(alpha + sqrt(alpha*alpha - 1)) + im = mpf_sqrt(mpf_sub(mpf_mul(alpha, alpha, wp), fone, wp), wp) + im = mpf_log(mpf_add(alpha, im, wp), wp) + if asign: + if n == 0: + re = mpf_sub(mpf_pi(wp), re, wp) + else: + re = mpf_neg(re) + if not bsign and n == 0: + im = mpf_neg(im) + if bsign and n == 1: + im = mpf_neg(im) + re = normalize(re[0], 
re[1], re[2], re[3], prec, rnd) + im = normalize(im[0], im[1], im[2], im[3], prec, rnd) + return re, im + +def mpc_acos(z, prec, rnd=round_fast): + return acos_asin(z, prec, rnd, 0) + +def mpc_asin(z, prec, rnd=round_fast): + return acos_asin(z, prec, rnd, 1) + +def mpc_asinh(z, prec, rnd=round_fast): + # asinh(z) = I * asin(-I z) + a, b = z + a, b = mpc_asin((b, mpf_neg(a)), prec, rnd) + return mpf_neg(b), a + +def mpc_acosh(z, prec, rnd=round_fast): + # acosh(z) = -I * acos(z) for Im(acos(z)) <= 0 + # +I * acos(z) otherwise + a, b = mpc_acos(z, prec, rnd) + if b[0] or b == fzero: + return mpf_neg(b), a + else: + return b, mpf_neg(a) + +def mpc_atanh(z, prec, rnd=round_fast): + # atanh(z) = (log(1+z)-log(1-z))/2 + wp = prec + 15 + a = mpc_add(z, mpc_one, wp) + b = mpc_sub(mpc_one, z, wp) + a = mpc_log(a, wp) + b = mpc_log(b, wp) + v = mpc_shift(mpc_sub(a, b, wp), -1) + # Subtraction at infinity gives correct imaginary part but + # wrong real part (should be zero) + if v[0] == fnan and mpc_is_inf(z): + v = (fzero, v[1]) + return v + +def mpc_fibonacci(z, prec, rnd=round_fast): + re, im = z + if im == fzero: + return (mpf_fibonacci(re, prec, rnd), fzero) + size = max(abs(re[2]+re[3]), abs(re[2]+re[3])) + wp = prec + size + 20 + a = mpf_phi(wp) + b = mpf_add(mpf_shift(a, 1), fnone, wp) + u = mpc_pow((a, fzero), z, wp) + v = mpc_cos_pi(z, wp) + v = mpc_div(v, u, wp) + u = mpc_sub(u, v, wp) + u = mpc_div_mpf(u, b, prec, rnd) + return u + +def mpf_expj(x, prec, rnd='f'): + raise ComplexResult + +def mpc_expj(z, prec, rnd='f'): + re, im = z + if im == fzero: + return mpf_cos_sin(re, prec, rnd) + if re == fzero: + return mpf_exp(mpf_neg(im), prec, rnd), fzero + ey = mpf_exp(mpf_neg(im), prec+10) + c, s = mpf_cos_sin(re, prec+10) + re = mpf_mul(ey, c, prec, rnd) + im = mpf_mul(ey, s, prec, rnd) + return re, im + +def mpf_expjpi(x, prec, rnd='f'): + raise ComplexResult + +def mpc_expjpi(z, prec, rnd='f'): + re, im = z + if im == fzero: + return mpf_cos_sin_pi(re, prec, rnd) 
+ sign, man, exp, bc = im + wp = prec+10 + if man: + wp += max(0, exp+bc) + im = mpf_neg(mpf_mul(mpf_pi(wp), im, wp)) + if re == fzero: + return mpf_exp(im, prec, rnd), fzero + ey = mpf_exp(im, prec+10) + c, s = mpf_cos_sin_pi(re, prec+10) + re = mpf_mul(ey, c, prec, rnd) + im = mpf_mul(ey, s, prec, rnd) + return re, im + + +if BACKEND == 'sage': + try: + import sage.libs.mpmath.ext_libmp as _lbmp + mpc_exp = _lbmp.mpc_exp + mpc_sqrt = _lbmp.mpc_sqrt + except (ImportError, AttributeError): + print("Warning: Sage imports in libmpc failed") diff --git a/.venv/lib/python3.11/site-packages/mpmath/libmp/libmpf.py b/.venv/lib/python3.11/site-packages/mpmath/libmp/libmpf.py new file mode 100644 index 0000000000000000000000000000000000000000..5c162e17d4f688c71dc3476b944e2d31c65faab7 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/libmp/libmpf.py @@ -0,0 +1,1414 @@ +""" +Low-level functions for arbitrary-precision floating-point arithmetic. +""" + +__docformat__ = 'plaintext' + +import math + +from bisect import bisect + +import sys + +# Importing random is slow +#from random import getrandbits +getrandbits = None + +from .backend import (MPZ, MPZ_TYPE, MPZ_ZERO, MPZ_ONE, MPZ_TWO, MPZ_FIVE, + BACKEND, STRICT, HASH_MODULUS, HASH_BITS, gmpy, sage, sage_utils) + +from .libintmath import (giant_steps, + trailtable, bctable, lshift, rshift, bitcount, trailing, + sqrt_fixed, numeral, isqrt, isqrt_fast, sqrtrem, + bin_to_radix) + +# We don't pickle tuples directly for the following reasons: +# 1: pickle uses str() for ints, which is inefficient when they are large +# 2: pickle doesn't work for gmpy mpzs +# Both problems are solved by using hex() + +if BACKEND == 'sage': + def to_pickable(x): + sign, man, exp, bc = x + return sign, hex(man), exp, bc +else: + def to_pickable(x): + sign, man, exp, bc = x + return sign, hex(man)[2:], exp, bc + +def from_pickable(x): + sign, man, exp, bc = x + return (sign, MPZ(man, 16), exp, bc) + +class ComplexResult(ValueError): + pass 
+ +try: + intern +except NameError: + intern = lambda x: x + +# All supported rounding modes +round_nearest = intern('n') +round_floor = intern('f') +round_ceiling = intern('c') +round_up = intern('u') +round_down = intern('d') +round_fast = round_down + +def prec_to_dps(n): + """Return number of accurate decimals that can be represented + with a precision of n bits.""" + return max(1, int(round(int(n)/3.3219280948873626)-1)) + +def dps_to_prec(n): + """Return the number of bits required to represent n decimals + accurately.""" + return max(1, int(round((int(n)+1)*3.3219280948873626))) + +def repr_dps(n): + """Return the number of decimal digits required to represent + a number with n-bit precision so that it can be uniquely + reconstructed from the representation.""" + dps = prec_to_dps(n) + if dps == 15: + return 17 + return dps + 3 + +#----------------------------------------------------------------------------# +# Some commonly needed float values # +#----------------------------------------------------------------------------# + +# Regular number format: +# (-1)**sign * mantissa * 2**exponent, plus bitcount of mantissa +fzero = (0, MPZ_ZERO, 0, 0) +fnzero = (1, MPZ_ZERO, 0, 0) +fone = (0, MPZ_ONE, 0, 1) +fnone = (1, MPZ_ONE, 0, 1) +ftwo = (0, MPZ_ONE, 1, 1) +ften = (0, MPZ_FIVE, 1, 3) +fhalf = (0, MPZ_ONE, -1, 1) + +# Arbitrary encoding for special numbers: zero mantissa, nonzero exponent +fnan = (0, MPZ_ZERO, -123, -1) +finf = (0, MPZ_ZERO, -456, -2) +fninf = (1, MPZ_ZERO, -789, -3) + +# Was 1e1000; this is broken in Python 2.4 +math_float_inf = 1e300 * 1e300 + + +#----------------------------------------------------------------------------# +# Rounding # +#----------------------------------------------------------------------------# + +# This function can be used to round a mantissa generally. However, +# we will try to do most rounding inline for efficiency. 
def round_int(x, n, rnd):
    """Shift the integer x right by n bits, rounding the dropped bits
    in the direction given by rnd (one of the five rounding-mode
    strings). Used as the general fallback; hot paths inline this."""
    if rnd == round_nearest:
        if x >= 0:
            t = x >> (n-1)
            # Round half-to-even: round up if the bit after the cut is set
            # and either the kept LSB is set or any lower bit is set.
            if t & 1 and ((t & 2) or (x & h_mask[n<300][n])):
                return (t>>1)+1
            else:
                return t>>1
        else:
            # Nearest rounding is symmetric about zero
            return -round_int(-x, n, rnd)
    if rnd == round_floor:
        return x >> n
    if rnd == round_ceiling:
        return -((-x) >> n)
    if rnd == round_down:
        if x >= 0:
            return x >> n
        return -((-x) >> n)
    if rnd == round_up:
        if x >= 0:
            return -((-x) >> n)
        return x >> n

# These masks are used to pick out segments of numbers to determine
# which direction to round when rounding to nearest.
class h_mask_big:
    # Lazily computed mask (2**(n-1) - 1) for large n
    def __getitem__(self, n):
        return (MPZ_ONE<<(n-1))-1

# Precomputed masks for n < 300; h_mask[n<300][n] selects the right table
h_mask_small = [0]+[((MPZ_ONE<<(_-1))-1) for _ in range(1, 300)]
h_mask = [h_mask_big(), h_mask_small]

# The >> operator rounds to floor. shifts_down[rnd][sign]
# tells whether this is the right direction to use, or if the
# number should be negated before shifting
shifts_down = {round_floor:(1,0), round_ceiling:(0,1),
    round_down:(1,1), round_up:(0,0)}


#----------------------------------------------------------------------------#
#                          Normalization of raw mpfs                         #
#----------------------------------------------------------------------------#

# This function is called almost every time an mpf is created.
# It has been optimized accordingly.

def _normalize(sign, man, exp, bc, prec, rnd):
    """
    Create a raw mpf tuple with value (-1)**sign * man * 2**exp and
    normalized mantissa. The mantissa is rounded in the specified
    direction if its size exceeds the precision. Trailing zero bits
    are also stripped from the mantissa to ensure that the
    representation is canonical.
+ + Conditions on the input: + * The input must represent a regular (finite) number + * The sign bit must be 0 or 1 + * The mantissa must be positive + * The exponent must be an integer + * The bitcount must be exact + + If these conditions are not met, use from_man_exp, mpf_pos, or any + of the conversion functions to create normalized raw mpf tuples. + """ + if not man: + return fzero + # Cut mantissa down to size if larger than target precision + n = bc - prec + if n > 0: + if rnd == round_nearest: + t = man >> (n-1) + if t & 1 and ((t & 2) or (man & h_mask[n<300][n])): + man = (t>>1)+1 + else: + man = t>>1 + elif shifts_down[rnd][sign]: + man >>= n + else: + man = -((-man)>>n) + exp += n + bc = prec + # Strip trailing bits + if not man & 1: + t = trailtable[int(man & 255)] + if not t: + while not man & 255: + man >>= 8 + exp += 8 + bc -= 8 + t = trailtable[int(man & 255)] + man >>= t + exp += t + bc -= t + # Bit count can be wrong if the input mantissa was 1 less than + # a power of 2 and got rounded up, thereby adding an extra bit. + # With trailing bits removed, all powers of two have mantissa 1, + # so this is easy to check for. 
+ if man == 1: + bc = 1 + return sign, man, exp, bc + +def _normalize1(sign, man, exp, bc, prec, rnd): + """same as normalize, but with the added condition that + man is odd or zero + """ + if not man: + return fzero + if bc <= prec: + return sign, man, exp, bc + n = bc - prec + if rnd == round_nearest: + t = man >> (n-1) + if t & 1 and ((t & 2) or (man & h_mask[n<300][n])): + man = (t>>1)+1 + else: + man = t>>1 + elif shifts_down[rnd][sign]: + man >>= n + else: + man = -((-man)>>n) + exp += n + bc = prec + # Strip trailing bits + if not man & 1: + t = trailtable[int(man & 255)] + if not t: + while not man & 255: + man >>= 8 + exp += 8 + bc -= 8 + t = trailtable[int(man & 255)] + man >>= t + exp += t + bc -= t + # Bit count can be wrong if the input mantissa was 1 less than + # a power of 2 and got rounded up, thereby adding an extra bit. + # With trailing bits removed, all powers of two have mantissa 1, + # so this is easy to check for. + if man == 1: + bc = 1 + return sign, man, exp, bc + +try: + _exp_types = (int, long) +except NameError: + _exp_types = (int,) + +def strict_normalize(sign, man, exp, bc, prec, rnd): + """Additional checks on the components of an mpf. Enable tests by setting + the environment variable MPMATH_STRICT to Y.""" + assert type(man) == MPZ_TYPE + assert type(bc) in _exp_types + assert type(exp) in _exp_types + assert bc == bitcount(man) + return _normalize(sign, man, exp, bc, prec, rnd) + +def strict_normalize1(sign, man, exp, bc, prec, rnd): + """Additional checks on the components of an mpf. 
Enable tests by setting + the environment variable MPMATH_STRICT to Y.""" + assert type(man) == MPZ_TYPE + assert type(bc) in _exp_types + assert type(exp) in _exp_types + assert bc == bitcount(man) + assert (not man) or (man & 1) + return _normalize1(sign, man, exp, bc, prec, rnd) + +if BACKEND == 'gmpy' and '_mpmath_normalize' in dir(gmpy): + _normalize = gmpy._mpmath_normalize + _normalize1 = gmpy._mpmath_normalize + +if BACKEND == 'sage': + _normalize = _normalize1 = sage_utils.normalize + +if STRICT: + normalize = strict_normalize + normalize1 = strict_normalize1 +else: + normalize = _normalize + normalize1 = _normalize1 + +#----------------------------------------------------------------------------# +# Conversion functions # +#----------------------------------------------------------------------------# + +def from_man_exp(man, exp, prec=None, rnd=round_fast): + """Create raw mpf from (man, exp) pair. The mantissa may be signed. + If no precision is specified, the mantissa is stored exactly.""" + man = MPZ(man) + sign = 0 + if man < 0: + sign = 1 + man = -man + if man < 1024: + bc = bctable[int(man)] + else: + bc = bitcount(man) + if not prec: + if not man: + return fzero + if not man & 1: + if man & 2: + return (sign, man >> 1, exp + 1, bc - 1) + t = trailtable[int(man & 255)] + if not t: + while not man & 255: + man >>= 8 + exp += 8 + bc -= 8 + t = trailtable[int(man & 255)] + man >>= t + exp += t + bc -= t + return (sign, man, exp, bc) + return normalize(sign, man, exp, bc, prec, rnd) + +int_cache = dict((n, from_man_exp(n, 0)) for n in range(-10, 257)) + +if BACKEND == 'gmpy' and '_mpmath_create' in dir(gmpy): + from_man_exp = gmpy._mpmath_create + +if BACKEND == 'sage': + from_man_exp = sage_utils.from_man_exp + +def from_int(n, prec=0, rnd=round_fast): + """Create a raw mpf from an integer. 
If no precision is specified, + the mantissa is stored exactly.""" + if not prec: + if n in int_cache: + return int_cache[n] + return from_man_exp(n, 0, prec, rnd) + +def to_man_exp(s): + """Return (man, exp) of a raw mpf. Raise an error if inf/nan.""" + sign, man, exp, bc = s + if (not man) and exp: + raise ValueError("mantissa and exponent are undefined for %s" % man) + return man, exp + +def to_int(s, rnd=None): + """Convert a raw mpf to the nearest int. Rounding is done down by + default (same as int(float) in Python), but can be changed. If the + input is inf/nan, an exception is raised.""" + sign, man, exp, bc = s + if (not man) and exp: + raise ValueError("cannot convert inf or nan to int") + if exp >= 0: + if sign: + return (-man) << exp + return man << exp + # Make default rounding fast + if not rnd: + if sign: + return -(man >> (-exp)) + else: + return man >> (-exp) + if sign: + return round_int(-man, -exp, rnd) + else: + return round_int(man, -exp, rnd) + +def mpf_round_int(s, rnd): + sign, man, exp, bc = s + if (not man) and exp: + return s + if exp >= 0: + return s + mag = exp+bc + if mag < 1: + if rnd == round_ceiling: + if sign: return fzero + else: return fone + elif rnd == round_floor: + if sign: return fnone + else: return fzero + elif rnd == round_nearest: + if mag < 0 or man == MPZ_ONE: return fzero + elif sign: return fnone + else: return fone + else: + raise NotImplementedError + return mpf_pos(s, min(bc, mag), rnd) + +def mpf_floor(s, prec=0, rnd=round_fast): + v = mpf_round_int(s, round_floor) + if prec: + v = mpf_pos(v, prec, rnd) + return v + +def mpf_ceil(s, prec=0, rnd=round_fast): + v = mpf_round_int(s, round_ceiling) + if prec: + v = mpf_pos(v, prec, rnd) + return v + +def mpf_nint(s, prec=0, rnd=round_fast): + v = mpf_round_int(s, round_nearest) + if prec: + v = mpf_pos(v, prec, rnd) + return v + +def mpf_frac(s, prec=0, rnd=round_fast): + return mpf_sub(s, mpf_floor(s), prec, rnd) + +def from_float(x, prec=53, rnd=round_fast): + 
"""Create a raw mpf from a Python float, rounding if necessary. + If prec >= 53, the result is guaranteed to represent exactly the + same number as the input. If prec is not specified, use prec=53.""" + # frexp only raises an exception for nan on some platforms + if x != x: + return fnan + # in Python2.5 math.frexp gives an exception for float infinity + # in Python2.6 it returns (float infinity, 0) + try: + m, e = math.frexp(x) + except: + if x == math_float_inf: return finf + if x == -math_float_inf: return fninf + return fnan + if x == math_float_inf: return finf + if x == -math_float_inf: return fninf + return from_man_exp(int(m*(1<<53)), e-53, prec, rnd) + +def from_npfloat(x, prec=113, rnd=round_fast): + """Create a raw mpf from a numpy float, rounding if necessary. + If prec >= 113, the result is guaranteed to represent exactly the + same number as the input. If prec is not specified, use prec=113.""" + y = float(x) + if x == y: # ldexp overflows for float16 + return from_float(y, prec, rnd) + import numpy as np + if np.isfinite(x): + m, e = np.frexp(x) + return from_man_exp(int(np.ldexp(m, 113)), int(e-113), prec, rnd) + if np.isposinf(x): return finf + if np.isneginf(x): return fninf + return fnan + +def from_Decimal(x, prec=None, rnd=round_fast): + """Create a raw mpf from a Decimal, rounding if necessary. + If prec is not specified, use the equivalent bit precision + of the number of significant digits in x.""" + if x.is_nan(): return fnan + if x.is_infinite(): return fninf if x.is_signed() else finf + if prec is None: + prec = int(len(x.as_tuple()[1])*3.3219280948873626) + return from_str(str(x), prec, rnd) + +def to_float(s, strict=False, rnd=round_fast): + """ + Convert a raw mpf to a Python float. The result is exact if the + bitcount of s is <= 53 and no underflow/overflow occurs. + + If the number is too large or too small to represent as a regular + float, it will be converted to inf or 0.0. 
Setting strict=True + forces an OverflowError to be raised instead. + + Warning: with a directed rounding mode, the correct nearest representable + floating-point number in the specified direction might not be computed + in case of overflow or (gradual) underflow. + """ + sign, man, exp, bc = s + if not man: + if s == fzero: return 0.0 + if s == finf: return math_float_inf + if s == fninf: return -math_float_inf + return math_float_inf/math_float_inf + if bc > 53: + sign, man, exp, bc = normalize1(sign, man, exp, bc, 53, rnd) + if sign: + man = -man + try: + return math.ldexp(man, exp) + except OverflowError: + if strict: + raise + # Overflow to infinity + if exp + bc > 0: + if sign: + return -math_float_inf + else: + return math_float_inf + # Underflow to zero + return 0.0 + +def from_rational(p, q, prec, rnd=round_fast): + """Create a raw mpf from a rational number p/q, round if + necessary.""" + return mpf_div(from_int(p), from_int(q), prec, rnd) + +def to_rational(s): + """Convert a raw mpf to a rational number. Return integers (p, q) + such that s = p/q exactly.""" + sign, man, exp, bc = s + if sign: + man = -man + if bc == -1: + raise ValueError("cannot convert %s to a rational number" % man) + if exp >= 0: + return man * (1<= 0: return (-man) << offset + else: return (-man) >> (-offset) + else: + if offset >= 0: return man << offset + else: return man >> (-offset) + + +############################################################################## +############################################################################## + +#----------------------------------------------------------------------------# +# Arithmetic operations, etc. 
# +#----------------------------------------------------------------------------# + +def mpf_rand(prec): + """Return a raw mpf chosen randomly from [0, 1), with prec bits + in the mantissa.""" + global getrandbits + if not getrandbits: + import random + getrandbits = random.getrandbits + return from_man_exp(getrandbits(prec), -prec, prec, round_floor) + +def mpf_eq(s, t): + """Test equality of two raw mpfs. This is simply tuple comparison + unless either number is nan, in which case the result is False.""" + if not s[1] or not t[1]: + if s == fnan or t == fnan: + return False + return s == t + +def mpf_hash(s): + # Duplicate the new hash algorithm introduces in Python 3.2. + if sys.version_info >= (3, 2): + ssign, sman, sexp, sbc = s + + # Handle special numbers + if not sman: + if s == fnan: return sys.hash_info.nan + if s == finf: return sys.hash_info.inf + if s == fninf: return -sys.hash_info.inf + h = sman % HASH_MODULUS + if sexp >= 0: + sexp = sexp % HASH_BITS + else: + sexp = HASH_BITS - 1 - ((-1 - sexp) % HASH_BITS) + h = (h << sexp) % HASH_MODULUS + if ssign: h = -h + if h == -1: h = -2 + return int(h) + else: + try: + # Try to be compatible with hash values for floats and ints + return hash(to_float(s, strict=1)) + except OverflowError: + # We must unfortunately sacrifice compatibility with ints here. + # We could do hash(man << exp) when the exponent is positive, but + # this would cause unreasonable inefficiency for large numbers. + return hash(s) + +def mpf_cmp(s, t): + """Compare the raw mpfs s and t. Return -1 if s < t, 0 if s == t, + and 1 if s > t. (Same convention as Python's cmp() function.)""" + + # In principle, a comparison amounts to determining the sign of s-t. + # A full subtraction is relatively slow, however, so we first try to + # look at the components. 
+ ssign, sman, sexp, sbc = s + tsign, tman, texp, tbc = t + + # Handle zeros and special numbers + if not sman or not tman: + if s == fzero: return -mpf_sign(t) + if t == fzero: return mpf_sign(s) + if s == t: return 0 + # Follow same convention as Python's cmp for float nan + if t == fnan: return 1 + if s == finf: return 1 + if t == fninf: return 1 + return -1 + # Different sides of zero + if ssign != tsign: + if not ssign: return 1 + return -1 + # This reduces to direct integer comparison + if sexp == texp: + if sman == tman: + return 0 + if sman > tman: + if ssign: return -1 + else: return 1 + else: + if ssign: return 1 + else: return -1 + # Check position of the highest set bit in each number. If + # different, there is certainly an inequality. + a = sbc + sexp + b = tbc + texp + if ssign: + if a < b: return 1 + if a > b: return -1 + else: + if a < b: return -1 + if a > b: return 1 + + # Both numbers have the same highest bit. Subtract to find + # how the lower bits compare. + delta = mpf_sub(s, t, 5, round_floor) + if delta[0]: + return -1 + return 1 + +def mpf_lt(s, t): + if s == fnan or t == fnan: + return False + return mpf_cmp(s, t) < 0 + +def mpf_le(s, t): + if s == fnan or t == fnan: + return False + return mpf_cmp(s, t) <= 0 + +def mpf_gt(s, t): + if s == fnan or t == fnan: + return False + return mpf_cmp(s, t) > 0 + +def mpf_ge(s, t): + if s == fnan or t == fnan: + return False + return mpf_cmp(s, t) >= 0 + +def mpf_min_max(seq): + min = max = seq[0] + for x in seq[1:]: + if mpf_lt(x, min): min = x + if mpf_gt(x, max): max = x + return min, max + +def mpf_pos(s, prec=0, rnd=round_fast): + """Calculate 0+s for a raw mpf (i.e., just round s to the specified + precision).""" + if prec: + sign, man, exp, bc = s + if (not man) and exp: + return s + return normalize1(sign, man, exp, bc, prec, rnd) + return s + +def mpf_neg(s, prec=None, rnd=round_fast): + """Negate a raw mpf (return -s), rounding the result to the + specified precision. 
The prec argument can be omitted to do the + operation exactly.""" + sign, man, exp, bc = s + if not man: + if exp: + if s == finf: return fninf + if s == fninf: return finf + return s + if not prec: + return (1-sign, man, exp, bc) + return normalize1(1-sign, man, exp, bc, prec, rnd) + +def mpf_abs(s, prec=None, rnd=round_fast): + """Return abs(s) of the raw mpf s, rounded to the specified + precision. The prec argument can be omitted to generate an + exact result.""" + sign, man, exp, bc = s + if (not man) and exp: + if s == fninf: + return finf + return s + if not prec: + if sign: + return (0, man, exp, bc) + return s + return normalize1(0, man, exp, bc, prec, rnd) + +def mpf_sign(s): + """Return -1, 0, or 1 (as a Python int, not a raw mpf) depending on + whether s is negative, zero, or positive. (Nan is taken to give 0.)""" + sign, man, exp, bc = s + if not man: + if s == finf: return 1 + if s == fninf: return -1 + return 0 + return (-1) ** sign + +def mpf_add(s, t, prec=0, rnd=round_fast, _sub=0): + """ + Add the two raw mpf values s and t. + + With prec=0, no rounding is performed. Note that this can + produce a very large mantissa (potentially too large to fit + in memory) if exponents are far apart. 
+ """ + ssign, sman, sexp, sbc = s + tsign, tman, texp, tbc = t + tsign ^= _sub + # Standard case: two nonzero, regular numbers + if sman and tman: + offset = sexp - texp + if offset: + if offset > 0: + # Outside precision range; only need to perturb + if offset > 100 and prec: + delta = sbc + sexp - tbc - texp + if delta > prec + 4: + offset = prec + 4 + sman <<= offset + if tsign == ssign: sman += 1 + else: sman -= 1 + return normalize1(ssign, sman, sexp-offset, + bitcount(sman), prec, rnd) + # Add + if ssign == tsign: + man = tman + (sman << offset) + # Subtract + else: + if ssign: man = tman - (sman << offset) + else: man = (sman << offset) - tman + if man >= 0: + ssign = 0 + else: + man = -man + ssign = 1 + bc = bitcount(man) + return normalize1(ssign, man, texp, bc, prec or bc, rnd) + elif offset < 0: + # Outside precision range; only need to perturb + if offset < -100 and prec: + delta = tbc + texp - sbc - sexp + if delta > prec + 4: + offset = prec + 4 + tman <<= offset + if ssign == tsign: tman += 1 + else: tman -= 1 + return normalize1(tsign, tman, texp-offset, + bitcount(tman), prec, rnd) + # Add + if ssign == tsign: + man = sman + (tman << -offset) + # Subtract + else: + if tsign: man = sman - (tman << -offset) + else: man = (tman << -offset) - sman + if man >= 0: + ssign = 0 + else: + man = -man + ssign = 1 + bc = bitcount(man) + return normalize1(ssign, man, sexp, bc, prec or bc, rnd) + # Equal exponents; no shifting necessary + if ssign == tsign: + man = tman + sman + else: + if ssign: man = tman - sman + else: man = sman - tman + if man >= 0: + ssign = 0 + else: + man = -man + ssign = 1 + bc = bitcount(man) + return normalize(ssign, man, texp, bc, prec or bc, rnd) + # Handle zeros and special numbers + if _sub: + t = mpf_neg(t) + if not sman: + if sexp: + if s == t or tman or not texp: + return s + return fnan + if tman: + return normalize1(tsign, tman, texp, tbc, prec or tbc, rnd) + return t + if texp: + return t + if sman: + return 
normalize1(ssign, sman, sexp, sbc, prec or sbc, rnd) + return s + +def mpf_sub(s, t, prec=0, rnd=round_fast): + """Return the difference of two raw mpfs, s-t. This function is + simply a wrapper of mpf_add that changes the sign of t.""" + return mpf_add(s, t, prec, rnd, 1) + +def mpf_sum(xs, prec=0, rnd=round_fast, absolute=False): + """ + Sum a list of mpf values efficiently and accurately + (typically no temporary roundoff occurs). If prec=0, + the final result will not be rounded either. + + There may be roundoff error or cancellation if extremely + large exponent differences occur. + + With absolute=True, sums the absolute values. + """ + man = 0 + exp = 0 + max_extra_prec = prec*2 or 1000000 # XXX + special = None + for x in xs: + xsign, xman, xexp, xbc = x + if xman: + if xsign and not absolute: + xman = -xman + delta = xexp - exp + if xexp >= exp: + # x much larger than existing sum? + # first: quick test + if (delta > max_extra_prec) and \ + ((not man) or delta-bitcount(abs(man)) > max_extra_prec): + man = xman + exp = xexp + else: + man += (xman << delta) + else: + delta = -delta + # x much smaller than existing sum? 
+ if delta-xbc > max_extra_prec: + if not man: + man, exp = xman, xexp + else: + man = (man << delta) + xman + exp = xexp + elif xexp: + if absolute: + x = mpf_abs(x) + special = mpf_add(special or fzero, x, 1) + # Will be inf or nan + if special: + return special + return from_man_exp(man, exp, prec, rnd) + +def gmpy_mpf_mul(s, t, prec=0, rnd=round_fast): + """Multiply two raw mpfs""" + ssign, sman, sexp, sbc = s + tsign, tman, texp, tbc = t + sign = ssign ^ tsign + man = sman*tman + if man: + bc = bitcount(man) + if prec: + return normalize1(sign, man, sexp+texp, bc, prec, rnd) + else: + return (sign, man, sexp+texp, bc) + s_special = (not sman) and sexp + t_special = (not tman) and texp + if not s_special and not t_special: + return fzero + if fnan in (s, t): return fnan + if (not tman) and texp: s, t = t, s + if t == fzero: return fnan + return {1:finf, -1:fninf}[mpf_sign(s) * mpf_sign(t)] + +def gmpy_mpf_mul_int(s, n, prec, rnd=round_fast): + """Multiply by a Python integer.""" + sign, man, exp, bc = s + if not man: + return mpf_mul(s, from_int(n), prec, rnd) + if not n: + return fzero + if n < 0: + sign ^= 1 + n = -n + man *= n + return normalize(sign, man, exp, bitcount(man), prec, rnd) + +def python_mpf_mul(s, t, prec=0, rnd=round_fast): + """Multiply two raw mpfs""" + ssign, sman, sexp, sbc = s + tsign, tman, texp, tbc = t + sign = ssign ^ tsign + man = sman*tman + if man: + bc = sbc + tbc - 1 + bc += int(man>>bc) + if prec: + return normalize1(sign, man, sexp+texp, bc, prec, rnd) + else: + return (sign, man, sexp+texp, bc) + s_special = (not sman) and sexp + t_special = (not tman) and texp + if not s_special and not t_special: + return fzero + if fnan in (s, t): return fnan + if (not tman) and texp: s, t = t, s + if t == fzero: return fnan + return {1:finf, -1:fninf}[mpf_sign(s) * mpf_sign(t)] + +def python_mpf_mul_int(s, n, prec, rnd=round_fast): + """Multiply by a Python integer.""" + sign, man, exp, bc = s + if not man: + return mpf_mul(s, from_int(n), 
prec, rnd) + if not n: + return fzero + if n < 0: + sign ^= 1 + n = -n + man *= n + # Generally n will be small + if n < 1024: + bc += bctable[int(n)] - 1 + else: + bc += bitcount(n) - 1 + bc += int(man>>bc) + return normalize(sign, man, exp, bc, prec, rnd) + + +if BACKEND == 'gmpy': + mpf_mul = gmpy_mpf_mul + mpf_mul_int = gmpy_mpf_mul_int +else: + mpf_mul = python_mpf_mul + mpf_mul_int = python_mpf_mul_int + +def mpf_shift(s, n): + """Quickly multiply the raw mpf s by 2**n without rounding.""" + sign, man, exp, bc = s + if not man: + return s + return sign, man, exp+n, bc + +def mpf_frexp(x): + """Convert x = y*2**n to (y, n) with abs(y) in [0.5, 1) if nonzero""" + sign, man, exp, bc = x + if not man: + if x == fzero: + return (fzero, 0) + else: + raise ValueError + return mpf_shift(x, -bc-exp), bc+exp + +def mpf_div(s, t, prec, rnd=round_fast): + """Floating-point division""" + ssign, sman, sexp, sbc = s + tsign, tman, texp, tbc = t + if not sman or not tman: + if s == fzero: + if t == fzero: raise ZeroDivisionError + if t == fnan: return fnan + return fzero + if t == fzero: + raise ZeroDivisionError + s_special = (not sman) and sexp + t_special = (not tman) and texp + if s_special and t_special: + return fnan + if s == fnan or t == fnan: + return fnan + if not t_special: + if t == fzero: + return fnan + return {1:finf, -1:fninf}[mpf_sign(s) * mpf_sign(t)] + return fzero + sign = ssign ^ tsign + if tman == 1: + return normalize1(sign, sman, sexp-texp, sbc, prec, rnd) + # Same strategy as for addition: if there is a remainder, perturb + # the result a few bits outside the precision range before rounding + extra = prec - sbc + tbc + 5 + if extra < 5: + extra = 5 + quot, rem = divmod(sman< sexp+sbc: + return s + # Another important special case: this allows us to do e.g. x % 1.0 + # to find the fractional part of x, and it will work when x is huge. 
+ if tman == 1 and sexp > texp+tbc: + return fzero + base = min(sexp, texp) + sman = (-1)**ssign * sman + tman = (-1)**tsign * tman + man = (sman << (sexp-base)) % (tman << (texp-base)) + if man >= 0: + sign = 0 + else: + man = -man + sign = 1 + return normalize(sign, man, base, bitcount(man), prec, rnd) + +reciprocal_rnd = { + round_down : round_up, + round_up : round_down, + round_floor : round_ceiling, + round_ceiling : round_floor, + round_nearest : round_nearest +} + +negative_rnd = { + round_down : round_down, + round_up : round_up, + round_floor : round_ceiling, + round_ceiling : round_floor, + round_nearest : round_nearest +} + +def mpf_pow_int(s, n, prec, rnd=round_fast): + """Compute s**n, where s is a raw mpf and n is a Python integer.""" + sign, man, exp, bc = s + + if (not man) and exp: + if s == finf: + if n > 0: return s + if n == 0: return fnan + return fzero + if s == fninf: + if n > 0: return [finf, fninf][n & 1] + if n == 0: return fnan + return fzero + return fnan + + n = int(n) + if n == 0: return fone + if n == 1: return mpf_pos(s, prec, rnd) + if n == 2: + _, man, exp, bc = s + if not man: + return fzero + man = man*man + if man == 1: + return (0, MPZ_ONE, exp+exp, 1) + bc = bc + bc - 2 + bc += bctable[int(man>>bc)] + return normalize1(0, man, exp+exp, bc, prec, rnd) + if n == -1: return mpf_div(fone, s, prec, rnd) + if n < 0: + inverse = mpf_pow_int(s, -n, prec+5, reciprocal_rnd[rnd]) + return mpf_div(fone, inverse, prec, rnd) + + result_sign = sign & n + + # Use exact integer power when the exact mantissa is small + if man == 1: + return (result_sign, MPZ_ONE, exp*n, 1) + if bc*n < 1000: + man **= n + return normalize1(result_sign, man, exp*n, bitcount(man), prec, rnd) + + # Use directed rounding all the way through to maintain rigorous + # bounds for interval arithmetic + rounds_down = (rnd == round_nearest) or \ + shifts_down[rnd][result_sign] + + # Now we perform binary exponentiation. 
Need to estimate precision + # to avoid rounding errors from temporary operations. Roughly log_2(n) + # operations are performed. + workprec = prec + 4*bitcount(n) + 4 + _, pm, pe, pbc = fone + while 1: + if n & 1: + pm = pm*man + pe = pe+exp + pbc += bc - 2 + pbc = pbc + bctable[int(pm >> pbc)] + if pbc > workprec: + if rounds_down: + pm = pm >> (pbc-workprec) + else: + pm = -((-pm) >> (pbc-workprec)) + pe += pbc - workprec + pbc = workprec + n -= 1 + if not n: + break + man = man*man + exp = exp+exp + bc = bc + bc - 2 + bc = bc + bctable[int(man >> bc)] + if bc > workprec: + if rounds_down: + man = man >> (bc-workprec) + else: + man = -((-man) >> (bc-workprec)) + exp += bc - workprec + bc = workprec + n = n // 2 + + return normalize(result_sign, pm, pe, pbc, prec, rnd) + + +def mpf_perturb(x, eps_sign, prec, rnd): + """ + For nonzero x, calculate x + eps with directed rounding, where + eps < prec relatively and eps has the given sign (0 for + positive, 1 for negative). + + With rounding to nearest, this is taken to simply normalize + x to the given precision. + """ + if rnd == round_nearest: + return mpf_pos(x, prec, rnd) + sign, man, exp, bc = x + eps = (eps_sign, MPZ_ONE, exp+bc-prec-1, 1) + if sign: + away = (rnd in (round_down, round_ceiling)) ^ eps_sign + else: + away = (rnd in (round_up, round_ceiling)) ^ eps_sign + if away: + return mpf_add(x, eps, prec, rnd) + else: + return mpf_pos(x, prec, rnd) + + +#----------------------------------------------------------------------------# +# Radix conversion # +#----------------------------------------------------------------------------# + +def to_digits_exp(s, dps): + """Helper function for representing the floating-point number s as + a decimal with dps digits. Returns (sign, string, exponent) where + sign is '' or '-', string is the digit string, and exponent is + the decimal exponent as an int. 
+ + If inexact, the decimal representation is rounded toward zero.""" + + # Extract sign first so it doesn't mess up the string digit count + if s[0]: + sign = '-' + s = mpf_neg(s) + else: + sign = '' + _sign, man, exp, bc = s + + if not man: + return '', '0', 0 + + bitprec = int(dps * math.log(10,2)) + 10 + + # Cut down to size + # TODO: account for precision when doing this + exp_from_1 = exp + bc + if abs(exp_from_1) > 3500: + from .libelefun import mpf_ln2, mpf_ln10 + # Set b = int(exp * log(2)/log(10)) + # If exp is huge, we must use high-precision arithmetic to + # find the nearest power of ten + expprec = bitcount(abs(exp)) + 5 + tmp = from_int(exp) + tmp = mpf_mul(tmp, mpf_ln2(expprec)) + tmp = mpf_div(tmp, mpf_ln10(expprec), expprec) + b = to_int(tmp) + s = mpf_div(s, mpf_pow_int(ften, b, bitprec), bitprec) + _sign, man, exp, bc = s + exponent = b + else: + exponent = 0 + + # First, calculate mantissa digits by converting to a binary + # fixed-point number and then converting that number to + # a decimal fixed-point number. + fixprec = max(bitprec - exp - bc, 0) + fixdps = int(fixprec / math.log(10,2) + 0.5) + sf = to_fixed(s, fixprec) + sd = bin_to_radix(sf, fixprec, 10, fixdps) + digits = numeral(sd, base=10, size=dps) + + exponent += len(digits) - fixdps - 1 + return sign, digits, exponent + +def to_str(s, dps, strip_zeros=True, min_fixed=None, max_fixed=None, + show_zero_exponent=False): + """ + Convert a raw mpf to a decimal floating-point literal with at + most `dps` decimal digits in the mantissa (not counting extra zeros + that may be inserted for visual purposes). + + The number will be printed in fixed-point format if the position + of the leading digit is strictly between min_fixed + (default = min(-dps/3,-5)) and max_fixed (default = dps). + + To force fixed-point format always, set min_fixed = -inf, + max_fixed = +inf. To force floating-point format, set + min_fixed >= max_fixed. 
+ + The literal is formatted so that it can be parsed back to a number + by to_str, float() or Decimal(). + """ + + # Special numbers + if not s[1]: + if s == fzero: + if dps: t = '0.0' + else: t = '.0' + if show_zero_exponent: + t += 'e+0' + return t + if s == finf: return '+inf' + if s == fninf: return '-inf' + if s == fnan: return 'nan' + raise ValueError + + if min_fixed is None: min_fixed = min(-(dps//3), -5) + if max_fixed is None: max_fixed = dps + + # to_digits_exp rounds to floor. + # This sometimes kills some instances of "...00001" + sign, digits, exponent = to_digits_exp(s, dps+3) + + # No digits: show only .0; round exponent to nearest + if not dps: + if digits[0] in '56789': + exponent += 1 + digits = ".0" + + else: + # Rounding up kills some instances of "...99999" + if len(digits) > dps and digits[dps] in '56789': + digits = digits[:dps] + i = dps - 1 + while i >= 0 and digits[i] == '9': + i -= 1 + if i >= 0: + digits = digits[:i] + str(int(digits[i]) + 1) + '0' * (dps - i - 1) + else: + digits = '1' + '0' * (dps - 1) + exponent += 1 + else: + digits = digits[:dps] + + # Prettify numbers close to unit magnitude + if min_fixed < exponent < max_fixed: + if exponent < 0: + digits = ("0"*int(-exponent)) + digits + split = 1 + else: + split = exponent + 1 + if split > dps: + digits += "0"*(split-dps) + exponent = 0 + else: + split = 1 + + digits = (digits[:split] + "." 
+ digits[split:]) + + if strip_zeros: + # Clean up trailing zeros + digits = digits.rstrip('0') + if digits[-1] == ".": + digits += "0" + + if exponent == 0 and dps and not show_zero_exponent: return sign + digits + if exponent >= 0: return sign + digits + "e+" + str(exponent) + if exponent < 0: return sign + digits + "e" + str(exponent) + +def str_to_man_exp(x, base=10): + """Helper function for from_str.""" + x = x.lower().rstrip('l') + # Verify that the input is a valid float literal + float(x) + # Split into mantissa, exponent + parts = x.split('e') + if len(parts) == 1: + exp = 0 + else: # == 2 + x = parts[0] + exp = int(parts[1]) + # Look for radix point in mantissa + parts = x.split('.') + if len(parts) == 2: + a, b = parts[0], parts[1].rstrip('0') + exp -= len(b) + x = a + b + x = MPZ(int(x, base)) + return x, exp + +special_str = {'inf':finf, '+inf':finf, '-inf':fninf, 'nan':fnan} + +def from_str(x, prec, rnd=round_fast): + """Create a raw mpf from a decimal literal, rounding in the + specified direction if the input number cannot be represented + exactly as a binary floating-point number with the given number of + bits. The literal syntax accepted is the same as for Python + floats. + + TODO: the rounding does not work properly for large exponents. + """ + x = x.lower().strip() + if x in special_str: + return special_str[x] + + if '/' in x: + p, q = x.split('/') + p, q = p.rstrip('l'), q.rstrip('l') + return from_rational(int(p), int(q), prec, rnd) + + man, exp = str_to_man_exp(x, base=10) + + # XXX: appropriate cutoffs & track direction + # note no factors of 5 + if abs(exp) > 400: + s = from_int(man, prec+10) + s = mpf_mul(s, mpf_pow_int(ften, exp, prec+10), prec, rnd) + else: + if exp >= 0: + s = from_int(man * 10**exp, prec, rnd) + else: + s = from_rational(man, 10**-exp, prec, rnd) + return s + +# Binary string conversion. 
These are currently mainly used for debugging +# and could use some improvement in the future + +def from_bstr(x): + man, exp = str_to_man_exp(x, base=2) + man = MPZ(man) + sign = 0 + if man < 0: + man = -man + sign = 1 + bc = bitcount(man) + return normalize(sign, man, exp, bc, bc, round_floor) + +def to_bstr(x): + sign, man, exp, bc = x + return ['','-'][sign] + numeral(man, size=bitcount(man), base=2) + ("e%i" % exp) + + +#----------------------------------------------------------------------------# +# Square roots # +#----------------------------------------------------------------------------# + + +def mpf_sqrt(s, prec, rnd=round_fast): + """ + Compute the square root of a nonnegative mpf value. The + result is correctly rounded. + """ + sign, man, exp, bc = s + if sign: + raise ComplexResult("square root of a negative number") + if not man: + return s + if exp & 1: + exp -= 1 + man <<= 1 + bc += 1 + elif man == 1: + return normalize1(sign, man, exp//2, bc, prec, rnd) + shift = max(4, 2*prec-bc+4) + shift += shift & 1 + if rnd in 'fd': + man = isqrt(man<= 0: + a = mpf_pos(sa, prec, round_floor) + b = mpf_pos(sb, prec, round_ceiling) + # Upper point nonnegative? + elif sbs >= 0: + a = fzero + negsa = mpf_neg(sa) + if mpf_lt(negsa, sb): + b = mpf_pos(sb, prec, round_ceiling) + else: + b = mpf_pos(negsa, prec, round_ceiling) + # Both negative? 
+ else: + a = mpf_neg(sb, prec, round_floor) + b = mpf_neg(sa, prec, round_ceiling) + return a, b + +# TODO: optimize +def mpi_mul_mpf(s, t, prec): + return mpi_mul(s, (t, t), prec) + +def mpi_div_mpf(s, t, prec): + return mpi_div(s, (t, t), prec) + +def mpi_mul(s, t, prec=0): + sa, sb = s + ta, tb = t + sas = mpf_sign(sa) + sbs = mpf_sign(sb) + tas = mpf_sign(ta) + tbs = mpf_sign(tb) + if sas == sbs == 0: + # Should maybe be undefined + if ta == fninf or tb == finf: + return fninf, finf + return fzero, fzero + if tas == tbs == 0: + # Should maybe be undefined + if sa == fninf or sb == finf: + return fninf, finf + return fzero, fzero + if sas >= 0: + # positive * positive + if tas >= 0: + a = mpf_mul(sa, ta, prec, round_floor) + b = mpf_mul(sb, tb, prec, round_ceiling) + if a == fnan: a = fzero + if b == fnan: b = finf + # positive * negative + elif tbs <= 0: + a = mpf_mul(sb, ta, prec, round_floor) + b = mpf_mul(sa, tb, prec, round_ceiling) + if a == fnan: a = fninf + if b == fnan: b = fzero + # positive * both signs + else: + a = mpf_mul(sb, ta, prec, round_floor) + b = mpf_mul(sb, tb, prec, round_ceiling) + if a == fnan: a = fninf + if b == fnan: b = finf + elif sbs <= 0: + # negative * positive + if tas >= 0: + a = mpf_mul(sa, tb, prec, round_floor) + b = mpf_mul(sb, ta, prec, round_ceiling) + if a == fnan: a = fninf + if b == fnan: b = fzero + # negative * negative + elif tbs <= 0: + a = mpf_mul(sb, tb, prec, round_floor) + b = mpf_mul(sa, ta, prec, round_ceiling) + if a == fnan: a = fzero + if b == fnan: b = finf + # negative * both signs + else: + a = mpf_mul(sa, tb, prec, round_floor) + b = mpf_mul(sa, ta, prec, round_ceiling) + if a == fnan: a = fninf + if b == fnan: b = finf + else: + # General case: perform all cross-multiplications and compare + # Since the multiplications can be done exactly, we need only + # do 4 (instead of 8: two for each rounding mode) + cases = [mpf_mul(sa, ta), mpf_mul(sa, tb), mpf_mul(sb, ta), mpf_mul(sb, tb)] + if fnan in 
cases: + a, b = (fninf, finf) + else: + a, b = mpf_min_max(cases) + a = mpf_pos(a, prec, round_floor) + b = mpf_pos(b, prec, round_ceiling) + return a, b + +def mpi_square(s, prec=0): + sa, sb = s + if mpf_ge(sa, fzero): + a = mpf_mul(sa, sa, prec, round_floor) + b = mpf_mul(sb, sb, prec, round_ceiling) + elif mpf_le(sb, fzero): + a = mpf_mul(sb, sb, prec, round_floor) + b = mpf_mul(sa, sa, prec, round_ceiling) + else: + sa = mpf_neg(sa) + sa, sb = mpf_min_max([sa, sb]) + a = fzero + b = mpf_mul(sb, sb, prec, round_ceiling) + return a, b + +def mpi_div(s, t, prec): + sa, sb = s + ta, tb = t + sas = mpf_sign(sa) + sbs = mpf_sign(sb) + tas = mpf_sign(ta) + tbs = mpf_sign(tb) + # 0 / X + if sas == sbs == 0: + # 0 / + if (tas < 0 and tbs > 0) or (tas == 0 or tbs == 0): + return fninf, finf + return fzero, fzero + # Denominator contains both negative and positive numbers; + # this should properly be a multi-interval, but the closest + # match is the entire (extended) real line + if tas < 0 and tbs > 0: + return fninf, finf + # Assume denominator to be nonnegative + if tas < 0: + return mpi_div(mpi_neg(s), mpi_neg(t), prec) + # Division by zero + # XXX: make sure all results make sense + if tas == 0: + # Numerator contains both signs? + if sas < 0 and sbs > 0: + return fninf, finf + if tas == tbs: + return fninf, finf + # Numerator positive? + if sas >= 0: + a = mpf_div(sa, tb, prec, round_floor) + b = finf + if sbs <= 0: + a = fninf + b = mpf_div(sb, tb, prec, round_ceiling) + # Division with positive denominator + # We still have to handle nans resulting from inf/0 or inf/inf + else: + # Nonnegative numerator + if sas >= 0: + a = mpf_div(sa, tb, prec, round_floor) + b = mpf_div(sb, ta, prec, round_ceiling) + if a == fnan: a = fzero + if b == fnan: b = finf + # Nonpositive numerator + elif sbs <= 0: + a = mpf_div(sa, ta, prec, round_floor) + b = mpf_div(sb, tb, prec, round_ceiling) + if a == fnan: a = fninf + if b == fnan: b = fzero + # Numerator contains both signs? 
+ else: + a = mpf_div(sa, ta, prec, round_floor) + b = mpf_div(sb, ta, prec, round_ceiling) + if a == fnan: a = fninf + if b == fnan: b = finf + return a, b + +def mpi_pi(prec): + a = mpf_pi(prec, round_floor) + b = mpf_pi(prec, round_ceiling) + return a, b + +def mpi_exp(s, prec): + sa, sb = s + # exp is monotonic + a = mpf_exp(sa, prec, round_floor) + b = mpf_exp(sb, prec, round_ceiling) + return a, b + +def mpi_log(s, prec): + sa, sb = s + # log is monotonic + a = mpf_log(sa, prec, round_floor) + b = mpf_log(sb, prec, round_ceiling) + return a, b + +def mpi_sqrt(s, prec): + sa, sb = s + # sqrt is monotonic + a = mpf_sqrt(sa, prec, round_floor) + b = mpf_sqrt(sb, prec, round_ceiling) + return a, b + +def mpi_atan(s, prec): + sa, sb = s + a = mpf_atan(sa, prec, round_floor) + b = mpf_atan(sb, prec, round_ceiling) + return a, b + +def mpi_pow_int(s, n, prec): + sa, sb = s + if n < 0: + return mpi_div((fone, fone), mpi_pow_int(s, -n, prec+20), prec) + if n == 0: + return (fone, fone) + if n == 1: + return s + if n == 2: + return mpi_square(s, prec) + # Odd -- signs are preserved + if n & 1: + a = mpf_pow_int(sa, n, prec, round_floor) + b = mpf_pow_int(sb, n, prec, round_ceiling) + # Even -- important to ensure positivity + else: + sas = mpf_sign(sa) + sbs = mpf_sign(sb) + # Nonnegative? + if sas >= 0: + a = mpf_pow_int(sa, n, prec, round_floor) + b = mpf_pow_int(sb, n, prec, round_ceiling) + # Nonpositive? + elif sbs <= 0: + a = mpf_pow_int(sb, n, prec, round_floor) + b = mpf_pow_int(sa, n, prec, round_ceiling) + # Mixed signs? 
+ else: + a = fzero + # max(-a,b)**n + sa = mpf_neg(sa) + if mpf_ge(sa, sb): + b = mpf_pow_int(sa, n, prec, round_ceiling) + else: + b = mpf_pow_int(sb, n, prec, round_ceiling) + return a, b + +def mpi_pow(s, t, prec): + ta, tb = t + if ta == tb and ta not in (finf, fninf): + if ta == from_int(to_int(ta)): + return mpi_pow_int(s, to_int(ta), prec) + if ta == fhalf: + return mpi_sqrt(s, prec) + u = mpi_log(s, prec + 20) + v = mpi_mul(u, t, prec + 20) + return mpi_exp(v, prec) + +def MIN(x, y): + if mpf_le(x, y): + return x + return y + +def MAX(x, y): + if mpf_ge(x, y): + return x + return y + +def cos_sin_quadrant(x, wp): + sign, man, exp, bc = x + if x == fzero: + return fone, fzero, 0 + # TODO: combine evaluation code to avoid duplicate modulo + c, s = mpf_cos_sin(x, wp) + t, n, wp_ = mod_pi2(man, exp, exp+bc, 15) + if sign: + n = -1-n + return c, s, n + +def mpi_cos_sin(x, prec): + a, b = x + if a == b == fzero: + return (fone, fone), (fzero, fzero) + # Guaranteed to contain both -1 and 1 + if (finf in x) or (fninf in x): + return (fnone, fone), (fnone, fone) + wp = prec + 20 + ca, sa, na = cos_sin_quadrant(a, wp) + cb, sb, nb = cos_sin_quadrant(b, wp) + ca, cb = mpf_min_max([ca, cb]) + sa, sb = mpf_min_max([sa, sb]) + # Both functions are monotonic within one quadrant + if na == nb: + pass + # Guaranteed to contain both -1 and 1 + elif nb - na >= 4: + return (fnone, fone), (fnone, fone) + else: + # cos has maximum between a and b + if na//4 != nb//4: + cb = fone + # cos has minimum + if (na-2)//4 != (nb-2)//4: + ca = fnone + # sin has maximum + if (na-1)//4 != (nb-1)//4: + sb = fone + # sin has minimum + if (na-3)//4 != (nb-3)//4: + sa = fnone + # Perturb to force interval rounding + more = from_man_exp((MPZ_ONE<= 1: + if sign: + return fnone + return fone + return v + ca = finalize(ca, round_floor) + cb = finalize(cb, round_ceiling) + sa = finalize(sa, round_floor) + sb = finalize(sb, round_ceiling) + return (ca,cb), (sa,sb) + +def mpi_cos(x, prec): + return 
mpi_cos_sin(x, prec)[0] + +def mpi_sin(x, prec): + return mpi_cos_sin(x, prec)[1] + +def mpi_tan(x, prec): + cos, sin = mpi_cos_sin(x, prec+20) + return mpi_div(sin, cos, prec) + +def mpi_cot(x, prec): + cos, sin = mpi_cos_sin(x, prec+20) + return mpi_div(cos, sin, prec) + +def mpi_from_str_a_b(x, y, percent, prec): + wp = prec + 20 + xa = from_str(x, wp, round_floor) + xb = from_str(x, wp, round_ceiling) + #ya = from_str(y, wp, round_floor) + y = from_str(y, wp, round_ceiling) + assert mpf_ge(y, fzero) + if percent: + y = mpf_mul(MAX(mpf_abs(xa), mpf_abs(xb)), y, wp, round_ceiling) + y = mpf_div(y, from_int(100), wp, round_ceiling) + a = mpf_sub(xa, y, prec, round_floor) + b = mpf_add(xb, y, prec, round_ceiling) + return a, b + +def mpi_from_str(s, prec): + """ + Parse an interval number given as a string. + + Allowed forms are + + "-1.23e-27" + Any single decimal floating-point literal. + "a +- b" or "a (b)" + a is the midpoint of the interval and b is the half-width + "a +- b%" or "a (b%)" + a is the midpoint of the interval and the half-width + is b percent of a (`a \times b / 100`). + "[a, b]" + The interval indicated directly. + "x[y,z]e" + x are shared digits, y and z are unequal digits, e is the exponent. 
+ + """ + e = ValueError("Improperly formed interval number '%s'" % s) + s = s.replace(" ", "") + wp = prec + 20 + if "+-" in s: + x, y = s.split("+-") + return mpi_from_str_a_b(x, y, False, prec) + # case 2 + elif "(" in s: + # Don't confuse with a complex number (x,y) + if s[0] == "(" or ")" not in s: + raise e + s = s.replace(")", "") + percent = False + if "%" in s: + if s[-1] != "%": + raise e + percent = True + s = s.replace("%", "") + x, y = s.split("(") + return mpi_from_str_a_b(x, y, percent, prec) + elif "," in s: + if ('[' not in s) or (']' not in s): + raise e + if s[0] == '[': + # case 3 + s = s.replace("[", "") + s = s.replace("]", "") + a, b = s.split(",") + a = from_str(a, prec, round_floor) + b = from_str(b, prec, round_ceiling) + return a, b + else: + # case 4 + x, y = s.split('[') + y, z = y.split(',') + if 'e' in s: + z, e = z.split(']') + else: + z, e = z.rstrip(']'), '' + a = from_str(x+y+e, prec, round_floor) + b = from_str(x+z+e, prec, round_ceiling) + return a, b + else: + a = from_str(s, prec, round_floor) + b = from_str(s, prec, round_ceiling) + return a, b + +def mpi_to_str(x, dps, use_spaces=True, brackets='[]', mode='brackets', error_dps=4, **kwargs): + """ + Convert a mpi interval to a string. + + **Arguments** + + *dps* + decimal places to use for printing + *use_spaces* + use spaces for more readable output, defaults to true + *brackets* + pair of strings (or two-character string) giving left and right brackets + *mode* + mode of display: 'plusminus', 'percent', 'brackets' (default) or 'diff' + *error_dps* + limit the error to *error_dps* digits (mode 'plusminus and 'percent') + + Additional keyword arguments are forwarded to the mpf-to-string conversion + for the components of the output. 
+ + **Examples** + + >>> from mpmath import mpi, mp + >>> mp.dps = 30 + >>> x = mpi(1, 2)._mpi_ + >>> mpi_to_str(x, 2, mode='plusminus') + '1.5 +- 0.5' + >>> mpi_to_str(x, 2, mode='percent') + '1.5 (33.33%)' + >>> mpi_to_str(x, 2, mode='brackets') + '[1.0, 2.0]' + >>> mpi_to_str(x, 2, mode='brackets' , brackets=('<', '>')) + '<1.0, 2.0>' + >>> x = mpi('5.2582327113062393041', '5.2582327113062749951')._mpi_ + >>> mpi_to_str(x, 15, mode='diff') + '5.2582327113062[4, 7]' + >>> mpi_to_str(mpi(0)._mpi_, 2, mode='percent') + '0.0 (0.0%)' + + """ + prec = dps_to_prec(dps) + wp = prec + 20 + a, b = x + mid = mpi_mid(x, prec) + delta = mpi_delta(x, prec) + a_str = to_str(a, dps, **kwargs) + b_str = to_str(b, dps, **kwargs) + mid_str = to_str(mid, dps, **kwargs) + sp = "" + if use_spaces: + sp = " " + br1, br2 = brackets + if mode == 'plusminus': + delta_str = to_str(mpf_shift(delta,-1), dps, **kwargs) + s = mid_str + sp + "+-" + sp + delta_str + elif mode == 'percent': + if mid == fzero: + p = fzero + else: + # p = 100 * delta(x) / (2*mid(x)) + p = mpf_mul(delta, from_int(100)) + p = mpf_div(p, mpf_mul(mid, from_int(2)), wp) + s = mid_str + sp + "(" + to_str(p, error_dps) + "%)" + elif mode == 'brackets': + s = br1 + a_str + "," + sp + b_str + br2 + elif mode == 'diff': + # use more digits if str(x.a) and str(x.b) are equal + if a_str == b_str: + a_str = to_str(a, dps+3, **kwargs) + b_str = to_str(b, dps+3, **kwargs) + # separate mantissa and exponent + a = a_str.split('e') + if len(a) == 1: + a.append('') + b = b_str.split('e') + if len(b) == 1: + b.append('') + if a[1] == b[1]: + if a[0] != b[0]: + for i in xrange(len(a[0]) + 1): + if a[0][i] != b[0][i]: + break + s = (a[0][:i] + br1 + a[0][i:] + ',' + sp + b[0][i:] + br2 + + 'e'*min(len(a[1]), 1) + a[1]) + else: # no difference + s = a[0] + br1 + br2 + 'e'*min(len(a[1]), 1) + a[1] + else: + s = br1 + 'e'.join(a) + ',' + sp + 'e'.join(b) + br2 + else: + raise ValueError("'%s' is unknown mode for printing mpi" % mode) + 
return s + +def mpci_add(x, y, prec): + a, b = x + c, d = y + return mpi_add(a, c, prec), mpi_add(b, d, prec) + +def mpci_sub(x, y, prec): + a, b = x + c, d = y + return mpi_sub(a, c, prec), mpi_sub(b, d, prec) + +def mpci_neg(x, prec=0): + a, b = x + return mpi_neg(a, prec), mpi_neg(b, prec) + +def mpci_pos(x, prec): + a, b = x + return mpi_pos(a, prec), mpi_pos(b, prec) + +def mpci_mul(x, y, prec): + # TODO: optimize for real/imag cases + a, b = x + c, d = y + r1 = mpi_mul(a,c) + r2 = mpi_mul(b,d) + re = mpi_sub(r1,r2,prec) + i1 = mpi_mul(a,d) + i2 = mpi_mul(b,c) + im = mpi_add(i1,i2,prec) + return re, im + +def mpci_div(x, y, prec): + # TODO: optimize for real/imag cases + a, b = x + c, d = y + wp = prec+20 + m1 = mpi_square(c) + m2 = mpi_square(d) + m = mpi_add(m1,m2,wp) + re = mpi_add(mpi_mul(a,c), mpi_mul(b,d), wp) + im = mpi_sub(mpi_mul(b,c), mpi_mul(a,d), wp) + re = mpi_div(re, m, prec) + im = mpi_div(im, m, prec) + return re, im + +def mpci_exp(x, prec): + a, b = x + wp = prec+20 + r = mpi_exp(a, wp) + c, s = mpi_cos_sin(b, wp) + a = mpi_mul(r, c, prec) + b = mpi_mul(r, s, prec) + return a, b + +def mpi_shift(x, n): + a, b = x + return mpf_shift(a,n), mpf_shift(b,n) + +def mpi_cosh_sinh(x, prec): + # TODO: accuracy for small x + wp = prec+20 + e1 = mpi_exp(x, wp) + e2 = mpi_div(mpi_one, e1, wp) + c = mpi_add(e1, e2, prec) + s = mpi_sub(e1, e2, prec) + c = mpi_shift(c, -1) + s = mpi_shift(s, -1) + return c, s + +def mpci_cos(x, prec): + a, b = x + wp = prec+10 + c, s = mpi_cos_sin(a, wp) + ch, sh = mpi_cosh_sinh(b, wp) + re = mpi_mul(c, ch, prec) + im = mpi_mul(s, sh, prec) + return re, mpi_neg(im) + +def mpci_sin(x, prec): + a, b = x + wp = prec+10 + c, s = mpi_cos_sin(a, wp) + ch, sh = mpi_cosh_sinh(b, wp) + re = mpi_mul(s, ch, prec) + im = mpi_mul(c, sh, prec) + return re, im + +def mpci_abs(x, prec): + a, b = x + if a == mpi_zero: + return mpi_abs(b) + if b == mpi_zero: + return mpi_abs(a) + # Important: nonnegative + a = mpi_square(a) + b = 
mpi_square(b) + t = mpi_add(a, b, prec+20) + return mpi_sqrt(t, prec) + +def mpi_atan2(y, x, prec): + ya, yb = y + xa, xb = x + # Constrained to the real line + if ya == yb == fzero: + if mpf_ge(xa, fzero): + return mpi_zero + return mpi_pi(prec) + # Right half-plane + if mpf_ge(xa, fzero): + if mpf_ge(ya, fzero): + a = mpf_atan2(ya, xb, prec, round_floor) + else: + a = mpf_atan2(ya, xa, prec, round_floor) + if mpf_ge(yb, fzero): + b = mpf_atan2(yb, xa, prec, round_ceiling) + else: + b = mpf_atan2(yb, xb, prec, round_ceiling) + # Upper half-plane + elif mpf_ge(ya, fzero): + b = mpf_atan2(ya, xa, prec, round_ceiling) + if mpf_le(xb, fzero): + a = mpf_atan2(yb, xb, prec, round_floor) + else: + a = mpf_atan2(ya, xb, prec, round_floor) + # Lower half-plane + elif mpf_le(yb, fzero): + a = mpf_atan2(yb, xa, prec, round_floor) + if mpf_le(xb, fzero): + b = mpf_atan2(ya, xb, prec, round_ceiling) + else: + b = mpf_atan2(yb, xb, prec, round_ceiling) + # Covering the origin + else: + b = mpf_pi(prec, round_ceiling) + a = mpf_neg(b) + return a, b + +def mpci_arg(z, prec): + x, y = z + return mpi_atan2(y, x, prec) + +def mpci_log(z, prec): + x, y = z + re = mpi_log(mpci_abs(z, prec+20), prec) + im = mpci_arg(z, prec) + return re, im + +def mpci_pow(x, y, prec): + # TODO: recognize/speed up real cases, integer y + yre, yim = y + if yim == mpi_zero: + ya, yb = yre + if ya == yb: + sign, man, exp, bc = yb + if man and exp >= 0: + return mpci_pow_int(x, (-1)**sign * int(man<>= 1 + return mpci_pos(result, prec) + +gamma_min_a = from_float(1.46163214496) +gamma_min_b = from_float(1.46163214497) +gamma_min = (gamma_min_a, gamma_min_b) +gamma_mono_imag_a = from_float(-1.1) +gamma_mono_imag_b = from_float(1.1) + +def mpi_overlap(x, y): + a, b = x + c, d = y + if mpf_lt(d, a): return False + if mpf_gt(c, b): return False + return True + +# type = 0 -- gamma +# type = 1 -- factorial +# type = 2 -- 1/gamma +# type = 3 -- log-gamma + +def mpi_gamma(z, prec, type=0): + a, b = z + wp = 
prec+20 + + if type == 1: + return mpi_gamma(mpi_add(z, mpi_one, wp), prec, 0) + + # increasing + if mpf_gt(a, gamma_min_b): + if type == 0: + c = mpf_gamma(a, prec, round_floor) + d = mpf_gamma(b, prec, round_ceiling) + elif type == 2: + c = mpf_rgamma(b, prec, round_floor) + d = mpf_rgamma(a, prec, round_ceiling) + elif type == 3: + c = mpf_loggamma(a, prec, round_floor) + d = mpf_loggamma(b, prec, round_ceiling) + # decreasing + elif mpf_gt(a, fzero) and mpf_lt(b, gamma_min_a): + if type == 0: + c = mpf_gamma(b, prec, round_floor) + d = mpf_gamma(a, prec, round_ceiling) + elif type == 2: + c = mpf_rgamma(a, prec, round_floor) + d = mpf_rgamma(b, prec, round_ceiling) + elif type == 3: + c = mpf_loggamma(b, prec, round_floor) + d = mpf_loggamma(a, prec, round_ceiling) + else: + # TODO: reflection formula + znew = mpi_add(z, mpi_one, wp) + if type == 0: return mpi_div(mpi_gamma(znew, prec+2, 0), z, prec) + if type == 2: return mpi_mul(mpi_gamma(znew, prec+2, 2), z, prec) + if type == 3: return mpi_sub(mpi_gamma(znew, prec+2, 3), mpi_log(z, prec+2), prec) + return c, d + +def mpci_gamma(z, prec, type=0): + (a1,a2), (b1,b2) = z + + # Real case + if b1 == b2 == fzero and (type != 3 or mpf_gt(a1,fzero)): + return mpi_gamma(z, prec, type), mpi_zero + + # Estimate precision + wp = prec+20 + if type != 3: + amag = a2[2]+a2[3] + bmag = b2[2]+b2[3] + if a2 != fzero: + mag = max(amag, bmag) + else: + mag = bmag + an = abs(to_int(a2)) + bn = abs(to_int(b2)) + absn = max(an, bn) + gamma_size = max(0,absn*mag) + wp += bitcount(gamma_size) + + # Assume type != 1 + if type == 1: + (a1,a2) = mpi_add((a1,a2), mpi_one, wp); z = (a1,a2), (b1,b2) + type = 0 + + # Avoid non-monotonic region near the negative real axis + if mpf_lt(a1, gamma_min_b): + if mpi_overlap((b1,b2), (gamma_mono_imag_a, gamma_mono_imag_b)): + # TODO: reflection formula + #if mpf_lt(a2, mpf_shift(fone,-1)): + # znew = mpci_sub((mpi_one,mpi_zero),z,wp) + # ... 
+ # Recurrence: + # gamma(z) = gamma(z+1)/z + znew = mpi_add((a1,a2), mpi_one, wp), (b1,b2) + if type == 0: return mpci_div(mpci_gamma(znew, prec+2, 0), z, prec) + if type == 2: return mpci_mul(mpci_gamma(znew, prec+2, 2), z, prec) + if type == 3: return mpci_sub(mpci_gamma(znew, prec+2, 3), mpci_log(z,prec+2), prec) + + # Use monotonicity (except for a small region close to the + # origin and near poles) + # upper half-plane + if mpf_ge(b1, fzero): + minre = mpc_loggamma((a1,b2), wp, round_floor) + maxre = mpc_loggamma((a2,b1), wp, round_ceiling) + minim = mpc_loggamma((a1,b1), wp, round_floor) + maxim = mpc_loggamma((a2,b2), wp, round_ceiling) + # lower half-plane + elif mpf_le(b2, fzero): + minre = mpc_loggamma((a1,b1), wp, round_floor) + maxre = mpc_loggamma((a2,b2), wp, round_ceiling) + minim = mpc_loggamma((a2,b1), wp, round_floor) + maxim = mpc_loggamma((a1,b2), wp, round_ceiling) + # crosses real axis + else: + maxre = mpc_loggamma((a2,fzero), wp, round_ceiling) + # stretches more into the lower half-plane + if mpf_gt(mpf_neg(b1), b2): + minre = mpc_loggamma((a1,b1), wp, round_ceiling) + else: + minre = mpc_loggamma((a1,b2), wp, round_ceiling) + minim = mpc_loggamma((a2,b1), wp, round_floor) + maxim = mpc_loggamma((a2,b2), wp, round_floor) + + w = (minre[0], maxre[0]), (minim[1], maxim[1]) + if type == 3: + return mpi_pos(w[0], prec), mpi_pos(w[1], prec) + if type == 2: + w = mpci_neg(w) + return mpci_exp(w, prec) + +def mpi_loggamma(z, prec): return mpi_gamma(z, prec, type=3) +def mpci_loggamma(z, prec): return mpci_gamma(z, prec, type=3) + +def mpi_rgamma(z, prec): return mpi_gamma(z, prec, type=2) +def mpci_rgamma(z, prec): return mpci_gamma(z, prec, type=2) + +def mpi_factorial(z, prec): return mpi_gamma(z, prec, type=1) +def mpci_factorial(z, prec): return mpci_gamma(z, prec, type=1) diff --git a/.venv/lib/python3.11/site-packages/mpmath/math2.py b/.venv/lib/python3.11/site-packages/mpmath/math2.py new file mode 100644 index 
0000000000000000000000000000000000000000..302e25f509c18b2c76a2b62611f2765db84ab13e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/math2.py @@ -0,0 +1,672 @@ +""" +This module complements the math and cmath builtin modules by providing +fast machine precision versions of some additional functions (gamma, ...) +and wrapping math/cmath functions so that they can be called with either +real or complex arguments. +""" + +import operator +import math +import cmath + +# Irrational (?) constants +pi = 3.1415926535897932385 +e = 2.7182818284590452354 +sqrt2 = 1.4142135623730950488 +sqrt5 = 2.2360679774997896964 +phi = 1.6180339887498948482 +ln2 = 0.69314718055994530942 +ln10 = 2.302585092994045684 +euler = 0.57721566490153286061 +catalan = 0.91596559417721901505 +khinchin = 2.6854520010653064453 +apery = 1.2020569031595942854 + +logpi = 1.1447298858494001741 + +def _mathfun_real(f_real, f_complex): + def f(x, **kwargs): + if type(x) is float: + return f_real(x) + if type(x) is complex: + return f_complex(x) + try: + x = float(x) + return f_real(x) + except (TypeError, ValueError): + x = complex(x) + return f_complex(x) + f.__name__ = f_real.__name__ + return f + +def _mathfun(f_real, f_complex): + def f(x, **kwargs): + if type(x) is complex: + return f_complex(x) + try: + return f_real(float(x)) + except (TypeError, ValueError): + return f_complex(complex(x)) + f.__name__ = f_real.__name__ + return f + +def _mathfun_n(f_real, f_complex): + def f(*args, **kwargs): + try: + return f_real(*(float(x) for x in args)) + except (TypeError, ValueError): + return f_complex(*(complex(x) for x in args)) + f.__name__ = f_real.__name__ + return f + +# Workaround for non-raising log and sqrt in Python 2.5 and 2.4 +# on Unix system +try: + math.log(-2.0) + def math_log(x): + if x <= 0.0: + raise ValueError("math domain error") + return math.log(x) + def math_sqrt(x): + if x < 0.0: + raise ValueError("math domain error") + return math.sqrt(x) +except (ValueError, TypeError): 
+ math_log = math.log + math_sqrt = math.sqrt + +pow = _mathfun_n(operator.pow, lambda x, y: complex(x)**y) +log = _mathfun_n(math_log, cmath.log) +sqrt = _mathfun(math_sqrt, cmath.sqrt) +exp = _mathfun_real(math.exp, cmath.exp) + +cos = _mathfun_real(math.cos, cmath.cos) +sin = _mathfun_real(math.sin, cmath.sin) +tan = _mathfun_real(math.tan, cmath.tan) + +acos = _mathfun(math.acos, cmath.acos) +asin = _mathfun(math.asin, cmath.asin) +atan = _mathfun_real(math.atan, cmath.atan) + +cosh = _mathfun_real(math.cosh, cmath.cosh) +sinh = _mathfun_real(math.sinh, cmath.sinh) +tanh = _mathfun_real(math.tanh, cmath.tanh) + +floor = _mathfun_real(math.floor, + lambda z: complex(math.floor(z.real), math.floor(z.imag))) +ceil = _mathfun_real(math.ceil, + lambda z: complex(math.ceil(z.real), math.ceil(z.imag))) + + +cos_sin = _mathfun_real(lambda x: (math.cos(x), math.sin(x)), + lambda z: (cmath.cos(z), cmath.sin(z))) + +cbrt = _mathfun(lambda x: x**(1./3), lambda z: z**(1./3)) + +def nthroot(x, n): + r = 1./n + try: + return float(x) ** r + except (ValueError, TypeError): + return complex(x) ** r + +def _sinpi_real(x): + if x < 0: + return -_sinpi_real(-x) + n, r = divmod(x, 0.5) + r *= pi + n %= 4 + if n == 0: return math.sin(r) + if n == 1: return math.cos(r) + if n == 2: return -math.sin(r) + if n == 3: return -math.cos(r) + +def _cospi_real(x): + if x < 0: + x = -x + n, r = divmod(x, 0.5) + r *= pi + n %= 4 + if n == 0: return math.cos(r) + if n == 1: return -math.sin(r) + if n == 2: return -math.cos(r) + if n == 3: return math.sin(r) + +def _sinpi_complex(z): + if z.real < 0: + return -_sinpi_complex(-z) + n, r = divmod(z.real, 0.5) + z = pi*complex(r, z.imag) + n %= 4 + if n == 0: return cmath.sin(z) + if n == 1: return cmath.cos(z) + if n == 2: return -cmath.sin(z) + if n == 3: return -cmath.cos(z) + +def _cospi_complex(z): + if z.real < 0: + z = -z + n, r = divmod(z.real, 0.5) + z = pi*complex(r, z.imag) + n %= 4 + if n == 0: return cmath.cos(z) + if n == 1: return 
-cmath.sin(z) + if n == 2: return -cmath.cos(z) + if n == 3: return cmath.sin(z) + +cospi = _mathfun_real(_cospi_real, _cospi_complex) +sinpi = _mathfun_real(_sinpi_real, _sinpi_complex) + +def tanpi(x): + try: + return sinpi(x) / cospi(x) + except OverflowError: + if complex(x).imag > 10: + return 1j + if complex(x).imag < 10: + return -1j + raise + +def cotpi(x): + try: + return cospi(x) / sinpi(x) + except OverflowError: + if complex(x).imag > 10: + return -1j + if complex(x).imag < 10: + return 1j + raise + +INF = 1e300*1e300 +NINF = -INF +NAN = INF-INF +EPS = 2.2204460492503131e-16 + +_exact_gamma = (INF, 1.0, 1.0, 2.0, 6.0, 24.0, 120.0, 720.0, 5040.0, 40320.0, + 362880.0, 3628800.0, 39916800.0, 479001600.0, 6227020800.0, 87178291200.0, + 1307674368000.0, 20922789888000.0, 355687428096000.0, 6402373705728000.0, + 121645100408832000.0, 2432902008176640000.0) + +_max_exact_gamma = len(_exact_gamma)-1 + +# Lanczos coefficients used by the GNU Scientific Library +_lanczos_g = 7 +_lanczos_p = (0.99999999999980993, 676.5203681218851, -1259.1392167224028, + 771.32342877765313, -176.61502916214059, 12.507343278686905, + -0.13857109526572012, 9.9843695780195716e-6, 1.5056327351493116e-7) + +def _gamma_real(x): + _intx = int(x) + if _intx == x: + if _intx <= 0: + #return (-1)**_intx * INF + raise ZeroDivisionError("gamma function pole") + if _intx <= _max_exact_gamma: + return _exact_gamma[_intx] + if x < 0.5: + # TODO: sinpi + return pi / (_sinpi_real(x)*_gamma_real(1-x)) + else: + x -= 1.0 + r = _lanczos_p[0] + for i in range(1, _lanczos_g+2): + r += _lanczos_p[i]/(x+i) + t = x + _lanczos_g + 0.5 + return 2.506628274631000502417 * t**(x+0.5) * math.exp(-t) * r + +def _gamma_complex(x): + if not x.imag: + return complex(_gamma_real(x.real)) + if x.real < 0.5: + # TODO: sinpi + return pi / (_sinpi_complex(x)*_gamma_complex(1-x)) + else: + x -= 1.0 + r = _lanczos_p[0] + for i in range(1, _lanczos_g+2): + r += _lanczos_p[i]/(x+i) + t = x + _lanczos_g + 0.5 + return 
2.506628274631000502417 * t**(x+0.5) * cmath.exp(-t) * r + +gamma = _mathfun_real(_gamma_real, _gamma_complex) + +def rgamma(x): + try: + return 1./gamma(x) + except ZeroDivisionError: + return x*0.0 + +def factorial(x): + return gamma(x+1.0) + +def arg(x): + if type(x) is float: + return math.atan2(0.0,x) + return math.atan2(x.imag,x.real) + +# XXX: broken for negatives +def loggamma(x): + if type(x) not in (float, complex): + try: + x = float(x) + except (ValueError, TypeError): + x = complex(x) + try: + xreal = x.real + ximag = x.imag + except AttributeError: # py2.5 + xreal = x + ximag = 0.0 + # Reflection formula + # http://functions.wolfram.com/GammaBetaErf/LogGamma/16/01/01/0003/ + if xreal < 0.0: + if abs(x) < 0.5: + v = log(gamma(x)) + if ximag == 0: + v = v.conjugate() + return v + z = 1-x + try: + re = z.real + im = z.imag + except AttributeError: # py2.5 + re = z + im = 0.0 + refloor = floor(re) + if im == 0.0: + imsign = 0 + elif im < 0.0: + imsign = -1 + else: + imsign = 1 + return (-pi*1j)*abs(refloor)*(1-abs(imsign)) + logpi - \ + log(sinpi(z-refloor)) - loggamma(z) + 1j*pi*refloor*imsign + if x == 1.0 or x == 2.0: + return x*0 + p = 0. 
+ while abs(x) < 11: + p -= log(x) + x += 1.0 + s = 0.918938533204672742 + (x-0.5)*log(x) - x + r = 1./x + r2 = r*r + s += 0.083333333333333333333*r; r *= r2 + s += -0.0027777777777777777778*r; r *= r2 + s += 0.00079365079365079365079*r; r *= r2 + s += -0.0005952380952380952381*r; r *= r2 + s += 0.00084175084175084175084*r; r *= r2 + s += -0.0019175269175269175269*r; r *= r2 + s += 0.0064102564102564102564*r; r *= r2 + s += -0.02955065359477124183*r + return s + p + +_psi_coeff = [ +0.083333333333333333333, +-0.0083333333333333333333, +0.003968253968253968254, +-0.0041666666666666666667, +0.0075757575757575757576, +-0.021092796092796092796, +0.083333333333333333333, +-0.44325980392156862745, +3.0539543302701197438, +-26.456212121212121212] + +def _digamma_real(x): + _intx = int(x) + if _intx == x: + if _intx <= 0: + raise ZeroDivisionError("polygamma pole") + if x < 0.5: + x = 1.0-x + s = pi*cotpi(x) + else: + s = 0.0 + while x < 10.0: + s -= 1.0/x + x += 1.0 + x2 = x**-2 + t = x2 + for c in _psi_coeff: + s -= c*t + if t < 1e-20: + break + t *= x2 + return s + math_log(x) - 0.5/x + +def _digamma_complex(x): + if not x.imag: + return complex(_digamma_real(x.real)) + if x.real < 0.5: + x = 1.0-x + s = pi*cotpi(x) + else: + s = 0.0 + while abs(x) < 10.0: + s -= 1.0/x + x += 1.0 + x2 = x**-2 + t = x2 + for c in _psi_coeff: + s -= c*t + if abs(t) < 1e-20: + break + t *= x2 + return s + cmath.log(x) - 0.5/x + +digamma = _mathfun_real(_digamma_real, _digamma_complex) + +# TODO: could implement complex erf and erfc here. Need +# to find an accurate method (avoiding cancellation) +# for approx. 1 < abs(x) < 9. 
+ +_erfc_coeff_P = [ + 1.0000000161203922312, + 2.1275306946297962644, + 2.2280433377390253297, + 1.4695509105618423961, + 0.66275911699770787537, + 0.20924776504163751585, + 0.045459713768411264339, + 0.0063065951710717791934, + 0.00044560259661560421715][::-1] + +_erfc_coeff_Q = [ + 1.0000000000000000000, + 3.2559100272784894318, + 4.9019435608903239131, + 4.4971472894498014205, + 2.7845640601891186528, + 1.2146026030046904138, + 0.37647108453729465912, + 0.080970149639040548613, + 0.011178148899483545902, + 0.00078981003831980423513][::-1] + +def _polyval(coeffs, x): + p = coeffs[0] + for c in coeffs[1:]: + p = c + x*p + return p + +def _erf_taylor(x): + # Taylor series assuming 0 <= x <= 1 + x2 = x*x + s = t = x + n = 1 + while abs(t) > 1e-17: + t *= x2/n + s -= t/(n+n+1) + n += 1 + t *= x2/n + s += t/(n+n+1) + n += 1 + return 1.1283791670955125739*s + +def _erfc_mid(x): + # Rational approximation assuming 0 <= x <= 9 + return exp(-x*x)*_polyval(_erfc_coeff_P,x)/_polyval(_erfc_coeff_Q,x) + +def _erfc_asymp(x): + # Asymptotic expansion assuming x >= 9 + x2 = x*x + v = exp(-x2)/x*0.56418958354775628695 + r = t = 0.5 / x2 + s = 1.0 + for n in range(1,22,4): + s -= t + t *= r * (n+2) + s += t + t *= r * (n+4) + if abs(t) < 1e-17: + break + return s * v + +def erf(x): + """ + erf of a real number. + """ + x = float(x) + if x != x: + return x + if x < 0.0: + return -erf(-x) + if x >= 1.0: + if x >= 6.0: + return 1.0 + return 1.0 - _erfc_mid(x) + return _erf_taylor(x) + +def erfc(x): + """ + erfc of a real number. 
+ """ + x = float(x) + if x != x: + return x + if x < 0.0: + if x < -6.0: + return 2.0 + return 2.0-erfc(-x) + if x > 9.0: + return _erfc_asymp(x) + if x >= 1.0: + return _erfc_mid(x) + return 1.0 - _erf_taylor(x) + +gauss42 = [\ +(0.99839961899006235, 0.0041059986046490839), +(-0.99839961899006235, 0.0041059986046490839), +(0.9915772883408609, 0.009536220301748501), +(-0.9915772883408609,0.009536220301748501), +(0.97934250806374812, 0.014922443697357493), +(-0.97934250806374812, 0.014922443697357493), +(0.96175936533820439,0.020227869569052644), +(-0.96175936533820439, 0.020227869569052644), +(0.93892355735498811, 0.025422959526113047), +(-0.93892355735498811,0.025422959526113047), +(0.91095972490412735, 0.030479240699603467), +(-0.91095972490412735, 0.030479240699603467), +(0.87802056981217269,0.03536907109759211), +(-0.87802056981217269, 0.03536907109759211), +(0.8402859832618168, 0.040065735180692258), +(-0.8402859832618168,0.040065735180692258), +(0.7979620532554873, 0.044543577771965874), +(-0.7979620532554873, 0.044543577771965874), +(0.75127993568948048,0.048778140792803244), +(-0.75127993568948048, 0.048778140792803244), +(0.70049459055617114, 0.052746295699174064), +(-0.70049459055617114,0.052746295699174064), +(0.64588338886924779, 0.056426369358018376), +(-0.64588338886924779, 0.056426369358018376), +(0.58774459748510932, 0.059798262227586649), +(-0.58774459748510932, 0.059798262227586649), +(0.5263957499311922, 0.062843558045002565), +(-0.5263957499311922, 0.062843558045002565), +(0.46217191207042191, 0.065545624364908975), +(-0.46217191207042191, 0.065545624364908975), +(0.39542385204297503, 0.067889703376521934), +(-0.39542385204297503, 0.067889703376521934), +(0.32651612446541151, 0.069862992492594159), +(-0.32651612446541151, 0.069862992492594159), +(0.25582507934287907, 0.071454714265170971), +(-0.25582507934287907, 0.071454714265170971), +(0.18373680656485453, 0.072656175243804091), +(-0.18373680656485453, 0.072656175243804091), 
+(0.11064502720851986, 0.073460813453467527), +(-0.11064502720851986, 0.073460813453467527), +(0.036948943165351772, 0.073864234232172879), +(-0.036948943165351772, 0.073864234232172879)] + +EI_ASYMP_CONVERGENCE_RADIUS = 40.0 + +def ei_asymp(z, _e1=False): + r = 1./z + s = t = 1.0 + k = 1 + while 1: + t *= k*r + s += t + if abs(t) < 1e-16: + break + k += 1 + v = s*exp(z)/z + if _e1: + if type(z) is complex: + zreal = z.real + zimag = z.imag + else: + zreal = z + zimag = 0.0 + if zimag == 0.0 and zreal > 0.0: + v += pi*1j + else: + if type(z) is complex: + if z.imag > 0: + v += pi*1j + if z.imag < 0: + v -= pi*1j + return v + +def ei_taylor(z, _e1=False): + s = t = z + k = 2 + while 1: + t = t*z/k + term = t/k + if abs(term) < 1e-17: + break + s += term + k += 1 + s += euler + if _e1: + s += log(-z) + else: + if type(z) is float or z.imag == 0.0: + s += math_log(abs(z)) + else: + s += cmath.log(z) + return s + +def ei(z, _e1=False): + typez = type(z) + if typez not in (float, complex): + try: + z = float(z) + typez = float + except (TypeError, ValueError): + z = complex(z) + typez = complex + if not z: + return -INF + absz = abs(z) + if absz > EI_ASYMP_CONVERGENCE_RADIUS: + return ei_asymp(z, _e1) + elif absz <= 2.0 or (typez is float and z > 0.0): + return ei_taylor(z, _e1) + # Integrate, starting from whichever is smaller of a Taylor + # series value or an asymptotic series value + if typez is complex and z.real > 0.0: + zref = z / absz + ref = ei_taylor(zref, _e1) + else: + zref = EI_ASYMP_CONVERGENCE_RADIUS * z / absz + ref = ei_asymp(zref, _e1) + C = (zref-z)*0.5 + D = (zref+z)*0.5 + s = 0.0 + if type(z) is complex: + _exp = cmath.exp + else: + _exp = math.exp + for x,w in gauss42: + t = C*x+D + s += w*_exp(t)/t + ref -= C*s + return ref + +def e1(z): + # hack to get consistent signs if the imaginary part if 0 + # and signed + typez = type(z) + if type(z) not in (float, complex): + try: + z = float(z) + typez = float + except (TypeError, ValueError): + z = 
complex(z) + typez = complex + if typez is complex and not z.imag: + z = complex(z.real, 0.0) + # end hack + return -ei(-z, _e1=True) + +_zeta_int = [\ +-0.5, +0.0, +1.6449340668482264365,1.2020569031595942854,1.0823232337111381915, +1.0369277551433699263,1.0173430619844491397,1.0083492773819228268, +1.0040773561979443394,1.0020083928260822144,1.0009945751278180853, +1.0004941886041194646,1.0002460865533080483,1.0001227133475784891, +1.0000612481350587048,1.0000305882363070205,1.0000152822594086519, +1.0000076371976378998,1.0000038172932649998,1.0000019082127165539, +1.0000009539620338728,1.0000004769329867878,1.0000002384505027277, +1.0000001192199259653,1.0000000596081890513,1.0000000298035035147, +1.0000000149015548284] + +_zeta_P = [-3.50000000087575873, -0.701274355654678147, +-0.0672313458590012612, -0.00398731457954257841, +-0.000160948723019303141, -4.67633010038383371e-6, +-1.02078104417700585e-7, -1.68030037095896287e-9, +-1.85231868742346722e-11][::-1] + +_zeta_Q = [1.00000000000000000, -0.936552848762465319, +-0.0588835413263763741, -0.00441498861482948666, +-0.000143416758067432622, -5.10691659585090782e-6, +-9.58813053268913799e-8, -1.72963791443181972e-9, +-1.83527919681474132e-11][::-1] + +_zeta_1 = [3.03768838606128127e-10, -1.21924525236601262e-8, +2.01201845887608893e-7, -1.53917240683468381e-6, +-5.09890411005967954e-7, 0.000122464707271619326, +-0.000905721539353130232, -0.00239315326074843037, +0.084239750013159168, 0.418938517907442414, 0.500000001921884009] + +_zeta_0 = [-3.46092485016748794e-10, -6.42610089468292485e-9, +1.76409071536679773e-7, -1.47141263991560698e-6, -6.38880222546167613e-7, +0.000122641099800668209, -0.000905894913516772796, -0.00239303348507992713, +0.0842396947501199816, 0.418938533204660256, 0.500000000000000052] + +def zeta(s): + """ + Riemann zeta function, real argument + """ + if not isinstance(s, (float, int)): + try: + s = float(s) + except (ValueError, TypeError): + try: + s = complex(s) + if not s.imag: + 
return complex(zeta(s.real)) + except (ValueError, TypeError): + pass + raise NotImplementedError + if s == 1: + raise ValueError("zeta(1) pole") + if s >= 27: + return 1.0 + 2.0**(-s) + 3.0**(-s) + n = int(s) + if n == s: + if n >= 0: + return _zeta_int[n] + if not (n % 2): + return 0.0 + if s <= 0.0: + return 2.**s*pi**(s-1)*_sinpi_real(0.5*s)*_gamma_real(1-s)*zeta(1-s) + if s <= 2.0: + if s <= 1.0: + return _polyval(_zeta_0,s)/(s-1) + return _polyval(_zeta_1,s)/(s-1) + z = _polyval(_zeta_P,s) / _polyval(_zeta_Q,s) + return 1.0 + 2.0**(-s) + 3.0**(-s) + 4.0**(-s)*z diff --git a/.venv/lib/python3.11/site-packages/mpmath/rational.py b/.venv/lib/python3.11/site-packages/mpmath/rational.py new file mode 100644 index 0000000000000000000000000000000000000000..58745205319ac3548ad5feb49371d2d154b2d3c8 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/rational.py @@ -0,0 +1,240 @@ +import operator +import sys +from .libmp import int_types, mpf_hash, bitcount, from_man_exp, HASH_MODULUS + +new = object.__new__ + +def create_reduced(p, q, _cache={}): + key = p, q + if key in _cache: + return _cache[key] + x, y = p, q + while y: + x, y = y, x % y + if x != 1: + p //= x + q //= x + v = new(mpq) + v._mpq_ = p, q + # Speedup integers, half-integers and other small fractions + if q <= 4 and abs(key[0]) < 100: + _cache[key] = v + return v + +class mpq(object): + """ + Exact rational type, currently only intended for internal use. 
+ """ + + __slots__ = ["_mpq_"] + + def __new__(cls, p, q=1): + if type(p) is tuple: + p, q = p + elif hasattr(p, '_mpq_'): + p, q = p._mpq_ + return create_reduced(p, q) + + def __repr__(s): + return "mpq(%s,%s)" % s._mpq_ + + def __str__(s): + return "(%s/%s)" % s._mpq_ + + def __int__(s): + a, b = s._mpq_ + return a // b + + def __nonzero__(s): + return bool(s._mpq_[0]) + + __bool__ = __nonzero__ + + def __hash__(s): + a, b = s._mpq_ + if sys.version_info >= (3, 2): + inverse = pow(b, HASH_MODULUS-2, HASH_MODULUS) + if not inverse: + h = sys.hash_info.inf + else: + h = (abs(a) * inverse) % HASH_MODULUS + if a < 0: h = -h + if h == -1: h = -2 + return h + else: + if b == 1: + return hash(a) + # Power of two: mpf compatible hash + if not (b & (b-1)): + return mpf_hash(from_man_exp(a, 1-bitcount(b))) + return hash((a,b)) + + def __eq__(s, t): + ttype = type(t) + if ttype is mpq: + return s._mpq_ == t._mpq_ + if ttype in int_types: + a, b = s._mpq_ + if b != 1: + return False + return a == t + return NotImplemented + + def __ne__(s, t): + ttype = type(t) + if ttype is mpq: + return s._mpq_ != t._mpq_ + if ttype in int_types: + a, b = s._mpq_ + if b != 1: + return True + return a != t + return NotImplemented + + def _cmp(s, t, op): + ttype = type(t) + if ttype in int_types: + a, b = s._mpq_ + return op(a, t*b) + if ttype is mpq: + a, b = s._mpq_ + c, d = t._mpq_ + return op(a*d, b*c) + return NotImplementedError + + def __lt__(s, t): return s._cmp(t, operator.lt) + def __le__(s, t): return s._cmp(t, operator.le) + def __gt__(s, t): return s._cmp(t, operator.gt) + def __ge__(s, t): return s._cmp(t, operator.ge) + + def __abs__(s): + a, b = s._mpq_ + if a >= 0: + return s + v = new(mpq) + v._mpq_ = -a, b + return v + + def __neg__(s): + a, b = s._mpq_ + v = new(mpq) + v._mpq_ = -a, b + return v + + def __pos__(s): + return s + + def __add__(s, t): + ttype = type(t) + if ttype is mpq: + a, b = s._mpq_ + c, d = t._mpq_ + return create_reduced(a*d+b*c, b*d) + if ttype in 
int_types: + a, b = s._mpq_ + v = new(mpq) + v._mpq_ = a+b*t, b + return v + return NotImplemented + + __radd__ = __add__ + + def __sub__(s, t): + ttype = type(t) + if ttype is mpq: + a, b = s._mpq_ + c, d = t._mpq_ + return create_reduced(a*d-b*c, b*d) + if ttype in int_types: + a, b = s._mpq_ + v = new(mpq) + v._mpq_ = a-b*t, b + return v + return NotImplemented + + def __rsub__(s, t): + ttype = type(t) + if ttype is mpq: + a, b = s._mpq_ + c, d = t._mpq_ + return create_reduced(b*c-a*d, b*d) + if ttype in int_types: + a, b = s._mpq_ + v = new(mpq) + v._mpq_ = b*t-a, b + return v + return NotImplemented + + def __mul__(s, t): + ttype = type(t) + if ttype is mpq: + a, b = s._mpq_ + c, d = t._mpq_ + return create_reduced(a*c, b*d) + if ttype in int_types: + a, b = s._mpq_ + return create_reduced(a*t, b) + return NotImplemented + + __rmul__ = __mul__ + + def __div__(s, t): + ttype = type(t) + if ttype is mpq: + a, b = s._mpq_ + c, d = t._mpq_ + return create_reduced(a*d, b*c) + if ttype in int_types: + a, b = s._mpq_ + return create_reduced(a, b*t) + return NotImplemented + + def __rdiv__(s, t): + ttype = type(t) + if ttype is mpq: + a, b = s._mpq_ + c, d = t._mpq_ + return create_reduced(b*c, a*d) + if ttype in int_types: + a, b = s._mpq_ + return create_reduced(b*t, a) + return NotImplemented + + def __pow__(s, t): + ttype = type(t) + if ttype in int_types: + a, b = s._mpq_ + if t: + if t < 0: + a, b, t = b, a, -t + v = new(mpq) + v._mpq_ = a**t, b**t + return v + raise ZeroDivisionError + return NotImplemented + + +mpq_1 = mpq((1,1)) +mpq_0 = mpq((0,1)) +mpq_1_2 = mpq((1,2)) +mpq_3_2 = mpq((3,2)) +mpq_1_4 = mpq((1,4)) +mpq_1_16 = mpq((1,16)) +mpq_3_16 = mpq((3,16)) +mpq_5_2 = mpq((5,2)) +mpq_3_4 = mpq((3,4)) +mpq_7_4 = mpq((7,4)) +mpq_5_4 = mpq((5,4)) + + +# Register with "numbers" ABC +# We do not subclass, hence we do not use the @abstractmethod checks. 
While +# this is less invasive it may turn out that we do not actually support +# parts of the expected interfaces. See +# http://docs.python.org/2/library/numbers.html for list of abstract +# methods. +try: + import numbers + numbers.Rational.register(mpq) +except ImportError: + pass diff --git a/.venv/lib/python3.11/site-packages/mpmath/tests/test_calculus.py b/.venv/lib/python3.11/site-packages/mpmath/tests/test_calculus.py new file mode 100644 index 0000000000000000000000000000000000000000..f0a59773d672f0db20bb5072773472a5a3cc1d1f --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/tests/test_calculus.py @@ -0,0 +1,216 @@ +import pytest +from mpmath import * + +def test_approximation(): + mp.dps = 15 + f = lambda x: cos(2-2*x)/x + p, err = chebyfit(f, [2, 4], 8, error=True) + assert err < 1e-5 + for i in range(10): + x = 2 + i/5. + assert abs(polyval(p, x) - f(x)) < err + +def test_limits(): + mp.dps = 15 + assert limit(lambda x: (x-sin(x))/x**3, 0).ae(mpf(1)/6) + assert limit(lambda n: (1+1/n)**n, inf).ae(e) + +def test_polyval(): + assert polyval([], 3) == 0 + assert polyval([0], 3) == 0 + assert polyval([5], 3) == 5 + # 4x^3 - 2x + 5 + p = [4, 0, -2, 5] + assert polyval(p,4) == 253 + assert polyval(p,4,derivative=True) == (253, 190) + +def test_polyroots(): + p = polyroots([1,-4]) + assert p[0].ae(4) + p, q = polyroots([1,2,3]) + assert p.ae(-1 - sqrt(2)*j) + assert q.ae(-1 + sqrt(2)*j) + #this is not a real test, it only tests a specific case + assert polyroots([1]) == [] + pytest.raises(ValueError, lambda: polyroots([0])) + +def test_polyroots_legendre(): + n = 64 + coeffs = [11975573020964041433067793888190275875, 0, + -190100434726484311252477736051902332000, 0, + 1437919688271127330313741595496589239248, 0, + -6897338342113537600691931230430793911840, 0, + 23556405536185284408974715545252277554280, 0, + -60969520211303089058522793175947071316960, 0, + 124284021969194758465450309166353645376880, 0, + -204721258548015217049921875719981284186016, 
0, + 277415422258095841688223780704620656114900, 0, + -313237834141273382807123548182995095192800, 0, + 297432255354328395601259515935229287637200, 0, + -239057700565161140389797367947941296605600, 0, + 163356095386193445933028201431093219347160, 0, + -95158890516229191805647495979277603503200, 0, + 47310254620162038075933656063247634556400, 0, + -20071017111583894941305187420771723751200, 0, + 7255051932731034189479516844750603752850, 0, + -2228176940331017311443863996901733412640, 0, + 579006552594977616773047095969088431600, 0, + -126584428502545713788439446082310831200, 0, + 23112325428835593809686977515028663000, 0, + -3491517141958743235617737161547844000, 0, + 431305058712550634988073414073557200, 0, + -42927166660756742088912492757452000, 0, + 3378527005707706553294038781836500, 0, + -205277590220215081719131470288800, 0, + 9330799555464321896324157740400, 0, + -304114948474392713657972548576, 0, + 6695289961520387531608984680, 0, + -91048139350447232095702560, 0, + 659769125727878493447120, 0, + -1905929106580294155360, 0, + 916312070471295267] + + with mp.workdps(3): + with pytest.raises(mp.NoConvergence): + polyroots(coeffs, maxsteps=5, cleanup=True, error=False, + extraprec=n*10) + + roots = polyroots(coeffs, maxsteps=50, cleanup=True, error=False, + extraprec=n*10) + roots = [str(r) for r in roots] + assert roots == \ + ['-0.999', '-0.996', '-0.991', '-0.983', '-0.973', '-0.961', + '-0.946', '-0.93', '-0.911', '-0.889', '-0.866', '-0.841', + '-0.813', '-0.784', '-0.753', '-0.72', '-0.685', '-0.649', + '-0.611', '-0.572', '-0.531', '-0.489', '-0.446', '-0.402', + '-0.357', '-0.311', '-0.265', '-0.217', '-0.17', '-0.121', + '-0.073', '-0.0243', '0.0243', '0.073', '0.121', '0.17', '0.217', + '0.265', '0.311', '0.357', '0.402', '0.446', '0.489', '0.531', + '0.572', '0.611', '0.649', '0.685', '0.72', '0.753', '0.784', + '0.813', '0.841', '0.866', '0.889', '0.911', '0.93', '0.946', + '0.961', '0.973', '0.983', '0.991', '0.996', '0.999'] + +def 
test_polyroots_legendre_init(): + extra_prec = 100 + coeffs = [11975573020964041433067793888190275875, 0, + -190100434726484311252477736051902332000, 0, + 1437919688271127330313741595496589239248, 0, + -6897338342113537600691931230430793911840, 0, + 23556405536185284408974715545252277554280, 0, + -60969520211303089058522793175947071316960, 0, + 124284021969194758465450309166353645376880, 0, + -204721258548015217049921875719981284186016, 0, + 277415422258095841688223780704620656114900, 0, + -313237834141273382807123548182995095192800, 0, + 297432255354328395601259515935229287637200, 0, + -239057700565161140389797367947941296605600, 0, + 163356095386193445933028201431093219347160, 0, + -95158890516229191805647495979277603503200, 0, + 47310254620162038075933656063247634556400, 0, + -20071017111583894941305187420771723751200, 0, + 7255051932731034189479516844750603752850, 0, + -2228176940331017311443863996901733412640, 0, + 579006552594977616773047095969088431600, 0, + -126584428502545713788439446082310831200, 0, + 23112325428835593809686977515028663000, 0, + -3491517141958743235617737161547844000, 0, + 431305058712550634988073414073557200, 0, + -42927166660756742088912492757452000, 0, + 3378527005707706553294038781836500, 0, + -205277590220215081719131470288800, 0, + 9330799555464321896324157740400, 0, + -304114948474392713657972548576, 0, + 6695289961520387531608984680, 0, + -91048139350447232095702560, 0, + 659769125727878493447120, 0, + -1905929106580294155360, 0, + 916312070471295267] + + roots_init = matrix(['-0.999', '-0.996', '-0.991', '-0.983', '-0.973', + '-0.961', '-0.946', '-0.93', '-0.911', '-0.889', + '-0.866', '-0.841', '-0.813', '-0.784', '-0.753', + '-0.72', '-0.685', '-0.649', '-0.611', '-0.572', + '-0.531', '-0.489', '-0.446', '-0.402', '-0.357', + '-0.311', '-0.265', '-0.217', '-0.17', '-0.121', + '-0.073', '-0.0243', '0.0243', '0.073', '0.121', + '0.17', '0.217', '0.265', ' 0.311', '0.357', + '0.402', '0.446', '0.489', '0.531', '0.572', + '0.611', 
'0.649', '0.685', '0.72', '0.753', + '0.784', '0.813', '0.841', '0.866', '0.889', + '0.911', '0.93', '0.946', '0.961', '0.973', + '0.983', '0.991', '0.996', '0.999', '1.0']) + with mp.workdps(2*mp.dps): + roots_exact = polyroots(coeffs, maxsteps=50, cleanup=True, error=False, + extraprec=2*extra_prec) + with pytest.raises(mp.NoConvergence): + polyroots(coeffs, maxsteps=5, cleanup=True, error=False, + extraprec=extra_prec) + roots,err = polyroots(coeffs, maxsteps=5, cleanup=True, error=True, + extraprec=extra_prec,roots_init=roots_init) + assert max(matrix(roots_exact)-matrix(roots).apply(abs)) < err + roots1,err1 = polyroots(coeffs, maxsteps=25, cleanup=True, error=True, + extraprec=extra_prec,roots_init=roots_init[:60]) + assert max(matrix(roots_exact)-matrix(roots1).apply(abs)) < err1 + +def test_pade(): + one = mpf(1) + mp.dps = 20 + N = 10 + a = [one] + k = 1 + for i in range(1, N+1): + k *= i + a.append(one/k) + p, q = pade(a, N//2, N//2) + for x in arange(0, 1, 0.1): + r = polyval(p[::-1], x)/polyval(q[::-1], x) + assert(r.ae(exp(x), 1.0e-10)) + mp.dps = 15 + +def test_fourier(): + mp.dps = 15 + c, s = fourier(lambda x: x+1, [-1, 2], 2) + #plot([lambda x: x+1, lambda x: fourierval((c, s), [-1, 2], x)], [-1, 2]) + assert c[0].ae(1.5) + assert c[1].ae(-3*sqrt(3)/(2*pi)) + assert c[2].ae(3*sqrt(3)/(4*pi)) + assert s[0] == 0 + assert s[1].ae(3/(2*pi)) + assert s[2].ae(3/(4*pi)) + assert fourierval((c, s), [-1, 2], 1).ae(1.9134966715663442) + +def test_differint(): + mp.dps = 15 + assert differint(lambda t: t, 2, -0.5).ae(8*sqrt(2/pi)/3) + +def test_invlap(): + mp.dps = 15 + t = 0.01 + fp = lambda p: 1/(p+1)**2 + ft = lambda t: t*exp(-t) + ftt = ft(t) + assert invertlaplace(fp,t,method='talbot').ae(ftt) + assert invertlaplace(fp,t,method='stehfest').ae(ftt) + assert invertlaplace(fp,t,method='dehoog').ae(ftt) + assert invertlaplace(fp,t,method='cohen').ae(ftt) + t = 1.0 + ftt = ft(t) + assert invertlaplace(fp,t,method='talbot').ae(ftt) + assert 
invertlaplace(fp,t,method='stehfest').ae(ftt) + assert invertlaplace(fp,t,method='dehoog').ae(ftt) + assert invertlaplace(fp,t,method='cohen').ae(ftt) + + t = 0.01 + fp = lambda p: log(p)/p + ft = lambda t: -euler-log(t) + ftt = ft(t) + assert invertlaplace(fp,t,method='talbot').ae(ftt) + assert invertlaplace(fp,t,method='stehfest').ae(ftt) + assert invertlaplace(fp,t,method='dehoog').ae(ftt) + assert invertlaplace(fp,t,method='cohen').ae(ftt) + t = 1.0 + ftt = ft(t) + assert invertlaplace(fp,t,method='talbot').ae(ftt) + assert invertlaplace(fp,t,method='stehfest').ae(ftt) + assert invertlaplace(fp,t,method='dehoog').ae(ftt) + assert invertlaplace(fp,t,method='cohen').ae(ftt) diff --git a/.venv/lib/python3.11/site-packages/mpmath/tests/test_division.py b/.venv/lib/python3.11/site-packages/mpmath/tests/test_division.py new file mode 100644 index 0000000000000000000000000000000000000000..c704cadeb953793ac0a887aa09c4278cf68a2824 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/tests/test_division.py @@ -0,0 +1,143 @@ +from mpmath.libmp import * +from mpmath import mpf, mp + +from random import randint, choice, seed + +all_modes = [round_floor, round_ceiling, round_down, round_up, round_nearest] + +fb = from_bstr +fi = from_int +ff = from_float + + +def test_div_1_3(): + a = fi(1) + b = fi(3) + c = fi(-1) + + # floor rounds down, ceiling rounds up + assert mpf_div(a, b, 7, round_floor) == fb('0.01010101') + assert mpf_div(a, b, 7, round_ceiling) == fb('0.01010110') + assert mpf_div(a, b, 7, round_down) == fb('0.01010101') + assert mpf_div(a, b, 7, round_up) == fb('0.01010110') + assert mpf_div(a, b, 7, round_nearest) == fb('0.01010101') + + # floor rounds up, ceiling rounds down + assert mpf_div(c, b, 7, round_floor) == fb('-0.01010110') + assert mpf_div(c, b, 7, round_ceiling) == fb('-0.01010101') + assert mpf_div(c, b, 7, round_down) == fb('-0.01010101') + assert mpf_div(c, b, 7, round_up) == fb('-0.01010110') + assert mpf_div(c, b, 7, round_nearest) == 
fb('-0.01010101') + +def test_mpf_divi_1_3(): + a = 1 + b = fi(3) + c = -1 + assert mpf_rdiv_int(a, b, 7, round_floor) == fb('0.01010101') + assert mpf_rdiv_int(a, b, 7, round_ceiling) == fb('0.01010110') + assert mpf_rdiv_int(a, b, 7, round_down) == fb('0.01010101') + assert mpf_rdiv_int(a, b, 7, round_up) == fb('0.01010110') + assert mpf_rdiv_int(a, b, 7, round_nearest) == fb('0.01010101') + assert mpf_rdiv_int(c, b, 7, round_floor) == fb('-0.01010110') + assert mpf_rdiv_int(c, b, 7, round_ceiling) == fb('-0.01010101') + assert mpf_rdiv_int(c, b, 7, round_down) == fb('-0.01010101') + assert mpf_rdiv_int(c, b, 7, round_up) == fb('-0.01010110') + assert mpf_rdiv_int(c, b, 7, round_nearest) == fb('-0.01010101') + + +def test_div_300(): + + q = fi(1000000) + a = fi(300499999) # a/q is a little less than a half-integer + b = fi(300500000) # b/q exactly a half-integer + c = fi(300500001) # c/q is a little more than a half-integer + + # Check nearest integer rounding (prec=9 as 2**8 < 300 < 2**9) + + assert mpf_div(a, q, 9, round_down) == fi(300) + assert mpf_div(b, q, 9, round_down) == fi(300) + assert mpf_div(c, q, 9, round_down) == fi(300) + assert mpf_div(a, q, 9, round_up) == fi(301) + assert mpf_div(b, q, 9, round_up) == fi(301) + assert mpf_div(c, q, 9, round_up) == fi(301) + + # Nearest even integer is down + assert mpf_div(a, q, 9, round_nearest) == fi(300) + assert mpf_div(b, q, 9, round_nearest) == fi(300) + assert mpf_div(c, q, 9, round_nearest) == fi(301) + + # Nearest even integer is up + a = fi(301499999) + b = fi(301500000) + c = fi(301500001) + assert mpf_div(a, q, 9, round_nearest) == fi(301) + assert mpf_div(b, q, 9, round_nearest) == fi(302) + assert mpf_div(c, q, 9, round_nearest) == fi(302) + + +def test_tight_integer_division(): + # Test that integer division at tightest possible precision is exact + N = 100 + seed(1) + for i in range(N): + a = choice([1, -1]) * randint(1, 1< 0.5 + m = 1 + int(mp.rand() * 10) + n = 1 + int(mp.rand() * 10) + A = 2 
* mp.randmatrix(m, n) - 1 + if mp.rand() > 0.5: + A *= 10 + for x in xrange(m): + for y in xrange(n): + A[x,y]=int(A[x,y]) + + run_svd_r(A, full_matrices = full, verbose = False) + +def test_svd_c_rand(): + for i in xrange(5): + full = mp.rand() > 0.5 + m = 1 + int(mp.rand() * 10) + n = 1 + int(mp.rand() * 10) + A = (2 * mp.randmatrix(m, n) - 1) + 1j * (2 * mp.randmatrix(m, n) - 1) + if mp.rand() > 0.5: + A *= 10 + for x in xrange(m): + for y in xrange(n): + A[x,y]=int(mp.re(A[x,y])) + 1j * int(mp.im(A[x,y])) + + run_svd_c(A, full_matrices=full, verbose=False) + +def test_svd_test_case(): + # a test case from Golub and Reinsch + # (see wilkinson/reinsch: handbook for auto. comp., vol ii-linear algebra, 134-151(1971).) + + eps = mp.exp(0.8 * mp.log(mp.eps)) + + a = [[22, 10, 2, 3, 7], + [14, 7, 10, 0, 8], + [-1, 13, -1, -11, 3], + [-3, -2, 13, -2, 4], + [ 9, 8, 1, -2, 4], + [ 9, 1, -7, 5, -1], + [ 2, -6, 6, 5, 1], + [ 4, 5, 0, -2, 2]] + + a = mp.matrix(a) + b = mp.matrix([mp.sqrt(1248), 20, mp.sqrt(384), 0, 0]) + + S = mp.svd_r(a, compute_uv = False) + S -= b + assert mp.mnorm(S) < eps + + S = mp.svd_c(a, compute_uv = False) + S -= b + assert mp.mnorm(S) < eps + + +def test_gauss_quadrature_static(): + a = [-0.57735027, 0.57735027] + b = [ 1, 1] + run_gauss("legendre", a , b) + + a = [ -0.906179846, -0.538469310, 0, 0.538469310, 0.906179846] + b = [ 0.23692689, 0.47862867, 0.56888889, 0.47862867, 0.23692689] + run_gauss("legendre", a , b) + + a = [ 0.06943184, 0.33000948, 0.66999052, 0.93056816] + b = [ 0.17392742, 0.32607258, 0.32607258, 0.17392742] + run_gauss("legendre01", a , b) + + a = [-0.70710678, 0.70710678] + b = [ 0.88622693, 0.88622693] + run_gauss("hermite", a , b) + + a = [ -2.02018287, -0.958572465, 0, 0.958572465, 2.02018287] + b = [ 0.01995324, 0.39361932, 0.94530872, 0.39361932, 0.01995324] + run_gauss("hermite", a , b) + + a = [ 0.41577456, 2.29428036, 6.28994508] + b = [ 0.71109301, 0.27851773, 0.01038926] + run_gauss("laguerre", a , b) + +def 
test_gauss_quadrature_dynamic(verbose = False): + n = 5 + + A = mp.randmatrix(2 * n, 1) + + def F(x): + r = 0 + for i in xrange(len(A) - 1, -1, -1): + r = r * x + A[i] + return r + + def run(qtype, FW, R, alpha = 0, beta = 0): + X, W = mp.gauss_quadrature(n, qtype, alpha = alpha, beta = beta) + + a = 0 + for i in xrange(len(X)): + a += W[i] * F(X[i]) + + b = mp.quad(lambda x: FW(x) * F(x), R) + + c = mp.fabs(a - b) + + if verbose: + print(qtype, c, a, b) + + assert c < 1e-5 + + run("legendre", lambda x: 1, [-1, 1]) + run("legendre01", lambda x: 1, [0, 1]) + run("hermite", lambda x: mp.exp(-x*x), [-mp.inf, mp.inf]) + run("laguerre", lambda x: mp.exp(-x), [0, mp.inf]) + run("glaguerre", lambda x: mp.sqrt(x)*mp.exp(-x), [0, mp.inf], alpha = 1 / mp.mpf(2)) + run("chebyshev1", lambda x: 1/mp.sqrt(1-x*x), [-1, 1]) + run("chebyshev2", lambda x: mp.sqrt(1-x*x), [-1, 1]) + run("jacobi", lambda x: (1-x)**(1/mp.mpf(3)) * (1+x)**(1/mp.mpf(5)), [-1, 1], alpha = 1 / mp.mpf(3), beta = 1 / mp.mpf(5) ) diff --git a/.venv/lib/python3.11/site-packages/mpmath/tests/test_elliptic.py b/.venv/lib/python3.11/site-packages/mpmath/tests/test_elliptic.py new file mode 100644 index 0000000000000000000000000000000000000000..4dddc2df34b8d2fa7f2028b3501e5b7f140d8912 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/tests/test_elliptic.py @@ -0,0 +1,670 @@ +""" +Limited tests of the elliptic functions module. A full suite of +extensive testing can be found in elliptic_torture_tests.py + +Author of the first version: M.T. Taschuk + +References: + +[1] Abramowitz & Stegun. 
'Handbook of Mathematical Functions, 9th Ed.', + (Dover duplicate of 1972 edition) +[2] Whittaker 'A Course of Modern Analysis, 4th Ed.', 1946, + Cambridge University Press + +""" + +import mpmath +import random +import pytest + +from mpmath import * + +def mpc_ae(a, b, eps=eps): + res = True + res = res and a.real.ae(b.real, eps) + res = res and a.imag.ae(b.imag, eps) + return res + +zero = mpf(0) +one = mpf(1) + +jsn = ellipfun('sn') +jcn = ellipfun('cn') +jdn = ellipfun('dn') + +calculate_nome = lambda k: qfrom(k=k) + +def test_ellipfun(): + mp.dps = 15 + assert ellipfun('ss', 0, 0) == 1 + assert ellipfun('cc', 0, 0) == 1 + assert ellipfun('dd', 0, 0) == 1 + assert ellipfun('nn', 0, 0) == 1 + assert ellipfun('sn', 0.25, 0).ae(sin(0.25)) + assert ellipfun('cn', 0.25, 0).ae(cos(0.25)) + assert ellipfun('dn', 0.25, 0).ae(1) + assert ellipfun('ns', 0.25, 0).ae(csc(0.25)) + assert ellipfun('nc', 0.25, 0).ae(sec(0.25)) + assert ellipfun('nd', 0.25, 0).ae(1) + assert ellipfun('sc', 0.25, 0).ae(tan(0.25)) + assert ellipfun('sd', 0.25, 0).ae(sin(0.25)) + assert ellipfun('cd', 0.25, 0).ae(cos(0.25)) + assert ellipfun('cs', 0.25, 0).ae(cot(0.25)) + assert ellipfun('dc', 0.25, 0).ae(sec(0.25)) + assert ellipfun('ds', 0.25, 0).ae(csc(0.25)) + assert ellipfun('sn', 0.25, 1).ae(tanh(0.25)) + assert ellipfun('cn', 0.25, 1).ae(sech(0.25)) + assert ellipfun('dn', 0.25, 1).ae(sech(0.25)) + assert ellipfun('ns', 0.25, 1).ae(coth(0.25)) + assert ellipfun('nc', 0.25, 1).ae(cosh(0.25)) + assert ellipfun('nd', 0.25, 1).ae(cosh(0.25)) + assert ellipfun('sc', 0.25, 1).ae(sinh(0.25)) + assert ellipfun('sd', 0.25, 1).ae(sinh(0.25)) + assert ellipfun('cd', 0.25, 1).ae(1) + assert ellipfun('cs', 0.25, 1).ae(csch(0.25)) + assert ellipfun('dc', 0.25, 1).ae(1) + assert ellipfun('ds', 0.25, 1).ae(csch(0.25)) + assert ellipfun('sn', 0.25, 0.5).ae(0.24615967096986145833) + assert ellipfun('cn', 0.25, 0.5).ae(0.96922928989378439337) + assert ellipfun('dn', 0.25, 0.5).ae(0.98473484156599474563) + 
assert ellipfun('ns', 0.25, 0.5).ae(4.0624038700573130369) + assert ellipfun('nc', 0.25, 0.5).ae(1.0317476065024692949) + assert ellipfun('nd', 0.25, 0.5).ae(1.0155017958029488665) + assert ellipfun('sc', 0.25, 0.5).ae(0.25397465134058993408) + assert ellipfun('sd', 0.25, 0.5).ae(0.24997558792415733063) + assert ellipfun('cd', 0.25, 0.5).ae(0.98425408443195497052) + assert ellipfun('cs', 0.25, 0.5).ae(3.9374008182374110826) + assert ellipfun('dc', 0.25, 0.5).ae(1.0159978158253033913) + assert ellipfun('ds', 0.25, 0.5).ae(4.0003906313579720593) + + + + +def test_calculate_nome(): + mp.dps = 100 + + q = calculate_nome(zero) + assert(q == zero) + + mp.dps = 25 + # used Mathematica's EllipticNomeQ[m] + math1 = [(mpf(1)/10, mpf('0.006584651553858370274473060')), + (mpf(2)/10, mpf('0.01394285727531826872146409')), + (mpf(3)/10, mpf('0.02227743615715350822901627')), + (mpf(4)/10, mpf('0.03188334731336317755064299')), + (mpf(5)/10, mpf('0.04321391826377224977441774')), + (mpf(6)/10, mpf('0.05702025781460967637754953')), + (mpf(7)/10, mpf('0.07468994353717944761143751')), + (mpf(8)/10, mpf('0.09927369733882489703607378')), + (mpf(9)/10, mpf('0.1401731269542615524091055')), + (mpf(9)/10, mpf('0.1401731269542615524091055'))] + + for i in math1: + m = i[0] + q = calculate_nome(sqrt(m)) + assert q.ae(i[1]) + + mp.dps = 15 + +def test_jtheta(): + mp.dps = 25 + + z = q = zero + for n in range(1,5): + value = jtheta(n, z, q) + assert(value == (n-1)//2) + + for q in [one, mpf(2)]: + for n in range(1,5): + pytest.raises(ValueError, lambda: jtheta(n, z, q)) + + z = one/10 + q = one/11 + + # Mathematical N[EllipticTheta[1, 1/10, 1/11], 25] + res = mpf('0.1069552990104042681962096') + result = jtheta(1, z, q) + assert(result.ae(res)) + + # Mathematica N[EllipticTheta[2, 1/10, 1/11], 25] + res = mpf('1.101385760258855791140606') + result = jtheta(2, z, q) + assert(result.ae(res)) + + # Mathematica N[EllipticTheta[3, 1/10, 1/11], 25] + res = mpf('1.178319743354331061795905') + result = 
jtheta(3, z, q) + assert(result.ae(res)) + + # Mathematica N[EllipticTheta[4, 1/10, 1/11], 25] + res = mpf('0.8219318954665153577314573') + result = jtheta(4, z, q) + assert(result.ae(res)) + + # test for sin zeros for jtheta(1, z, q) + # test for cos zeros for jtheta(2, z, q) + z1 = pi + z2 = pi/2 + for i in range(10): + qstring = str(random.random()) + q = mpf(qstring) + result = jtheta(1, z1, q) + assert(result.ae(0)) + result = jtheta(2, z2, q) + assert(result.ae(0)) + mp.dps = 15 + + +def test_jtheta_issue_79(): + # near the circle of covergence |q| = 1 the convergence slows + # down; for |q| > Q_LIM the theta functions raise ValueError + mp.dps = 30 + mp.dps += 30 + q = mpf(6)/10 - one/10**6 - mpf(8)/10 * j + mp.dps -= 30 + # Mathematica run first + # N[EllipticTheta[3, 1, 6/10 - 10^-6 - 8/10*I], 2000] + # then it works: + # N[EllipticTheta[3, 1, 6/10 - 10^-6 - 8/10*I], 30] + res = mpf('32.0031009628901652627099524264') + \ + mpf('16.6153027998236087899308935624') * j + result = jtheta(3, 1, q) + # check that for abs(q) > Q_LIM a ValueError exception is raised + mp.dps += 30 + q = mpf(6)/10 - one/10**7 - mpf(8)/10 * j + mp.dps -= 30 + pytest.raises(ValueError, lambda: jtheta(3, 1, q)) + + # bug reported in issue 79 + mp.dps = 100 + z = (1+j)/3 + q = mpf(368983957219251)/10**15 + mpf(636363636363636)/10**15 * j + # Mathematica N[EllipticTheta[1, z, q], 35] + res = mpf('2.4439389177990737589761828991467471') + \ + mpf('0.5446453005688226915290954851851490') *j + mp.dps = 30 + result = jtheta(1, z, q) + assert(result.ae(res)) + mp.dps = 80 + z = 3 + 4*j + q = 0.5 + 0.5*j + r1 = jtheta(1, z, q) + mp.dps = 15 + r2 = jtheta(1, z, q) + assert r1.ae(r2) + mp.dps = 80 + z = 3 + j + q1 = exp(j*3) + # longer test + # for n in range(1, 6) + for n in range(1, 2): + mp.dps = 80 + q = q1*(1 - mpf(1)/10**n) + r1 = jtheta(1, z, q) + mp.dps = 15 + r2 = jtheta(1, z, q) + assert r1.ae(r2) + mp.dps = 15 + # issue 79 about high derivatives + assert jtheta(3, 4.5, 0.25, 
9).ae(1359.04892680683) + assert jtheta(3, 4.5, 0.25, 50).ae(-6.14832772630905e+33) + mp.dps = 50 + r = jtheta(3, 4.5, 0.25, 9) + assert r.ae('1359.048926806828939547859396600218966947753213803') + r = jtheta(3, 4.5, 0.25, 50) + assert r.ae('-6148327726309051673317975084654262.4119215720343656') + +def test_jtheta_identities(): + """ + Tests the some of the jacobi identidies found in Abramowitz, + Sec. 16.28, Pg. 576. The identities are tested to 1 part in 10^98. + """ + mp.dps = 110 + eps1 = ldexp(eps, 30) + + for i in range(10): + qstring = str(random.random()) + q = mpf(qstring) + + zstring = str(10*random.random()) + z = mpf(zstring) + # Abramowitz 16.28.1 + # v_1(z, q)**2 * v_4(0, q)**2 = v_3(z, q)**2 * v_2(0, q)**2 + # - v_2(z, q)**2 * v_3(0, q)**2 + term1 = (jtheta(1, z, q)**2) * (jtheta(4, zero, q)**2) + term2 = (jtheta(3, z, q)**2) * (jtheta(2, zero, q)**2) + term3 = (jtheta(2, z, q)**2) * (jtheta(3, zero, q)**2) + equality = term1 - term2 + term3 + assert(equality.ae(0, eps1)) + + zstring = str(100*random.random()) + z = mpf(zstring) + # Abramowitz 16.28.2 + # v_2(z, q)**2 * v_4(0, q)**2 = v_4(z, q)**2 * v_2(0, q)**2 + # - v_1(z, q)**2 * v_3(0, q)**2 + term1 = (jtheta(2, z, q)**2) * (jtheta(4, zero, q)**2) + term2 = (jtheta(4, z, q)**2) * (jtheta(2, zero, q)**2) + term3 = (jtheta(1, z, q)**2) * (jtheta(3, zero, q)**2) + equality = term1 - term2 + term3 + assert(equality.ae(0, eps1)) + + # Abramowitz 16.28.3 + # v_3(z, q)**2 * v_4(0, q)**2 = v_4(z, q)**2 * v_3(0, q)**2 + # - v_1(z, q)**2 * v_2(0, q)**2 + term1 = (jtheta(3, z, q)**2) * (jtheta(4, zero, q)**2) + term2 = (jtheta(4, z, q)**2) * (jtheta(3, zero, q)**2) + term3 = (jtheta(1, z, q)**2) * (jtheta(2, zero, q)**2) + equality = term1 - term2 + term3 + assert(equality.ae(0, eps1)) + + # Abramowitz 16.28.4 + # v_4(z, q)**2 * v_4(0, q)**2 = v_3(z, q)**2 * v_3(0, q)**2 + # - v_2(z, q)**2 * v_2(0, q)**2 + term1 = (jtheta(4, z, q)**2) * (jtheta(4, zero, q)**2) + term2 = (jtheta(3, z, q)**2) * (jtheta(3, 
zero, q)**2) + term3 = (jtheta(2, z, q)**2) * (jtheta(2, zero, q)**2) + equality = term1 - term2 + term3 + assert(equality.ae(0, eps1)) + + # Abramowitz 16.28.5 + # v_2(0, q)**4 + v_4(0, q)**4 == v_3(0, q)**4 + term1 = (jtheta(2, zero, q))**4 + term2 = (jtheta(4, zero, q))**4 + term3 = (jtheta(3, zero, q))**4 + equality = term1 + term2 - term3 + assert(equality.ae(0, eps1)) + mp.dps = 15 + +def test_jtheta_complex(): + mp.dps = 30 + z = mpf(1)/4 + j/8 + q = mpf(1)/3 + j/7 + # Mathematica N[EllipticTheta[1, 1/4 + I/8, 1/3 + I/7], 35] + res = mpf('0.31618034835986160705729105731678285') + \ + mpf('0.07542013825835103435142515194358975') * j + r = jtheta(1, z, q) + assert(mpc_ae(r, res)) + + # Mathematica N[EllipticTheta[2, 1/4 + I/8, 1/3 + I/7], 35] + res = mpf('1.6530986428239765928634711417951828') + \ + mpf('0.2015344864707197230526742145361455') * j + r = jtheta(2, z, q) + assert(mpc_ae(r, res)) + + # Mathematica N[EllipticTheta[3, 1/4 + I/8, 1/3 + I/7], 35] + res = mpf('1.6520564411784228184326012700348340') + \ + mpf('0.1998129119671271328684690067401823') * j + r = jtheta(3, z, q) + assert(mpc_ae(r, res)) + + # Mathematica N[EllipticTheta[4, 1/4 + I/8, 1/3 + I/7], 35] + res = mpf('0.37619082382228348252047624089973824') - \ + mpf('0.15623022130983652972686227200681074') * j + r = jtheta(4, z, q) + assert(mpc_ae(r, res)) + + # check some theta function identities + mp.dps = 100 + z = mpf(1)/4 + j/8 + q = mpf(1)/3 + j/7 + mp.dps += 10 + a = [0,0, jtheta(2, 0, q), jtheta(3, 0, q), jtheta(4, 0, q)] + t = [0, jtheta(1, z, q), jtheta(2, z, q), jtheta(3, z, q), jtheta(4, z, q)] + r = [(t[2]*a[4])**2 - (t[4]*a[2])**2 + (t[1] *a[3])**2, + (t[3]*a[4])**2 - (t[4]*a[3])**2 + (t[1] *a[2])**2, + (t[1]*a[4])**2 - (t[3]*a[2])**2 + (t[2] *a[3])**2, + (t[4]*a[4])**2 - (t[3]*a[3])**2 + (t[2] *a[2])**2, + a[2]**4 + a[4]**4 - a[3]**4] + mp.dps -= 10 + for x in r: + assert(mpc_ae(x, mpc(0))) + mp.dps = 15 + +def test_djtheta(): + mp.dps = 30 + + z = one/7 + j/3 + q = one/8 + j/5 + 
# Mathematica N[EllipticThetaPrime[1, 1/7 + I/3, 1/8 + I/5], 35] + res = mpf('1.5555195883277196036090928995803201') - \ + mpf('0.02439761276895463494054149673076275') * j + result = jtheta(1, z, q, 1) + assert(mpc_ae(result, res)) + + # Mathematica N[EllipticThetaPrime[2, 1/7 + I/3, 1/8 + I/5], 35] + res = mpf('0.19825296689470982332701283509685662') - \ + mpf('0.46038135182282106983251742935250009') * j + result = jtheta(2, z, q, 1) + assert(mpc_ae(result, res)) + + # Mathematica N[EllipticThetaPrime[3, 1/7 + I/3, 1/8 + I/5], 35] + res = mpf('0.36492498415476212680896699407390026') - \ + mpf('0.57743812698666990209897034525640369') * j + result = jtheta(3, z, q, 1) + assert(mpc_ae(result, res)) + + # Mathematica N[EllipticThetaPrime[4, 1/7 + I/3, 1/8 + I/5], 35] + res = mpf('-0.38936892528126996010818803742007352') + \ + mpf('0.66549886179739128256269617407313625') * j + result = jtheta(4, z, q, 1) + assert(mpc_ae(result, res)) + + for i in range(10): + q = (one*random.random() + j*random.random())/2 + # identity in Wittaker, Watson &21.41 + a = jtheta(1, 0, q, 1) + b = jtheta(2, 0, q)*jtheta(3, 0, q)*jtheta(4, 0, q) + assert(a.ae(b)) + + # test higher derivatives + mp.dps = 20 + for q,z in [(one/3, one/5), (one/3 + j/8, one/5), + (one/3, one/5 + j/8), (one/3 + j/7, one/5 + j/8)]: + for n in [1, 2, 3, 4]: + r = jtheta(n, z, q, 2) + r1 = diff(lambda zz: jtheta(n, zz, q), z, n=2) + assert r.ae(r1) + r = jtheta(n, z, q, 3) + r1 = diff(lambda zz: jtheta(n, zz, q), z, n=3) + assert r.ae(r1) + + # identity in Wittaker, Watson &21.41 + q = one/3 + z = zero + a = [0]*5 + a[1] = jtheta(1, z, q, 3)/jtheta(1, z, q, 1) + for n in [2,3,4]: + a[n] = jtheta(n, z, q, 2)/jtheta(n, z, q) + equality = a[2] + a[3] + a[4] - a[1] + assert(equality.ae(0)) + mp.dps = 15 + +def test_jsn(): + """ + Test some special cases of the sn(z, q) function. 
+ """ + mp.dps = 100 + + # trival case + result = jsn(zero, zero) + assert(result == zero) + + # Abramowitz Table 16.5 + # + # sn(0, m) = 0 + + for i in range(10): + qstring = str(random.random()) + q = mpf(qstring) + + equality = jsn(zero, q) + assert(equality.ae(0)) + + # Abramowitz Table 16.6.1 + # + # sn(z, 0) = sin(z), m == 0 + # + # sn(z, 1) = tanh(z), m == 1 + # + # It would be nice to test these, but I find that they run + # in to numerical trouble. I'm currently treating as a boundary + # case for sn function. + + mp.dps = 25 + arg = one/10 + #N[JacobiSN[1/10, 2^-100], 25] + res = mpf('0.09983341664682815230681420') + m = ldexp(one, -100) + result = jsn(arg, m) + assert(result.ae(res)) + + # N[JacobiSN[1/10, 1/10], 25] + res = mpf('0.09981686718599080096451168') + result = jsn(arg, arg) + assert(result.ae(res)) + mp.dps = 15 + +def test_jcn(): + """ + Test some special cases of the cn(z, q) function. + """ + mp.dps = 100 + + # Abramowitz Table 16.5 + # cn(0, q) = 1 + qstring = str(random.random()) + q = mpf(qstring) + cn = jcn(zero, q) + assert(cn.ae(one)) + + # Abramowitz Table 16.6.2 + # + # cn(u, 0) = cos(u), m == 0 + # + # cn(u, 1) = sech(z), m == 1 + # + # It would be nice to test these, but I find that they run + # in to numerical trouble. I'm currently treating as a boundary + # case for cn function. + + mp.dps = 25 + arg = one/10 + m = ldexp(one, -100) + #N[JacobiCN[1/10, 2^-100], 25] + res = mpf('0.9950041652780257660955620') + result = jcn(arg, m) + assert(result.ae(res)) + + # N[JacobiCN[1/10, 1/10], 25] + res = mpf('0.9950058256237368748520459') + result = jcn(arg, arg) + assert(result.ae(res)) + mp.dps = 15 + +def test_jdn(): + """ + Test some special cases of the dn(z, q) function. 
+ """ + mp.dps = 100 + + # Abramowitz Table 16.5 + # dn(0, q) = 1 + mstring = str(random.random()) + m = mpf(mstring) + + dn = jdn(zero, m) + assert(dn.ae(one)) + + mp.dps = 25 + # N[JacobiDN[1/10, 1/10], 25] + res = mpf('0.9995017055025556219713297') + arg = one/10 + result = jdn(arg, arg) + assert(result.ae(res)) + mp.dps = 15 + + +def test_sn_cn_dn_identities(): + """ + Tests the some of the jacobi elliptic function identities found + on Mathworld. Haven't found in Abramowitz. + """ + mp.dps = 100 + N = 5 + for i in range(N): + qstring = str(random.random()) + q = mpf(qstring) + zstring = str(100*random.random()) + z = mpf(zstring) + + # MathWorld + # sn(z, q)**2 + cn(z, q)**2 == 1 + term1 = jsn(z, q)**2 + term2 = jcn(z, q)**2 + equality = one - term1 - term2 + assert(equality.ae(0)) + + # MathWorld + # k**2 * sn(z, m)**2 + dn(z, m)**2 == 1 + for i in range(N): + mstring = str(random.random()) + m = mpf(mstring) + k = m.sqrt() + zstring = str(10*random.random()) + z = mpf(zstring) + term1 = k**2 * jsn(z, m)**2 + term2 = jdn(z, m)**2 + equality = one - term1 - term2 + assert(equality.ae(0)) + + + for i in range(N): + mstring = str(random.random()) + m = mpf(mstring) + k = m.sqrt() + zstring = str(random.random()) + z = mpf(zstring) + + # MathWorld + # k**2 * cn(z, m)**2 + (1 - k**2) = dn(z, m)**2 + term1 = k**2 * jcn(z, m)**2 + term2 = 1 - k**2 + term3 = jdn(z, m)**2 + equality = term3 - term1 - term2 + assert(equality.ae(0)) + + K = ellipk(k**2) + # Abramowitz Table 16.5 + # sn(K, m) = 1; K is K(k), first complete elliptic integral + r = jsn(K, m) + assert(r.ae(one)) + + # Abramowitz Table 16.5 + # cn(K, q) = 0; K is K(k), first complete elliptic integral + equality = jcn(K, m) + assert(equality.ae(0)) + + # Abramowitz Table 16.6.3 + # dn(z, 0) = 1, m == 0 + z = m + value = jdn(z, zero) + assert(value.ae(one)) + + mp.dps = 15 + +def test_sn_cn_dn_complex(): + mp.dps = 30 + # N[JacobiSN[1/4 + I/8, 1/3 + I/7], 35] in Mathematica + res = 
mpf('0.2495674401066275492326652143537') + \ + mpf('0.12017344422863833381301051702823') * j + u = mpf(1)/4 + j/8 + m = mpf(1)/3 + j/7 + r = jsn(u, m) + assert(mpc_ae(r, res)) + + #N[JacobiCN[1/4 + I/8, 1/3 + I/7], 35] + res = mpf('0.9762691700944007312693721148331') - \ + mpf('0.0307203994181623243583169154824')*j + r = jcn(u, m) + #assert r.real.ae(res.real) + #assert r.imag.ae(res.imag) + assert(mpc_ae(r, res)) + + #N[JacobiDN[1/4 + I/8, 1/3 + I/7], 35] + res = mpf('0.99639490163039577560547478589753039') - \ + mpf('0.01346296520008176393432491077244994')*j + r = jdn(u, m) + assert(mpc_ae(r, res)) + mp.dps = 15 + +def test_elliptic_integrals(): + # Test cases from Carlson's paper + mp.dps = 15 + assert elliprd(0,2,1).ae(1.7972103521033883112) + assert elliprd(2,3,4).ae(0.16510527294261053349) + assert elliprd(j,-j,2).ae(0.65933854154219768919) + assert elliprd(0,j,-j).ae(1.2708196271909686299 + 2.7811120159520578777j) + assert elliprd(0,j-1,j).ae(-1.8577235439239060056 - 0.96193450888838559989j) + assert elliprd(-2-j,-j,-1+j).ae(1.8249027393703805305 - 1.2218475784827035855j) + # extra test cases + assert elliprg(0,0,0) == 0 + assert elliprg(0,0,16).ae(2) + assert elliprg(0,16,0).ae(2) + assert elliprg(16,0,0).ae(2) + assert elliprg(1,4,0).ae(1.2110560275684595248036) + assert elliprg(1,0,4).ae(1.2110560275684595248036) + assert elliprg(0,4,1).ae(1.2110560275684595248036) + # should be symmetric -- fixes a bug present in the paper + x,y,z = 1,1j,-1+1j + assert elliprg(x,y,z).ae(0.64139146875812627545 + 0.58085463774808290907j) + assert elliprg(x,z,y).ae(0.64139146875812627545 + 0.58085463774808290907j) + assert elliprg(y,x,z).ae(0.64139146875812627545 + 0.58085463774808290907j) + assert elliprg(y,z,x).ae(0.64139146875812627545 + 0.58085463774808290907j) + assert elliprg(z,x,y).ae(0.64139146875812627545 + 0.58085463774808290907j) + assert elliprg(z,y,x).ae(0.64139146875812627545 + 0.58085463774808290907j) + + for n in [5, 15, 30, 60, 100]: + mp.dps = n + assert 
elliprf(1,2,0).ae('1.3110287771460599052324197949455597068413774757158115814084108519003952935352071251151477664807145467230678763') + assert elliprf(0.5,1,0).ae('1.854074677301371918433850347195260046217598823521766905585928045056021776838119978357271861650371897277771871') + assert elliprf(j,-j,0).ae('1.854074677301371918433850347195260046217598823521766905585928045056021776838119978357271861650371897277771871') + assert elliprf(j-1,j,0).ae(mpc('0.79612586584233913293056938229563057846592264089185680214929401744498956943287031832657642790719940442165621412', + '-1.2138566698364959864300942567386038975419875860741507618279563735753073152507112254567291141460317931258599889')) + assert elliprf(2,3,4).ae('0.58408284167715170669284916892566789240351359699303216166309375305508295130412919665541330837704050454472379308') + assert elliprf(j,-j,2).ae('1.0441445654064360931078658361850779139591660747973017593275012615517220315993723776182276555339288363064476126') + assert elliprf(j-1,j,1-j).ae(mpc('0.93912050218619371196624617169781141161485651998254431830645241993282941057500174238125105410055253623847335313', + '-0.53296252018635269264859303449447908970360344322834582313172115220559316331271520508208025270300138589669326136')) + assert elliprc(0,0.25).ae(+pi) + assert elliprc(2.25,2).ae(+ln2) + assert elliprc(0,j).ae(mpc('1.1107207345395915617539702475151734246536554223439225557713489017391086982748684776438317336911913093408525532', + '-1.1107207345395915617539702475151734246536554223439225557713489017391086982748684776438317336911913093408525532')) + assert elliprc(-j,j).ae(mpc('1.2260849569072198222319655083097718755633725139745941606203839524036426936825652935738621522906572884239069297', + '-0.34471136988767679699935618332997956653521218571295874986708834375026550946053920574015526038040124556716711353')) + assert elliprc(0.25,-2).ae(ln2/3) + assert 
elliprc(j,-1).ae(mpc('0.77778596920447389875196055840799837589537035343923012237628610795937014001905822029050288316217145443865649819', + '0.1983248499342877364755170948292130095921681309577950696116251029742793455964385947473103628983664877025779304')) + assert elliprj(0,1,2,3).ae('0.77688623778582332014190282640545501102298064276022952731669118325952563819813258230708177398475643634103990878') + assert elliprj(2,3,4,5).ae('0.14297579667156753833233879421985774801466647854232626336218889885463800128817976132826443904216546421431528308') + assert elliprj(2,3,4,-1+j).ae(mpc('0.13613945827770535203521374457913768360237593025944342652613569368333226052158214183059386307242563164036672709', + '-0.38207561624427164249600936454845112611060375760094156571007648297226090050927156176977091273224510621553615189')) + assert elliprj(j,-j,0,2).ae('1.6490011662710884518243257224860232300246792717163891216346170272567376981346412066066050103935109581019055806') + assert elliprj(-1+j,-1-j,1,2).ae('0.94148358841220238083044612133767270187474673547917988681610772381758628963408843935027667916713866133196845063') + assert elliprj(j,-j,0,1-j).ae(mpc('1.8260115229009316249372594065790946657011067182850435297162034335356430755397401849070610280860044610878657501', + '1.2290661908643471500163617732957042849283739403009556715926326841959667290840290081010472716420690899886276961')) + assert elliprj(-1+j,-1-j,1,-3+j).ae(mpc('-0.61127970812028172123588152373622636829986597243716610650831553882054127570542477508023027578037045504958619422', + '-1.0684038390006807880182112972232562745485871763154040245065581157751693730095703406209466903752930797510491155')) + assert elliprj(-1+j,-2-j,-j,-1+j).ae(mpc('1.8249027393703805304622013339009022294368078659619988943515764258335975852685224202567854526307030593012768954', + '-1.2218475784827035854568450371590419833166777535029296025352291308244564398645467465067845461070602841312456831')) + + assert elliprg(0,16,16).ae(+pi) + assert 
elliprg(2,3,4).ae('1.7255030280692277601061148835701141842692457170470456590515892070736643637303053506944907685301315299153040991') + assert elliprg(0,j,-j).ae('0.42360654239698954330324956174109581824072295516347109253028968632986700241706737986160014699730561497106114281') + assert elliprg(j-1,j,0).ae(mpc('0.44660591677018372656731970402124510811555212083508861036067729944477855594654762496407405328607219895053798354', + '0.70768352357515390073102719507612395221369717586839400605901402910893345301718731499237159587077682267374159282')) + assert elliprg(-j,j-1,j).ae(mpc('0.36023392184473309033675652092928695596803358846377334894215349632203382573844427952830064383286995172598964266', + '0.40348623401722113740956336997761033878615232917480045914551915169013722542827052849476969199578321834819903921')) + assert elliprg(0, mpf('0.0796'), 4).ae('1.0284758090288040009838871385180217366569777284430590125081211090574701293154645750017813190805144572673802094') + mp.dps = 15 + + # more test cases for the branch of ellippi / elliprj + assert elliprj(-1-0.5j, -10-6j, -10-3j, -5+10j).ae(0.128470516743927699 + 0.102175950778504625j, abs_eps=1e-8) + assert elliprj(1.987, 4.463 - 1.614j, 0, -3.965).ae(-0.341575118513811305 - 0.394703757004268486j, abs_eps=1e-8) + assert elliprj(0.3068, -4.037+0.632j, 1.654, -0.9609).ae(-1.14735199581485639 - 0.134450158867472264j, abs_eps=1e-8) + assert elliprj(0.3068, -4.037-0.632j, 1.654, -0.9609).ae(1.758765901861727 - 0.161002343366626892j, abs_eps=1e-5) + assert elliprj(0.3068, -4.037+0.0632j, 1.654, -0.9609).ae(-1.17157627949475577 - 0.069182614173988811j, abs_eps=1e-8) + assert elliprj(0.3068, -4.037+0.00632j, 1.654, -0.9609).ae(-1.17337595670549633 - 0.0623069224526925j, abs_eps=1e-8) + + # these require accurate integration + assert elliprj(0.3068, -4.037-0.0632j, 1.654, -0.9609).ae(1.77940452391261626 + 0.0388711305592447234j) + assert elliprj(0.3068, -4.037-0.00632j, 1.654, -0.9609).ae(1.77806722756403055 + 0.0592749824572262329j) + 
# issue #571 + assert ellippi(2.1 + 0.94j, 2.3 + 0.98j, 2.5 + 0.01j).ae(-0.40652414240811963438 + 2.1547659461404749309j) + + assert ellippi(2.0-1.0j, 2.0+1.0j).ae(1.8578723151271115 - 1.18642180609983531j) + assert ellippi(2.0-0.5j, 0.5+1.0j).ae(0.936761970766645807 - 1.61876787838890786j) + assert ellippi(2.0, 1.0+1.0j).ae(0.999881420735506708 - 2.4139272867045391j) + assert ellippi(2.0+1.0j, 2.0-1.0j).ae(1.8578723151271115 + 1.18642180609983531j) + assert ellippi(2.0+1.0j, 2.0).ae(2.78474654927885845 + 2.02204728966993314j) + +def test_issue_238(): + assert isnan(qfrom(m=nan)) diff --git a/.venv/lib/python3.11/site-packages/mpmath/tests/test_identify.py b/.venv/lib/python3.11/site-packages/mpmath/tests/test_identify.py new file mode 100644 index 0000000000000000000000000000000000000000..f75ab0bc4f04ecb614011e7f4599989465cab785 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/tests/test_identify.py @@ -0,0 +1,19 @@ +from mpmath import * + +def test_pslq(): + mp.dps = 15 + assert pslq([3*pi+4*e/7, pi, e, log(2)]) == [7, -21, -4, 0] + assert pslq([4.9999999999999991, 1]) == [1, -5] + assert pslq([2,1]) == [1, -2] + +def test_identify(): + mp.dps = 20 + assert identify(zeta(4), ['log(2)', 'pi**4']) == '((1/90)*pi**4)' + mp.dps = 15 + assert identify(exp(5)) == 'exp(5)' + assert identify(exp(4)) == 'exp(4)' + assert identify(log(5)) == 'log(5)' + assert identify(exp(3*pi), ['pi']) == 'exp((3*pi))' + assert identify(3, full=True) == ['3', '3', '1/(1/3)', 'sqrt(9)', + '1/sqrt((1/9))', '(sqrt(12)/2)**2', '1/(sqrt(12)/6)**2'] + assert identify(pi+1, {'a':+pi}) == '(1 + 1*a)' diff --git a/.venv/lib/python3.11/site-packages/mpmath/tests/test_interval.py b/.venv/lib/python3.11/site-packages/mpmath/tests/test_interval.py new file mode 100644 index 0000000000000000000000000000000000000000..251fd8b7ddb00074e8ae27cce4a01d8f4f8fe151 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/tests/test_interval.py @@ -0,0 +1,453 @@ +from mpmath import * + +def 
test_interval_identity(): + iv.dps = 15 + assert mpi(2) == mpi(2, 2) + assert mpi(2) != mpi(-2, 2) + assert not (mpi(2) != mpi(2, 2)) + assert mpi(-1, 1) == mpi(-1, 1) + assert str(mpi('0.1')) == "[0.099999999999999991673, 0.10000000000000000555]" + assert repr(mpi('0.1')) == "mpi('0.099999999999999992', '0.10000000000000001')" + u = mpi(-1, 3) + assert -1 in u + assert 2 in u + assert 3 in u + assert -1.1 not in u + assert 3.1 not in u + assert mpi(-1, 3) in u + assert mpi(0, 1) in u + assert mpi(-1.1, 2) not in u + assert mpi(2.5, 3.1) not in u + w = mpi(-inf, inf) + assert mpi(-5, 5) in w + assert mpi(2, inf) in w + assert mpi(0, 2) in mpi(0, 10) + assert not (3 in mpi(-inf, 0)) + +def test_interval_hash(): + assert hash(mpi(3)) == hash(3) + assert hash(mpi(3.25)) == hash(3.25) + assert hash(mpi(3,4)) == hash(mpi(3,4)) + assert hash(iv.mpc(3)) == hash(3) + assert hash(iv.mpc(3,4)) == hash(3+4j) + assert hash(iv.mpc((1,3),(2,4))) == hash(iv.mpc((1,3),(2,4))) + +def test_interval_arithmetic(): + iv.dps = 15 + assert mpi(2) + mpi(3,4) == mpi(5,6) + assert mpi(1, 2)**2 == mpi(1, 4) + assert mpi(1) + mpi(0, 1e-50) == mpi(1, mpf('1.0000000000000002')) + x = 1 / (1 / mpi(3)) + assert x.a < 3 < x.b + x = mpi(2) ** mpi(0.5) + iv.dps += 5 + sq = iv.sqrt(2) + iv.dps -= 5 + assert x.a < sq < x.b + assert mpi(1) / mpi(1, inf) + assert mpi(2, 3) / inf == mpi(0, 0) + assert mpi(0) / inf == 0 + assert mpi(0) / 0 == mpi(-inf, inf) + assert mpi(inf) / 0 == mpi(-inf, inf) + assert mpi(0) * inf == mpi(-inf, inf) + assert 1 / mpi(2, inf) == mpi(0, 0.5) + assert str((mpi(50, 50) * mpi(-10, -10)) / 3) == \ + '[-166.66666666666668561, -166.66666666666665719]' + assert mpi(0, 4) ** 3 == mpi(0, 64) + assert mpi(2,4).mid == 3 + iv.dps = 30 + a = mpi(iv.pi) + iv.dps = 15 + b = +a + assert b.a < a.a + assert b.b > a.b + a = mpi(iv.pi) + assert a == +a + assert abs(mpi(-1,2)) == mpi(0,2) + assert abs(mpi(0.5,2)) == mpi(0.5,2) + assert abs(mpi(-3,2)) == mpi(0,3) + assert abs(mpi(-3,-0.5)) == 
mpi(0.5,3) + assert mpi(0) * mpi(2,3) == mpi(0) + assert mpi(2,3) * mpi(0) == mpi(0) + assert mpi(1,3).delta == 2 + assert mpi(1,2) - mpi(3,4) == mpi(-3,-1) + assert mpi(-inf,0) - mpi(0,inf) == mpi(-inf,0) + assert mpi(-inf,0) - mpi(-inf,inf) == mpi(-inf,inf) + assert mpi(0,inf) - mpi(-inf,1) == mpi(-1,inf) + +def test_interval_mul(): + assert mpi(-1, 0) * inf == mpi(-inf, 0) + assert mpi(-1, 0) * -inf == mpi(0, inf) + assert mpi(0, 1) * inf == mpi(0, inf) + assert mpi(0, 1) * mpi(0, inf) == mpi(0, inf) + assert mpi(-1, 1) * inf == mpi(-inf, inf) + assert mpi(-1, 1) * mpi(0, inf) == mpi(-inf, inf) + assert mpi(-1, 1) * mpi(-inf, inf) == mpi(-inf, inf) + assert mpi(-inf, 0) * mpi(0, 1) == mpi(-inf, 0) + assert mpi(-inf, 0) * mpi(0, 0) * mpi(-inf, 0) + assert mpi(-inf, 0) * mpi(-inf, inf) == mpi(-inf, inf) + assert mpi(-5,0)*mpi(-32,28) == mpi(-140,160) + assert mpi(2,3) * mpi(-1,2) == mpi(-3,6) + # Should be undefined? + assert mpi(inf, inf) * 0 == mpi(-inf, inf) + assert mpi(-inf, -inf) * 0 == mpi(-inf, inf) + assert mpi(0) * mpi(-inf,2) == mpi(-inf,inf) + assert mpi(0) * mpi(-2,inf) == mpi(-inf,inf) + assert mpi(-2,inf) * mpi(0) == mpi(-inf,inf) + assert mpi(-inf,2) * mpi(0) == mpi(-inf,inf) + +def test_interval_pow(): + assert mpi(3)**2 == mpi(9, 9) + assert mpi(-3)**2 == mpi(9, 9) + assert mpi(-3, 1)**2 == mpi(0, 9) + assert mpi(-3, -1)**2 == mpi(1, 9) + assert mpi(-3, -1)**3 == mpi(-27, -1) + assert mpi(-3, 1)**3 == mpi(-27, 1) + assert mpi(-2, 3)**2 == mpi(0, 9) + assert mpi(-3, 2)**2 == mpi(0, 9) + assert mpi(4) ** -1 == mpi(0.25, 0.25) + assert mpi(-4) ** -1 == mpi(-0.25, -0.25) + assert mpi(4) ** -2 == mpi(0.0625, 0.0625) + assert mpi(-4) ** -2 == mpi(0.0625, 0.0625) + assert mpi(0, 1) ** inf == mpi(0, 1) + assert mpi(0, 1) ** -inf == mpi(1, inf) + assert mpi(0, inf) ** inf == mpi(0, inf) + assert mpi(0, inf) ** -inf == mpi(0, inf) + assert mpi(1, inf) ** inf == mpi(1, inf) + assert mpi(1, inf) ** -inf == mpi(0, 1) + assert mpi(2, 3) ** 1 == mpi(2, 3) + 
assert mpi(2, 3) ** 0 == 1 + assert mpi(1,3) ** mpi(2) == mpi(1,9) + +def test_interval_sqrt(): + assert mpi(4) ** 0.5 == mpi(2) + +def test_interval_div(): + assert mpi(0.5, 1) / mpi(-1, 0) == mpi(-inf, -0.5) + assert mpi(0, 1) / mpi(0, 1) == mpi(0, inf) + assert mpi(inf, inf) / mpi(inf, inf) == mpi(0, inf) + assert mpi(inf, inf) / mpi(2, inf) == mpi(0, inf) + assert mpi(inf, inf) / mpi(2, 2) == mpi(inf, inf) + assert mpi(0, inf) / mpi(2, inf) == mpi(0, inf) + assert mpi(0, inf) / mpi(2, 2) == mpi(0, inf) + assert mpi(2, inf) / mpi(2, 2) == mpi(1, inf) + assert mpi(2, inf) / mpi(2, inf) == mpi(0, inf) + assert mpi(-4, 8) / mpi(1, inf) == mpi(-4, 8) + assert mpi(-4, 8) / mpi(0.5, inf) == mpi(-8, 16) + assert mpi(-inf, 8) / mpi(0.5, inf) == mpi(-inf, 16) + assert mpi(-inf, inf) / mpi(0.5, inf) == mpi(-inf, inf) + assert mpi(8, inf) / mpi(0.5, inf) == mpi(0, inf) + assert mpi(-8, inf) / mpi(0.5, inf) == mpi(-16, inf) + assert mpi(-4, 8) / mpi(inf, inf) == mpi(0, 0) + assert mpi(0, 8) / mpi(inf, inf) == mpi(0, 0) + assert mpi(0, 0) / mpi(inf, inf) == mpi(0, 0) + assert mpi(-inf, 0) / mpi(inf, inf) == mpi(-inf, 0) + assert mpi(-inf, 8) / mpi(inf, inf) == mpi(-inf, 0) + assert mpi(-inf, inf) / mpi(inf, inf) == mpi(-inf, inf) + assert mpi(-8, inf) / mpi(inf, inf) == mpi(0, inf) + assert mpi(0, inf) / mpi(inf, inf) == mpi(0, inf) + assert mpi(8, inf) / mpi(inf, inf) == mpi(0, inf) + assert mpi(inf, inf) / mpi(inf, inf) == mpi(0, inf) + assert mpi(-1, 2) / mpi(0, 1) == mpi(-inf, +inf) + assert mpi(0, 1) / mpi(0, 1) == mpi(0.0, +inf) + assert mpi(-1, 0) / mpi(0, 1) == mpi(-inf, 0.0) + assert mpi(-0.5, -0.25) / mpi(0, 1) == mpi(-inf, -0.25) + assert mpi(0.5, 1) / mpi(0, 1) == mpi(0.5, +inf) + assert mpi(0.5, 4) / mpi(0, 1) == mpi(0.5, +inf) + assert mpi(-1, -0.5) / mpi(0, 1) == mpi(-inf, -0.5) + assert mpi(-4, -0.5) / mpi(0, 1) == mpi(-inf, -0.5) + assert mpi(-1, 2) / mpi(-2, 0.5) == mpi(-inf, +inf) + assert mpi(0, 1) / mpi(-2, 0.5) == mpi(-inf, +inf) + assert mpi(-1, 0) / 
mpi(-2, 0.5) == mpi(-inf, +inf) + assert mpi(-0.5, -0.25) / mpi(-2, 0.5) == mpi(-inf, +inf) + assert mpi(0.5, 1) / mpi(-2, 0.5) == mpi(-inf, +inf) + assert mpi(0.5, 4) / mpi(-2, 0.5) == mpi(-inf, +inf) + assert mpi(-1, -0.5) / mpi(-2, 0.5) == mpi(-inf, +inf) + assert mpi(-4, -0.5) / mpi(-2, 0.5) == mpi(-inf, +inf) + assert mpi(-1, 2) / mpi(-1, 0) == mpi(-inf, +inf) + assert mpi(0, 1) / mpi(-1, 0) == mpi(-inf, 0.0) + assert mpi(-1, 0) / mpi(-1, 0) == mpi(0.0, +inf) + assert mpi(-0.5, -0.25) / mpi(-1, 0) == mpi(0.25, +inf) + assert mpi(0.5, 1) / mpi(-1, 0) == mpi(-inf, -0.5) + assert mpi(0.5, 4) / mpi(-1, 0) == mpi(-inf, -0.5) + assert mpi(-1, -0.5) / mpi(-1, 0) == mpi(0.5, +inf) + assert mpi(-4, -0.5) / mpi(-1, 0) == mpi(0.5, +inf) + assert mpi(-1, 2) / mpi(0.5, 1) == mpi(-2.0, 4.0) + assert mpi(0, 1) / mpi(0.5, 1) == mpi(0.0, 2.0) + assert mpi(-1, 0) / mpi(0.5, 1) == mpi(-2.0, 0.0) + assert mpi(-0.5, -0.25) / mpi(0.5, 1) == mpi(-1.0, -0.25) + assert mpi(0.5, 1) / mpi(0.5, 1) == mpi(0.5, 2.0) + assert mpi(0.5, 4) / mpi(0.5, 1) == mpi(0.5, 8.0) + assert mpi(-1, -0.5) / mpi(0.5, 1) == mpi(-2.0, -0.5) + assert mpi(-4, -0.5) / mpi(0.5, 1) == mpi(-8.0, -0.5) + assert mpi(-1, 2) / mpi(-2, -0.5) == mpi(-4.0, 2.0) + assert mpi(0, 1) / mpi(-2, -0.5) == mpi(-2.0, 0.0) + assert mpi(-1, 0) / mpi(-2, -0.5) == mpi(0.0, 2.0) + assert mpi(-0.5, -0.25) / mpi(-2, -0.5) == mpi(0.125, 1.0) + assert mpi(0.5, 1) / mpi(-2, -0.5) == mpi(-2.0, -0.25) + assert mpi(0.5, 4) / mpi(-2, -0.5) == mpi(-8.0, -0.25) + assert mpi(-1, -0.5) / mpi(-2, -0.5) == mpi(0.25, 2.0) + assert mpi(-4, -0.5) / mpi(-2, -0.5) == mpi(0.25, 8.0) + # Should be undefined? 
+ assert mpi(0, 0) / mpi(0, 0) == mpi(-inf, inf) + assert mpi(0, 0) / mpi(0, 1) == mpi(-inf, inf) + +def test_interval_cos_sin(): + iv.dps = 15 + cos = iv.cos + sin = iv.sin + tan = iv.tan + pi = iv.pi + # Around 0 + assert cos(mpi(0)) == 1 + assert sin(mpi(0)) == 0 + assert cos(mpi(0,1)) == mpi(0.54030230586813965399, 1.0) + assert sin(mpi(0,1)) == mpi(0, 0.8414709848078966159) + assert cos(mpi(1,2)) == mpi(-0.4161468365471424069, 0.54030230586813976501) + assert sin(mpi(1,2)) == mpi(0.84147098480789650488, 1.0) + assert sin(mpi(1,2.5)) == mpi(0.59847214410395643824, 1.0) + assert cos(mpi(-1, 1)) == mpi(0.54030230586813965399, 1.0) + assert cos(mpi(-1, 0.5)) == mpi(0.54030230586813965399, 1.0) + assert cos(mpi(-1, 1.5)) == mpi(0.070737201667702906405, 1.0) + assert sin(mpi(-1,1)) == mpi(-0.8414709848078966159, 0.8414709848078966159) + assert sin(mpi(-1,0.5)) == mpi(-0.8414709848078966159, 0.47942553860420300538) + assert mpi(-0.8414709848078966159, 1.00000000000000002e-100) in sin(mpi(-1,1e-100)) + assert mpi(-2.00000000000000004e-100, 1.00000000000000002e-100) in sin(mpi(-2e-100,1e-100)) + # Same interval + assert cos(mpi(2, 2.5)) + assert cos(mpi(3.5, 4)) == mpi(-0.93645668729079634129, -0.65364362086361182946) + assert cos(mpi(5, 5.5)) == mpi(0.28366218546322624627, 0.70866977429126010168) + assert mpi(0.59847214410395654927, 0.90929742682568170942) in sin(mpi(2, 2.5)) + assert sin(mpi(3.5, 4)) == mpi(-0.75680249530792831347, -0.35078322768961983646) + assert sin(mpi(5, 5.5)) == mpi(-0.95892427466313856499, -0.70554032557039181306) + # Higher roots + iv.dps = 55 + w = 4*10**50 + mpi(0.5) + for p in [15, 40, 80]: + iv.dps = p + assert 0 in sin(4*mpi(pi)) + assert 0 in sin(4*10**50*mpi(pi)) + assert 0 in cos((4+0.5)*mpi(pi)) + assert 0 in cos(w*mpi(pi)) + assert 1 in cos(4*mpi(pi)) + assert 1 in cos(4*10**50*mpi(pi)) + iv.dps = 15 + assert cos(mpi(2,inf)) == mpi(-1,1) + assert sin(mpi(2,inf)) == mpi(-1,1) + assert cos(mpi(-inf,2)) == mpi(-1,1) + assert 
sin(mpi(-inf,2)) == mpi(-1,1) + u = tan(mpi(0.5,1)) + assert mpf(u.a).ae(mp.tan(0.5)) + assert mpf(u.b).ae(mp.tan(1)) + v = iv.cot(mpi(0.5,1)) + assert mpf(v.a).ae(mp.cot(1)) + assert mpf(v.b).ae(mp.cot(0.5)) + # Sanity check of evaluation at n*pi and (n+1/2)*pi + for n in range(-5,7,2): + x = iv.cos(n*iv.pi) + assert -1 in x + assert x >= -1 + assert x != -1 + x = iv.sin((n+0.5)*iv.pi) + assert -1 in x + assert x >= -1 + assert x != -1 + for n in range(-6,8,2): + x = iv.cos(n*iv.pi) + assert 1 in x + assert x <= 1 + if n: + assert x != 1 + x = iv.sin((n+0.5)*iv.pi) + assert 1 in x + assert x <= 1 + assert x != 1 + for n in range(-6,7): + x = iv.cos((n+0.5)*iv.pi) + assert x.a < 0 < x.b + x = iv.sin(n*iv.pi) + if n: + assert x.a < 0 < x.b + +def test_interval_complex(): + # TODO: many more tests + iv.dps = 15 + mp.dps = 15 + assert iv.mpc(2,3) == 2+3j + assert iv.mpc(2,3) != 2+4j + assert iv.mpc(2,3) != 1+3j + assert 1+3j in iv.mpc([1,2],[3,4]) + assert 2+5j not in iv.mpc([1,2],[3,4]) + assert iv.mpc(1,2) + 1j == 1+3j + assert iv.mpc([1,2],[2,3]) + 2+3j == iv.mpc([3,4],[5,6]) + assert iv.mpc([2,4],[4,8]) / 2 == iv.mpc([1,2],[2,4]) + assert iv.mpc([1,2],[2,4]) * 2j == iv.mpc([-8,-4],[2,4]) + assert iv.mpc([2,4],[4,8]) / 2j == iv.mpc([2,4],[-2,-1]) + assert iv.exp(2+3j).ae(mp.exp(2+3j)) + assert iv.log(2+3j).ae(mp.log(2+3j)) + assert (iv.mpc(2,3) ** iv.mpc(0.5,2)).ae(mp.mpc(2,3) ** mp.mpc(0.5,2)) + assert 1j in (iv.mpf(-1) ** 0.5) + assert 1j in (iv.mpc(-1) ** 0.5) + assert abs(iv.mpc(0)) == 0 + assert abs(iv.mpc(inf)) == inf + assert abs(iv.mpc(3,4)) == 5 + assert abs(iv.mpc(4)) == 4 + assert abs(iv.mpc(0,4)) == 4 + assert abs(iv.mpc(0,[2,3])) == iv.mpf([2,3]) + assert abs(iv.mpc(0,[-3,2])) == iv.mpf([0,3]) + assert abs(iv.mpc([3,5],[4,12])) == iv.mpf([5,13]) + assert abs(iv.mpc([3,5],[-4,12])) == iv.mpf([3,13]) + assert iv.mpc(2,3) ** 0 == 1 + assert iv.mpc(2,3) ** 1 == (2+3j) + assert iv.mpc(2,3) ** 2 == (2+3j)**2 + assert iv.mpc(2,3) ** 3 == (2+3j)**3 + assert 
iv.mpc(2,3) ** 4 == (2+3j)**4 + assert iv.mpc(2,3) ** 5 == (2+3j)**5 + assert iv.mpc(2,2) ** (-1) == (2+2j) ** (-1) + assert iv.mpc(2,2) ** (-2) == (2+2j) ** (-2) + assert iv.cos(2).ae(mp.cos(2)) + assert iv.sin(2).ae(mp.sin(2)) + assert iv.cos(2+3j).ae(mp.cos(2+3j)) + assert iv.sin(2+3j).ae(mp.sin(2+3j)) + +def test_interval_complex_arg(): + mp.dps = 15 + iv.dps = 15 + assert iv.arg(3) == 0 + assert iv.arg(0) == 0 + assert iv.arg([0,3]) == 0 + assert iv.arg(-3).ae(pi) + assert iv.arg(2+3j).ae(iv.arg(2+3j)) + z = iv.mpc([-2,-1],[3,4]) + t = iv.arg(z) + assert t.a.ae(mp.arg(-1+4j)) + assert t.b.ae(mp.arg(-2+3j)) + z = iv.mpc([-2,1],[3,4]) + t = iv.arg(z) + assert t.a.ae(mp.arg(1+3j)) + assert t.b.ae(mp.arg(-2+3j)) + z = iv.mpc([1,2],[3,4]) + t = iv.arg(z) + assert t.a.ae(mp.arg(2+3j)) + assert t.b.ae(mp.arg(1+4j)) + z = iv.mpc([1,2],[-2,3]) + t = iv.arg(z) + assert t.a.ae(mp.arg(1-2j)) + assert t.b.ae(mp.arg(1+3j)) + z = iv.mpc([1,2],[-4,-3]) + t = iv.arg(z) + assert t.a.ae(mp.arg(1-4j)) + assert t.b.ae(mp.arg(2-3j)) + z = iv.mpc([-1,2],[-4,-3]) + t = iv.arg(z) + assert t.a.ae(mp.arg(-1-3j)) + assert t.b.ae(mp.arg(2-3j)) + z = iv.mpc([-2,-1],[-4,-3]) + t = iv.arg(z) + assert t.a.ae(mp.arg(-2-3j)) + assert t.b.ae(mp.arg(-1-4j)) + z = iv.mpc([-2,-1],[-3,3]) + t = iv.arg(z) + assert t.a.ae(-mp.pi) + assert t.b.ae(mp.pi) + z = iv.mpc([-2,2],[-3,3]) + t = iv.arg(z) + assert t.a.ae(-mp.pi) + assert t.b.ae(mp.pi) + +def test_interval_ae(): + iv.dps = 15 + x = iv.mpf([1,2]) + assert x.ae(1) is None + assert x.ae(1.5) is None + assert x.ae(2) is None + assert x.ae(2.01) is False + assert x.ae(0.99) is False + x = iv.mpf(3.5) + assert x.ae(3.5) is True + assert x.ae(3.5+1e-15) is True + assert x.ae(3.5-1e-15) is True + assert x.ae(3.501) is False + assert x.ae(3.499) is False + assert x.ae(iv.mpf([3.5,3.501])) is None + assert x.ae(iv.mpf([3.5,4.5+1e-15])) is None + +def test_interval_nstr(): + iv.dps = n = 30 + x = mpi(1, 2) + # FIXME: error_dps should not be necessary + 
assert iv.nstr(x, n, mode='plusminus', error_dps=6) == '1.5 +- 0.5' + assert iv.nstr(x, n, mode='plusminus', use_spaces=False, error_dps=6) == '1.5+-0.5' + assert iv.nstr(x, n, mode='percent') == '1.5 (33.33%)' + assert iv.nstr(x, n, mode='brackets', use_spaces=False) == '[1.0,2.0]' + assert iv.nstr(x, n, mode='brackets' , brackets=('<', '>')) == '<1.0, 2.0>' + x = mpi('5.2582327113062393041', '5.2582327113062749951') + assert iv.nstr(x, n, mode='diff') == '5.2582327113062[393041, 749951]' + assert iv.nstr(iv.cos(mpi(1)), n, mode='diff', use_spaces=False) == '0.54030230586813971740093660744[2955,3053]' + assert iv.nstr(mpi('1e123', '1e129'), n, mode='diff') == '[1.0e+123, 1.0e+129]' + exp = iv.exp + assert iv.nstr(iv.exp(mpi('5000.1')), n, mode='diff') == '3.2797365856787867069110487[0926, 1191]e+2171' + iv.dps = 15 + +def test_mpi_from_str(): + iv.dps = 15 + assert iv.convert('1.5 +- 0.5') == mpi(mpf('1.0'), mpf('2.0')) + assert mpi(1, 2) in iv.convert('1.5 (33.33333333333333333333333333333%)') + assert iv.convert('[1, 2]') == mpi(1, 2) + assert iv.convert('1[2, 3]') == mpi(12, 13) + assert iv.convert('1.[23,46]e-8') == mpi('1.23e-8', '1.46e-8') + assert iv.convert('12[3.4,5.9]e4') == mpi('123.4e+4', '125.9e4') + +def test_interval_gamma(): + mp.dps = 15 + iv.dps = 15 + # TODO: need many more tests + assert iv.rgamma(0) == 0 + assert iv.fac(0) == 1 + assert iv.fac(1) == 1 + assert iv.fac(2) == 2 + assert iv.fac(3) == 6 + assert iv.gamma(0) == [-inf,inf] + assert iv.gamma(1) == 1 + assert iv.gamma(2) == 1 + assert iv.gamma(3) == 2 + assert -3.5449077018110320546 in iv.gamma(-0.5) + assert iv.loggamma(1) == 0 + assert iv.loggamma(2) == 0 + assert 0.69314718055994530942 in iv.loggamma(3) + # Test tight log-gamma endpoints based on monotonicity + xs = [iv.mpc([2,3],[1,4]), + iv.mpc([2,3],[-4,-1]), + iv.mpc([2,3],[-1,4]), + iv.mpc([2,3],[-4,1]), + iv.mpc([2,3],[-4,4]), + iv.mpc([-3,-2],[2,4]), + iv.mpc([-3,-2],[-4,-2])] + for x in xs: + ys = 
[mp.loggamma(mp.mpc(x.a,x.c)), + mp.loggamma(mp.mpc(x.b,x.c)), + mp.loggamma(mp.mpc(x.a,x.d)), + mp.loggamma(mp.mpc(x.b,x.d))] + if 0 in x.imag: + ys += [mp.loggamma(x.a), mp.loggamma(x.b)] + min_real = min([y.real for y in ys]) + max_real = max([y.real for y in ys]) + min_imag = min([y.imag for y in ys]) + max_imag = max([y.imag for y in ys]) + z = iv.loggamma(x) + assert z.a.ae(min_real) + assert z.b.ae(max_real) + assert z.c.ae(min_imag) + assert z.d.ae(max_imag) + +def test_interval_conversions(): + mp.dps = 15 + iv.dps = 15 + for a, b in ((-0.0, 0), (0.0, 0.5), (1.0, 1), \ + ('-inf', 20.5), ('-inf', float(sqrt(2)))): + r = mpi(a, b) + assert int(r.b) == int(b) + assert float(r.a) == float(a) + assert float(r.b) == float(b) + assert complex(r.a) == complex(a) + assert complex(r.b) == complex(b) diff --git a/.venv/lib/python3.11/site-packages/mpmath/tests/test_matrices.py b/.venv/lib/python3.11/site-packages/mpmath/tests/test_matrices.py new file mode 100644 index 0000000000000000000000000000000000000000..1547b90664dba66a98a7f026a04a4ed1aa1ed3b4 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/tests/test_matrices.py @@ -0,0 +1,253 @@ +import pytest +import sys +from mpmath import * + +def test_matrix_basic(): + A1 = matrix(3) + for i in range(3): + A1[i,i] = 1 + assert A1 == eye(3) + assert A1 == matrix(A1) + A2 = matrix(3, 2) + assert not A2._matrix__data + A3 = matrix([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) + assert list(A3) == list(range(1, 10)) + A3[1,1] = 0 + assert not (1, 1) in A3._matrix__data + A4 = matrix([[1, 2, 3], [4, 5, 6]]) + A5 = matrix([[6, -1], [3, 2], [0, -3]]) + assert A4 * A5 == matrix([[12, -6], [39, -12]]) + assert A1 * A3 == A3 * A1 == A3 + pytest.raises(ValueError, lambda: A2*A2) + l = [[10, 20, 30], [40, 0, 60], [70, 80, 90]] + A6 = matrix(l) + assert A6.tolist() == l + assert A6 == eval(repr(A6)) + A6 = fp.matrix(A6) + assert A6 == eval(repr(A6)) + assert A6*1j == eval(repr(A6*1j)) + assert A3 * 10 == 10 * A3 == A6 + assert 
A2.rows == 3 + assert A2.cols == 2 + A3.rows = 2 + A3.cols = 2 + assert len(A3._matrix__data) == 3 + assert A4 + A4 == 2*A4 + pytest.raises(ValueError, lambda: A4 + A2) + assert sum(A1 - A1) == 0 + A7 = matrix([[1, 2], [3, 4], [5, 6], [7, 8]]) + x = matrix([10, -10]) + assert A7*x == matrix([-10, -10, -10, -10]) + A8 = ones(5) + assert sum((A8 + 1) - (2 - zeros(5))) == 0 + assert (1 + ones(4)) / 2 - 1 == zeros(4) + assert eye(3)**10 == eye(3) + pytest.raises(ValueError, lambda: A7**2) + A9 = randmatrix(3) + A10 = matrix(A9) + A9[0,0] = -100 + assert A9 != A10 + assert nstr(A9) + +def test_matmul(): + """ + Test the PEP465 "@" matrix multiplication syntax. + To avoid syntax errors when importing this file in Python 3.5 and below, we have to use exec() - sorry for that. + """ + # TODO remove exec() wrapper as soon as we drop support for Python <= 3.5 + if sys.hexversion < 0x30500f0: + # we are on Python < 3.5 + pytest.skip("'@' (__matmul__) is only supported in Python 3.5 or newer") + A4 = matrix([[1, 2, 3], [4, 5, 6]]) + A5 = matrix([[6, -1], [3, 2], [0, -3]]) + exec("assert A4 @ A5 == A4 * A5") + +def test_matrix_slices(): + A = matrix([ [1, 2, 3], + [4, 5 ,6], + [7, 8 ,9]]) + V = matrix([1,2,3,4,5]) + + # Get slice + assert A[:,:] == A + assert A[:,1] == matrix([[2],[5],[8]]) + assert A[2,:] == matrix([[7, 8 ,9]]) + assert A[1:3,1:3] == matrix([[5,6],[8,9]]) + assert V[2:4] == matrix([3,4]) + pytest.raises(IndexError, lambda: A[:,1:6]) + + # Assign slice with matrix + A1 = matrix(3) + A1[:,:] = A + assert A1[:,:] == matrix([[1, 2, 3], + [4, 5 ,6], + [7, 8 ,9]]) + A1[0,:] = matrix([[10, 11, 12]]) + assert A1 == matrix([ [10, 11, 12], + [4, 5 ,6], + [7, 8 ,9]]) + A1[:,2] = matrix([[13], [14], [15]]) + assert A1 == matrix([ [10, 11, 13], + [4, 5 ,14], + [7, 8 ,15]]) + A1[:2,:2] = matrix([[16, 17], [18 , 19]]) + assert A1 == matrix([ [16, 17, 13], + [18, 19 ,14], + [7, 8 ,15]]) + V[1:3] = 10 + assert V == matrix([1,10,10,4,5]) + with pytest.raises(ValueError): + 
A1[2,:] = A[:,1] + + with pytest.raises(IndexError): + A1[2,1:20] = A[:,:] + + # Assign slice with scalar + A1[:,2] = 10 + assert A1 == matrix([ [16, 17, 10], + [18, 19 ,10], + [7, 8 ,10]]) + A1[:,:] = 40 + for x in A1: + assert x == 40 + + +def test_matrix_power(): + A = matrix([[1, 2], [3, 4]]) + assert A**2 == A*A + assert A**3 == A*A*A + assert A**-1 == inverse(A) + assert A**-2 == inverse(A*A) + +def test_matrix_transform(): + A = matrix([[1, 2], [3, 4], [5, 6]]) + assert A.T == A.transpose() == matrix([[1, 3, 5], [2, 4, 6]]) + swap_row(A, 1, 2) + assert A == matrix([[1, 2], [5, 6], [3, 4]]) + l = [1, 2] + swap_row(l, 0, 1) + assert l == [2, 1] + assert extend(eye(3), [1,2,3]) == matrix([[1,0,0,1],[0,1,0,2],[0,0,1,3]]) + +def test_matrix_conjugate(): + A = matrix([[1 + j, 0], [2, j]]) + assert A.conjugate() == matrix([[mpc(1, -1), 0], [2, mpc(0, -1)]]) + assert A.transpose_conj() == A.H == matrix([[mpc(1, -1), 2], + [0, mpc(0, -1)]]) + +def test_matrix_creation(): + assert diag([1, 2, 3]) == matrix([[1, 0, 0], [0, 2, 0], [0, 0, 3]]) + A1 = ones(2, 3) + assert A1.rows == 2 and A1.cols == 3 + for a in A1: + assert a == 1 + A2 = zeros(3, 2) + assert A2.rows == 3 and A2.cols == 2 + for a in A2: + assert a == 0 + assert randmatrix(10) != randmatrix(10) + one = mpf(1) + assert hilbert(3) == matrix([[one, one/2, one/3], + [one/2, one/3, one/4], + [one/3, one/4, one/5]]) + +def test_norms(): + # matrix norms + A = matrix([[1, -2], [-3, -1], [2, 1]]) + assert mnorm(A,1) == 6 + assert mnorm(A,inf) == 4 + assert mnorm(A,'F') == sqrt(20) + # vector norms + assert norm(-3) == 3 + x = [1, -2, 7, -12] + assert norm(x, 1) == 22 + assert round(norm(x, 2), 10) == 14.0712472795 + assert round(norm(x, 10), 10) == 12.0054633727 + assert norm(x, inf) == 12 + +def test_vector(): + x = matrix([0, 1, 2, 3, 4]) + assert x == matrix([[0], [1], [2], [3], [4]]) + assert x[3] == 3 + assert len(x._matrix__data) == 4 + assert list(x) == list(range(5)) + x[0] = -10 + x[4] = 0 + assert x[0] == 
-10 + assert len(x) == len(x.T) == 5 + assert x.T*x == matrix([[114]]) + +def test_matrix_copy(): + A = ones(6) + B = A.copy() + C = +A + assert A == B + assert A == C + B[0,0] = 0 + assert A != B + C[0,0] = 42 + assert A != C + +def test_matrix_numpy(): + try: + import numpy + except ImportError: + return + l = [[1, 2], [3, 4], [5, 6]] + a = numpy.array(l) + assert matrix(l) == matrix(a) + +def test_interval_matrix_scalar_mult(): + """Multiplication of iv.matrix and any scalar type""" + a = mpi(-1, 1) + b = a + a * 2j + c = mpf(42) + d = c + c * 2j + e = 1.234 + f = fp.convert(e) + g = e + e * 3j + h = fp.convert(g) + M = iv.ones(1) + for x in [a, b, c, d, e, f, g, h]: + assert x * M == iv.matrix([x]) + assert M * x == iv.matrix([x]) + +@pytest.mark.xfail() +def test_interval_matrix_matrix_mult(): + """Multiplication of iv.matrix and other matrix types""" + A = ones(1) + B = fp.ones(1) + M = iv.ones(1) + for X in [A, B, M]: + assert X * M == iv.matrix(X) + assert X * M == X + assert M * X == iv.matrix(X) + assert M * X == X + +def test_matrix_conversion_to_iv(): + # Test that matrices with foreign datatypes are properly converted + for other_type_eye in [eye(3), fp.eye(3), iv.eye(3)]: + A = iv.matrix(other_type_eye) + B = iv.eye(3) + assert type(A[0,0]) == type(B[0,0]) + assert A.tolist() == B.tolist() + +def test_interval_matrix_mult_bug(): + # regression test for interval matrix multiplication: + # result must be nonzero-width and contain the exact result + x = convert('1.00000000000001') # note: this is implicitly rounded to some near mpf float value + A = matrix([[x]]) + B = iv.matrix(A) + C = iv.matrix([[x]]) + assert B == C + B = B * B + C = C * C + assert B == C + assert B[0, 0].delta > 1e-16 + assert B[0, 0].delta < 3e-16 + assert C[0, 0].delta > 1e-16 + assert C[0, 0].delta < 3e-16 + assert mp.mpf('1.00000000000001998401444325291756783368705994138804689654') in B[0, 0] + assert mp.mpf('1.00000000000001998401444325291756783368705994138804689654') in C[0, 
0] + # the following caused an error before the bug was fixed + assert iv.matrix(mp.eye(2)) * (iv.ones(2) + mpi(1, 2)) == iv.matrix([[mpi(2, 3), mpi(2, 3)], [mpi(2, 3), mpi(2, 3)]]) diff --git a/.venv/lib/python3.11/site-packages/mpmath/tests/test_summation.py b/.venv/lib/python3.11/site-packages/mpmath/tests/test_summation.py new file mode 100644 index 0000000000000000000000000000000000000000..04ffd29f994e1e6310678eec292c0e03f2d6c725 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/tests/test_summation.py @@ -0,0 +1,53 @@ +from mpmath import * + +def test_sumem(): + mp.dps = 15 + assert sumem(lambda k: 1/k**2.5, [50, 100]).ae(0.0012524505324784962) + assert sumem(lambda k: k**4 + 3*k + 1, [10, 100]).ae(2050333103) + +def test_nsum(): + mp.dps = 15 + assert nsum(lambda x: x**2, [1, 3]) == 14 + assert nsum(lambda k: 1/factorial(k), [0, inf]).ae(e) + assert nsum(lambda k: (-1)**(k+1) / k, [1, inf]).ae(log(2)) + assert nsum(lambda k: (-1)**(k+1) / k**2, [1, inf]).ae(pi**2 / 12) + assert nsum(lambda k: (-1)**k / log(k), [2, inf]).ae(0.9242998972229388) + assert nsum(lambda k: 1/k**2, [1, inf]).ae(pi**2 / 6) + assert nsum(lambda k: 2**k/fac(k), [0, inf]).ae(exp(2)) + assert nsum(lambda k: 1/k**2, [4, inf], method='e').ae(0.2838229557371153) + assert abs(fp.nsum(lambda k: 1/k**4, [1, fp.inf]) - 1.082323233711138) < 1e-5 + assert abs(fp.nsum(lambda k: 1/k**4, [1, fp.inf], method='e') - 1.082323233711138) < 1e-4 + +def test_nprod(): + mp.dps = 15 + assert nprod(lambda k: exp(1/k**2), [1,inf], method='r').ae(exp(pi**2/6)) + assert nprod(lambda x: x**2, [1, 3]) == 36 + +def test_fsum(): + mp.dps = 15 + assert fsum([]) == 0 + assert fsum([-4]) == -4 + assert fsum([2,3]) == 5 + assert fsum([1e-100,1]) == 1 + assert fsum([1,1e-100]) == 1 + assert fsum([1e100,1]) == 1e100 + assert fsum([1,1e100]) == 1e100 + assert fsum([1e-100,0]) == 1e-100 + assert fsum([1e-100,1e100,1e-100]) == 1e100 + assert fsum([2,1+1j,1]) == 4+1j + assert fsum([2,inf,3]) == inf + assert 
fsum([2,-1], absolute=1) == 3 + assert fsum([2,-1], squared=1) == 5 + assert fsum([1,1+j], squared=1) == 1+2j + assert fsum([1,3+4j], absolute=1) == 6 + assert fsum([1,2+3j], absolute=1, squared=1) == 14 + assert isnan(fsum([inf,-inf])) + assert fsum([inf,-inf], absolute=1) == inf + assert fsum([inf,-inf], squared=1) == inf + assert fsum([inf,-inf], absolute=1, squared=1) == inf + assert iv.fsum([1,mpi(2,3)]) == mpi(3,4) + +def test_fprod(): + mp.dps = 15 + assert fprod([]) == 1 + assert fprod([2,3]) == 6 diff --git a/.venv/lib/python3.11/site-packages/mpmath/tests/test_trig.py b/.venv/lib/python3.11/site-packages/mpmath/tests/test_trig.py new file mode 100644 index 0000000000000000000000000000000000000000..c70a2a0ff4c44c784404ecdb15357d5b91a992d6 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/tests/test_trig.py @@ -0,0 +1,136 @@ +from mpmath import * +from mpmath.libmp import * + +def test_trig_misc_hard(): + mp.prec = 53 + # Worst-case input for an IEEE double, from a paper by Kahan + x = ldexp(6381956970095103,797) + assert cos(x) == mpf('-4.6871659242546277e-19') + assert sin(x) == 1 + + mp.prec = 150 + a = mpf(10**50) + mp.prec = 53 + assert sin(a).ae(-0.7896724934293100827) + assert cos(a).ae(-0.6135286082336635622) + + # Check relative accuracy close to x = zero + assert sin(1e-100) == 1e-100 # when rounding to nearest + assert sin(1e-6).ae(9.999999999998333e-007, rel_eps=2e-15, abs_eps=0) + assert sin(1e-6j).ae(1.0000000000001666e-006j, rel_eps=2e-15, abs_eps=0) + assert sin(-1e-6j).ae(-1.0000000000001666e-006j, rel_eps=2e-15, abs_eps=0) + assert cos(1e-100) == 1 + assert cos(1e-6).ae(0.9999999999995) + assert cos(-1e-6j).ae(1.0000000000005) + assert tan(1e-100) == 1e-100 + assert tan(1e-6).ae(1.0000000000003335e-006, rel_eps=2e-15, abs_eps=0) + assert tan(1e-6j).ae(9.9999999999966644e-007j, rel_eps=2e-15, abs_eps=0) + assert tan(-1e-6j).ae(-9.9999999999966644e-007j, rel_eps=2e-15, abs_eps=0) + +def test_trig_near_zero(): + mp.dps = 15 + + 
for r in [round_nearest, round_down, round_up, round_floor, round_ceiling]: + assert sin(0, rounding=r) == 0 + assert cos(0, rounding=r) == 1 + + a = mpf('1e-100') + b = mpf('-1e-100') + + assert sin(a, rounding=round_nearest) == a + assert sin(a, rounding=round_down) < a + assert sin(a, rounding=round_floor) < a + assert sin(a, rounding=round_up) >= a + assert sin(a, rounding=round_ceiling) >= a + assert sin(b, rounding=round_nearest) == b + assert sin(b, rounding=round_down) > b + assert sin(b, rounding=round_floor) <= b + assert sin(b, rounding=round_up) <= b + assert sin(b, rounding=round_ceiling) > b + + assert cos(a, rounding=round_nearest) == 1 + assert cos(a, rounding=round_down) < 1 + assert cos(a, rounding=round_floor) < 1 + assert cos(a, rounding=round_up) == 1 + assert cos(a, rounding=round_ceiling) == 1 + assert cos(b, rounding=round_nearest) == 1 + assert cos(b, rounding=round_down) < 1 + assert cos(b, rounding=round_floor) < 1 + assert cos(b, rounding=round_up) == 1 + assert cos(b, rounding=round_ceiling) == 1 + + +def test_trig_near_n_pi(): + + mp.dps = 15 + a = [n*pi for n in [1, 2, 6, 11, 100, 1001, 10000, 100001]] + mp.dps = 135 + a.append(10**100 * pi) + mp.dps = 15 + + assert sin(a[0]) == mpf('1.2246467991473531772e-16') + assert sin(a[1]) == mpf('-2.4492935982947063545e-16') + assert sin(a[2]) == mpf('-7.3478807948841190634e-16') + assert sin(a[3]) == mpf('4.8998251578625894243e-15') + assert sin(a[4]) == mpf('1.9643867237284719452e-15') + assert sin(a[5]) == mpf('-8.8632615209684813458e-15') + assert sin(a[6]) == mpf('-4.8568235395684898392e-13') + assert sin(a[7]) == mpf('3.9087342299491231029e-11') + assert sin(a[8]) == mpf('-1.369235466754566993528e-36') + + r = round_nearest + assert cos(a[0], rounding=r) == -1 + assert cos(a[1], rounding=r) == 1 + assert cos(a[2], rounding=r) == 1 + assert cos(a[3], rounding=r) == -1 + assert cos(a[4], rounding=r) == 1 + assert cos(a[5], rounding=r) == -1 + assert cos(a[6], rounding=r) == 1 + assert 
cos(a[7], rounding=r) == -1 + assert cos(a[8], rounding=r) == 1 + + r = round_up + assert cos(a[0], rounding=r) == -1 + assert cos(a[1], rounding=r) == 1 + assert cos(a[2], rounding=r) == 1 + assert cos(a[3], rounding=r) == -1 + assert cos(a[4], rounding=r) == 1 + assert cos(a[5], rounding=r) == -1 + assert cos(a[6], rounding=r) == 1 + assert cos(a[7], rounding=r) == -1 + assert cos(a[8], rounding=r) == 1 + + r = round_down + assert cos(a[0], rounding=r) > -1 + assert cos(a[1], rounding=r) < 1 + assert cos(a[2], rounding=r) < 1 + assert cos(a[3], rounding=r) > -1 + assert cos(a[4], rounding=r) < 1 + assert cos(a[5], rounding=r) > -1 + assert cos(a[6], rounding=r) < 1 + assert cos(a[7], rounding=r) > -1 + assert cos(a[8], rounding=r) < 1 + + r = round_floor + assert cos(a[0], rounding=r) == -1 + assert cos(a[1], rounding=r) < 1 + assert cos(a[2], rounding=r) < 1 + assert cos(a[3], rounding=r) == -1 + assert cos(a[4], rounding=r) < 1 + assert cos(a[5], rounding=r) == -1 + assert cos(a[6], rounding=r) < 1 + assert cos(a[7], rounding=r) == -1 + assert cos(a[8], rounding=r) < 1 + + r = round_ceiling + assert cos(a[0], rounding=r) > -1 + assert cos(a[1], rounding=r) == 1 + assert cos(a[2], rounding=r) == 1 + assert cos(a[3], rounding=r) > -1 + assert cos(a[4], rounding=r) == 1 + assert cos(a[5], rounding=r) > -1 + assert cos(a[6], rounding=r) == 1 + assert cos(a[7], rounding=r) > -1 + assert cos(a[8], rounding=r) == 1 + + mp.dps = 15 diff --git a/.venv/lib/python3.11/site-packages/mpmath/tests/torture.py b/.venv/lib/python3.11/site-packages/mpmath/tests/torture.py new file mode 100644 index 0000000000000000000000000000000000000000..845d5c6d7d017e51e1ed9a8fe3106cfa32fd967f --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/tests/torture.py @@ -0,0 +1,224 @@ +""" +Torture tests for asymptotics and high precision evaluation of +special functions. + +(Other torture tests may also be placed here.) + +Running this file (gmpy recommended!) takes several CPU minutes. 
+With Python 2.6+, multiprocessing is used automatically to run tests +in parallel if many cores are available. (A single test may take between +a second and several minutes; possibly more.) + +The idea: + +* We evaluate functions at positive, negative, imaginary, 45- and 135-degree + complex values with magnitudes between 10^-20 to 10^20, at precisions between + 5 and 150 digits (we can go even higher for fast functions). + +* Comparing the result from two different precision levels provides + a strong consistency check (particularly for functions that use + different algorithms at different precision levels). + +* That the computation finishes at all (without failure), within reasonable + time, provides a check that evaluation works at all: that the code runs, + that it doesn't get stuck in an infinite loop, and that it doesn't use + some extremely slowly algorithm where it could use a faster one. + +TODO: + +* Speed up those functions that take long to finish! +* Generalize to test more cases; more options. +* Implement a timeout mechanism. +* Some functions are notably absent, including the following: + * inverse trigonometric functions (some become inaccurate for complex arguments) + * ci, si (not implemented properly for large complex arguments) + * zeta functions (need to modify test not to try too large imaginary values) + * and others... 
+ +""" + + +import sys, os +from timeit import default_timer as clock + +if "-nogmpy" in sys.argv: + sys.argv.remove('-nogmpy') + os.environ['MPMATH_NOGMPY'] = 'Y' + +filt = '' +if not sys.argv[-1].endswith(".py"): + filt = sys.argv[-1] + +from mpmath import * +from mpmath.libmp.backend import exec_ + +def test_asymp(f, maxdps=150, verbose=False, huge_range=False): + dps = [5,15,25,50,90,150,500,1500,5000,10000] + dps = [p for p in dps if p <= maxdps] + def check(x,y,p,inpt): + if abs(x-y)/abs(y) < workprec(20)(power)(10, -p+1): + return + print() + print("Error!") + print("Input:", inpt) + print("dps =", p) + print("Result 1:", x) + print("Result 2:", y) + print("Absolute error:", abs(x-y)) + print("Relative error:", abs(x-y)/abs(y)) + raise AssertionError + exponents = range(-20,20) + if huge_range: + exponents += [-1000, -100, -50, 50, 100, 1000] + for n in exponents: + if verbose: + sys.stdout.write(". ") + mp.dps = 25 + xpos = mpf(10)**n / 1.1287 + xneg = -xpos + ximag = xpos*j + xcomplex1 = xpos*(1+j) + xcomplex2 = xpos*(-1+j) + for i in range(len(dps)): + if verbose: + print("Testing dps = %s" % dps[i]) + mp.dps = dps[i] + new = f(xpos), f(xneg), f(ximag), f(xcomplex1), f(xcomplex2) + if i != 0: + p = dps[i-1] + check(prev[0], new[0], p, xpos) + check(prev[1], new[1], p, xneg) + check(prev[2], new[2], p, ximag) + check(prev[3], new[3], p, xcomplex1) + check(prev[4], new[4], p, xcomplex2) + prev = new + if verbose: + print() + +a1, a2, a3, a4, a5 = 1.5, -2.25, 3.125, 4, 2 + +def test_bernoulli_huge(): + p, q = bernfrac(9000) + assert p % 10**10 == 9636701091 + assert q == 4091851784687571609141381951327092757255270 + mp.dps = 15 + assert str(bernoulli(10**100)) == '-2.58183325604736e+987675256497386331227838638980680030172857347883537824464410652557820800494271520411283004120790908623' + mp.dps = 50 + assert str(bernoulli(10**100)) == 
'-2.5818332560473632073252488656039475548106223822913e+987675256497386331227838638980680030172857347883537824464410652557820800494271520411283004120790908623' + mp.dps = 15 + +cases = """\ +test_bernoulli_huge() +test_asymp(lambda z: +pi, maxdps=10000) +test_asymp(lambda z: +e, maxdps=10000) +test_asymp(lambda z: +ln2, maxdps=10000) +test_asymp(lambda z: +ln10, maxdps=10000) +test_asymp(lambda z: +phi, maxdps=10000) +test_asymp(lambda z: +catalan, maxdps=5000) +test_asymp(lambda z: +euler, maxdps=5000) +test_asymp(lambda z: +glaisher, maxdps=1000) +test_asymp(lambda z: +khinchin, maxdps=1000) +test_asymp(lambda z: +twinprime, maxdps=150) +test_asymp(lambda z: stieltjes(2), maxdps=150) +test_asymp(lambda z: +mertens, maxdps=150) +test_asymp(lambda z: +apery, maxdps=5000) +test_asymp(sqrt, maxdps=10000, huge_range=True) +test_asymp(cbrt, maxdps=5000, huge_range=True) +test_asymp(lambda z: root(z,4), maxdps=5000, huge_range=True) +test_asymp(lambda z: root(z,-5), maxdps=5000, huge_range=True) +test_asymp(exp, maxdps=5000, huge_range=True) +test_asymp(expm1, maxdps=1500) +test_asymp(ln, maxdps=5000, huge_range=True) +test_asymp(cosh, maxdps=5000) +test_asymp(sinh, maxdps=5000) +test_asymp(tanh, maxdps=1500) +test_asymp(sin, maxdps=5000, huge_range=True) +test_asymp(cos, maxdps=5000, huge_range=True) +test_asymp(tan, maxdps=1500) +test_asymp(agm, maxdps=1500, huge_range=True) +test_asymp(ellipk, maxdps=1500) +test_asymp(ellipe, maxdps=1500) +test_asymp(lambertw, huge_range=True) +test_asymp(lambda z: lambertw(z,-1)) +test_asymp(lambda z: lambertw(z,1)) +test_asymp(lambda z: lambertw(z,4)) +test_asymp(gamma) +test_asymp(loggamma) # huge_range=True ? 
+test_asymp(ei) +test_asymp(e1) +test_asymp(li, huge_range=True) +test_asymp(ci) +test_asymp(si) +test_asymp(chi) +test_asymp(shi) +test_asymp(erf) +test_asymp(erfc) +test_asymp(erfi) +test_asymp(lambda z: besselj(2, z)) +test_asymp(lambda z: bessely(2, z)) +test_asymp(lambda z: besseli(2, z)) +test_asymp(lambda z: besselk(2, z)) +test_asymp(lambda z: besselj(-2.25, z)) +test_asymp(lambda z: bessely(-2.25, z)) +test_asymp(lambda z: besseli(-2.25, z)) +test_asymp(lambda z: besselk(-2.25, z)) +test_asymp(airyai) +test_asymp(airybi) +test_asymp(lambda z: hyp0f1(a1, z)) +test_asymp(lambda z: hyp1f1(a1, a2, z)) +test_asymp(lambda z: hyp1f2(a1, a2, a3, z)) +test_asymp(lambda z: hyp2f0(a1, a2, z)) +test_asymp(lambda z: hyperu(a1, a2, z)) +test_asymp(lambda z: hyp2f1(a1, a2, a3, z)) +test_asymp(lambda z: hyp2f2(a1, a2, a3, a4, z)) +test_asymp(lambda z: hyp2f3(a1, a2, a3, a4, a5, z)) +test_asymp(lambda z: coulombf(a1, a2, z)) +test_asymp(lambda z: coulombg(a1, a2, z)) +test_asymp(lambda z: polylog(2,z)) +test_asymp(lambda z: polylog(3,z)) +test_asymp(lambda z: polylog(-2,z)) +test_asymp(lambda z: expint(4, z)) +test_asymp(lambda z: expint(-4, z)) +test_asymp(lambda z: expint(2.25, z)) +test_asymp(lambda z: gammainc(2.5, z, 5)) +test_asymp(lambda z: gammainc(2.5, 5, z)) +test_asymp(lambda z: hermite(3, z)) +test_asymp(lambda z: hermite(2.5, z)) +test_asymp(lambda z: legendre(3, z)) +test_asymp(lambda z: legendre(4, z)) +test_asymp(lambda z: legendre(2.5, z)) +test_asymp(lambda z: legenp(a1, a2, z)) +test_asymp(lambda z: legenq(a1, a2, z), maxdps=90) # abnormally slow +test_asymp(lambda z: jtheta(1, z, 0.5)) +test_asymp(lambda z: jtheta(2, z, 0.5)) +test_asymp(lambda z: jtheta(3, z, 0.5)) +test_asymp(lambda z: jtheta(4, z, 0.5)) +test_asymp(lambda z: jtheta(1, z, 0.5, 1)) +test_asymp(lambda z: jtheta(2, z, 0.5, 1)) +test_asymp(lambda z: jtheta(3, z, 0.5, 1)) +test_asymp(lambda z: jtheta(4, z, 0.5, 1)) +test_asymp(barnesg, maxdps=90) +""" + +def testit(line): + if filt in 
line: + print(line) + t1 = clock() + exec_(line, globals(), locals()) + t2 = clock() + elapsed = t2-t1 + print("Time:", elapsed, "for", line, "(OK)") + +if __name__ == '__main__': + try: + from multiprocessing import Pool + mapf = Pool(None).map + print("Running tests with multiprocessing") + except ImportError: + print("Not using multiprocessing") + mapf = map + t1 = clock() + tasks = cases.splitlines() + mapf(testit, tasks) + t2 = clock() + print("Cumulative wall time:", t2-t1) diff --git a/.venv/lib/python3.11/site-packages/mpmath/usertools.py b/.venv/lib/python3.11/site-packages/mpmath/usertools.py new file mode 100644 index 0000000000000000000000000000000000000000..8028a4c46f1c635a6857f1f2de48ac6675d3c6d3 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/usertools.py @@ -0,0 +1,93 @@ + +def monitor(f, input='print', output='print'): + """ + Returns a wrapped copy of *f* that monitors evaluation by calling + *input* with every input (*args*, *kwargs*) passed to *f* and + *output* with every value returned from *f*. The default action + (specify using the special string value ``'print'``) is to print + inputs and outputs to stdout, along with the total evaluation + count:: + + >>> from mpmath import * + >>> mp.dps = 5; mp.pretty = False + >>> diff(monitor(exp), 1) # diff will eval f(x-h) and f(x+h) + in 0 (mpf('0.99999999906867742538452148'),) {} + out 0 mpf('2.7182818259274480055282064') + in 1 (mpf('1.0000000009313225746154785'),) {} + out 1 mpf('2.7182818309906424675501024') + mpf('2.7182808') + + To disable either the input or the output handler, you may + pass *None* as argument. + + Custom input and output handlers may be used e.g. 
to store + results for later analysis:: + + >>> mp.dps = 15 + >>> input = [] + >>> output = [] + >>> findroot(monitor(sin, input.append, output.append), 3.0) + mpf('3.1415926535897932') + >>> len(input) # Count number of evaluations + 9 + >>> print(input[3]); print(output[3]) + ((mpf('3.1415076583334066'),), {}) + 8.49952562843408e-5 + >>> print(input[4]); print(output[4]) + ((mpf('3.1415928201669122'),), {}) + -1.66577118985331e-7 + + """ + if not input: + input = lambda v: None + elif input == 'print': + incount = [0] + def input(value): + args, kwargs = value + print("in %s %r %r" % (incount[0], args, kwargs)) + incount[0] += 1 + if not output: + output = lambda v: None + elif output == 'print': + outcount = [0] + def output(value): + print("out %s %r" % (outcount[0], value)) + outcount[0] += 1 + def f_monitored(*args, **kwargs): + input((args, kwargs)) + v = f(*args, **kwargs) + output(v) + return v + return f_monitored + +def timing(f, *args, **kwargs): + """ + Returns time elapsed for evaluating ``f()``. Optionally arguments + may be passed to time the execution of ``f(*args, **kwargs)``. + + If the first call is very quick, ``f`` is called + repeatedly and the best time is returned. 
+ """ + once = kwargs.get('once') + if 'once' in kwargs: + del kwargs['once'] + if args or kwargs: + if len(args) == 1 and not kwargs: + arg = args[0] + g = lambda: f(arg) + else: + g = lambda: f(*args, **kwargs) + else: + g = f + from timeit import default_timer as clock + t1=clock(); v=g(); t2=clock(); t=t2-t1 + if t > 0.05 or once: + return t + for i in range(3): + t1=clock(); + # Evaluate multiple times because the timer function + # has a significant overhead + g();g();g();g();g();g();g();g();g();g() + t2=clock() + t=min(t,(t2-t1)/10) + return t diff --git a/.venv/lib/python3.11/site-packages/mpmath/visualization.py b/.venv/lib/python3.11/site-packages/mpmath/visualization.py new file mode 100644 index 0000000000000000000000000000000000000000..17e12e97bead4f2977b59361a4de7672f0e9b75f --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/visualization.py @@ -0,0 +1,313 @@ +""" +Plotting (requires matplotlib) +""" + +from colorsys import hsv_to_rgb, hls_to_rgb +from .libmp import NoConvergence +from .libmp.backend import xrange + +class VisualizationMethods(object): + plot_ignore = (ValueError, ArithmeticError, ZeroDivisionError, NoConvergence) + +def plot(ctx, f, xlim=[-5,5], ylim=None, points=200, file=None, dpi=None, + singularities=[], axes=None): + r""" + Shows a simple 2D plot of a function `f(x)` or list of functions + `[f_0(x), f_1(x), \ldots, f_n(x)]` over a given interval + specified by *xlim*. Some examples:: + + plot(lambda x: exp(x)*li(x), [1, 4]) + plot([cos, sin], [-4, 4]) + plot([fresnels, fresnelc], [-4, 4]) + plot([sqrt, cbrt], [-4, 4]) + plot(lambda t: zeta(0.5+t*j), [-20, 20]) + plot([floor, ceil, abs, sign], [-5, 5]) + + Points where the function raises a numerical exception or + returns an infinite value are removed from the graph. 
+ Singularities can also be excluded explicitly + as follows (useful for removing erroneous vertical lines):: + + plot(cot, ylim=[-5, 5]) # bad + plot(cot, ylim=[-5, 5], singularities=[-pi, 0, pi]) # good + + For parts where the function assumes complex values, the + real part is plotted with dashes and the imaginary part + is plotted with dots. + + .. note :: This function requires matplotlib (pylab). + """ + if file: + axes = None + fig = None + if not axes: + import pylab + fig = pylab.figure() + axes = fig.add_subplot(111) + if not isinstance(f, (tuple, list)): + f = [f] + a, b = xlim + colors = ['b', 'r', 'g', 'm', 'k'] + for n, func in enumerate(f): + x = ctx.arange(a, b, (b-a)/float(points)) + segments = [] + segment = [] + in_complex = False + for i in xrange(len(x)): + try: + if i != 0: + for sing in singularities: + if x[i-1] <= sing and x[i] >= sing: + raise ValueError + v = func(x[i]) + if ctx.isnan(v) or abs(v) > 1e300: + raise ValueError + if hasattr(v, "imag") and v.imag: + re = float(v.real) + im = float(v.imag) + if not in_complex: + in_complex = True + segments.append(segment) + segment = [] + segment.append((float(x[i]), re, im)) + else: + if in_complex: + in_complex = False + segments.append(segment) + segment = [] + if hasattr(v, "real"): + v = v.real + segment.append((float(x[i]), v)) + except ctx.plot_ignore: + if segment: + segments.append(segment) + segment = [] + if segment: + segments.append(segment) + for segment in segments: + x = [s[0] for s in segment] + y = [s[1] for s in segment] + if not x: + continue + c = colors[n % len(colors)] + if len(segment[0]) == 3: + z = [s[2] for s in segment] + axes.plot(x, y, '--'+c, linewidth=3) + axes.plot(x, z, ':'+c, linewidth=3) + else: + axes.plot(x, y, c, linewidth=3) + axes.set_xlim([float(_) for _ in xlim]) + if ylim: + axes.set_ylim([float(_) for _ in ylim]) + axes.set_xlabel('x') + axes.set_ylabel('f(x)') + axes.grid(True) + if fig: + if file: + pylab.savefig(file, dpi=dpi) + else: + 
pylab.show() + +def default_color_function(ctx, z): + if ctx.isinf(z): + return (1.0, 1.0, 1.0) + if ctx.isnan(z): + return (0.5, 0.5, 0.5) + pi = 3.1415926535898 + a = (float(ctx.arg(z)) + ctx.pi) / (2*ctx.pi) + a = (a + 0.5) % 1.0 + b = 1.0 - float(1/(1.0+abs(z)**0.3)) + return hls_to_rgb(a, b, 0.8) + +blue_orange_colors = [ + (-1.0, (0.0, 0.0, 0.0)), + (-0.95, (0.1, 0.2, 0.5)), # dark blue + (-0.5, (0.0, 0.5, 1.0)), # blueish + (-0.05, (0.4, 0.8, 0.8)), # cyanish + ( 0.0, (1.0, 1.0, 1.0)), + ( 0.05, (1.0, 0.9, 0.3)), # yellowish + ( 0.5, (0.9, 0.5, 0.0)), # orangeish + ( 0.95, (0.7, 0.1, 0.0)), # redish + ( 1.0, (0.0, 0.0, 0.0)), + ( 2.0, (0.0, 0.0, 0.0)), +] + +def phase_color_function(ctx, z): + if ctx.isinf(z): + return (1.0, 1.0, 1.0) + if ctx.isnan(z): + return (0.5, 0.5, 0.5) + pi = 3.1415926535898 + w = float(ctx.arg(z)) / pi + w = max(min(w, 1.0), -1.0) + for i in range(1,len(blue_orange_colors)): + if blue_orange_colors[i][0] > w: + a, (ra, ga, ba) = blue_orange_colors[i-1] + b, (rb, gb, bb) = blue_orange_colors[i] + s = (w-a) / (b-a) + return ra+(rb-ra)*s, ga+(gb-ga)*s, ba+(bb-ba)*s + +def cplot(ctx, f, re=[-5,5], im=[-5,5], points=2000, color=None, + verbose=False, file=None, dpi=None, axes=None): + """ + Plots the given complex-valued function *f* over a rectangular part + of the complex plane specified by the pairs of intervals *re* and *im*. + For example:: + + cplot(lambda z: z, [-2, 2], [-10, 10]) + cplot(exp) + cplot(zeta, [0, 1], [0, 50]) + + By default, the complex argument (phase) is shown as color (hue) and + the magnitude is show as brightness. You can also supply a + custom color function (*color*). This function should take a + complex number as input and return an RGB 3-tuple containing + floats in the range 0.0-1.0. 
+ + Alternatively, you can select a builtin color function by passing + a string as *color*: + + * "default" - default color scheme + * "phase" - a color scheme that only renders the phase of the function, + with white for positive reals, black for negative reals, gold in the + upper half plane, and blue in the lower half plane. + + To obtain a sharp image, the number of points may need to be + increased to 100,000 or thereabout. Since evaluating the + function that many times is likely to be slow, the 'verbose' + option is useful to display progress. + + .. note :: This function requires matplotlib (pylab). + """ + if color is None or color == "default": + color = ctx.default_color_function + if color == "phase": + color = ctx.phase_color_function + import pylab + if file: + axes = None + fig = None + if not axes: + fig = pylab.figure() + axes = fig.add_subplot(111) + rea, reb = re + ima, imb = im + dre = reb - rea + dim = imb - ima + M = int(ctx.sqrt(points*dre/dim)+1) + N = int(ctx.sqrt(points*dim/dre)+1) + x = pylab.linspace(rea, reb, M) + y = pylab.linspace(ima, imb, N) + # Note: we have to be careful to get the right rotation. + # Test with these plots: + # cplot(lambda z: z if z.real < 0 else 0) + # cplot(lambda z: z if z.imag < 0 else 0) + w = pylab.zeros((N, M, 3)) + for n in xrange(N): + for m in xrange(M): + z = ctx.mpc(x[m], y[n]) + try: + v = color(f(z)) + except ctx.plot_ignore: + v = (0.5, 0.5, 0.5) + w[n,m] = v + if verbose: + print(str(n) + ' of ' + str(N)) + rea, reb, ima, imb = [float(_) for _ in [rea, reb, ima, imb]] + axes.imshow(w, extent=(rea, reb, ima, imb), origin='lower') + axes.set_xlabel('Re(z)') + axes.set_ylabel('Im(z)') + if fig: + if file: + pylab.savefig(file, dpi=dpi) + else: + pylab.show() + +def splot(ctx, f, u=[-5,5], v=[-5,5], points=100, keep_aspect=True, \ + wireframe=False, file=None, dpi=None, axes=None): + """ + Plots the surface defined by `f`. 
+ + If `f` returns a single component, then this plots the surface + defined by `z = f(x,y)` over the rectangular domain with + `x = u` and `y = v`. + + If `f` returns three components, then this plots the parametric + surface `x, y, z = f(u,v)` over the pairs of intervals `u` and `v`. + + For example, to plot a simple function:: + + >>> from mpmath import * + >>> f = lambda x, y: sin(x+y)*cos(y) + >>> splot(f, [-pi,pi], [-pi,pi]) # doctest: +SKIP + + Plotting a donut:: + + >>> r, R = 1, 2.5 + >>> f = lambda u, v: [r*cos(u), (R+r*sin(u))*cos(v), (R+r*sin(u))*sin(v)] + >>> splot(f, [0, 2*pi], [0, 2*pi]) # doctest: +SKIP + + .. note :: This function requires matplotlib (pylab) 0.98.5.3 or higher. + """ + import pylab + import mpl_toolkits.mplot3d as mplot3d + if file: + axes = None + fig = None + if not axes: + fig = pylab.figure() + axes = mplot3d.axes3d.Axes3D(fig) + ua, ub = u + va, vb = v + du = ub - ua + dv = vb - va + if not isinstance(points, (list, tuple)): + points = [points, points] + M, N = points + u = pylab.linspace(ua, ub, M) + v = pylab.linspace(va, vb, N) + x, y, z = [pylab.zeros((M, N)) for i in xrange(3)] + xab, yab, zab = [[0, 0] for i in xrange(3)] + for n in xrange(N): + for m in xrange(M): + fdata = f(ctx.convert(u[m]), ctx.convert(v[n])) + try: + x[m,n], y[m,n], z[m,n] = fdata + except TypeError: + x[m,n], y[m,n], z[m,n] = u[m], v[n], fdata + for c, cab in [(x[m,n], xab), (y[m,n], yab), (z[m,n], zab)]: + if c < cab[0]: + cab[0] = c + if c > cab[1]: + cab[1] = c + if wireframe: + axes.plot_wireframe(x, y, z, rstride=4, cstride=4) + else: + axes.plot_surface(x, y, z, rstride=4, cstride=4) + axes.set_xlabel('x') + axes.set_ylabel('y') + axes.set_zlabel('z') + if keep_aspect: + dx, dy, dz = [cab[1] - cab[0] for cab in [xab, yab, zab]] + maxd = max(dx, dy, dz) + if dx < maxd: + delta = maxd - dx + axes.set_xlim3d(xab[0] - delta / 2.0, xab[1] + delta / 2.0) + if dy < maxd: + delta = maxd - dy + axes.set_ylim3d(yab[0] - delta / 2.0, yab[1] + delta / 
2.0) + if dz < maxd: + delta = maxd - dz + axes.set_zlim3d(zab[0] - delta / 2.0, zab[1] + delta / 2.0) + if fig: + if file: + pylab.savefig(file, dpi=dpi) + else: + pylab.show() + + +VisualizationMethods.plot = plot +VisualizationMethods.default_color_function = default_color_function +VisualizationMethods.phase_color_function = phase_color_function +VisualizationMethods.cplot = cplot +VisualizationMethods.splot = splot diff --git a/.venv/lib/python3.11/site-packages/proto/__pycache__/_file_info.cpython-311.pyc b/.venv/lib/python3.11/site-packages/proto/__pycache__/_file_info.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0982302f5e70e3c7563c399a56450bf8067ade06 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/proto/__pycache__/_file_info.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/proto/__pycache__/_package_info.cpython-311.pyc b/.venv/lib/python3.11/site-packages/proto/__pycache__/_package_info.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..01b9fd8acc4339717a9343771ac84e3605162f6b Binary files /dev/null and b/.venv/lib/python3.11/site-packages/proto/__pycache__/_package_info.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/proto/__pycache__/modules.cpython-311.pyc b/.venv/lib/python3.11/site-packages/proto/__pycache__/modules.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..9a52709d28128ada151eb513639b732ad4f763fe Binary files /dev/null and b/.venv/lib/python3.11/site-packages/proto/__pycache__/modules.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/proto/__pycache__/primitives.cpython-311.pyc b/.venv/lib/python3.11/site-packages/proto/__pycache__/primitives.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..875cd1189f31143e96d3e1b1c3333fb6a35c8fb9 Binary files /dev/null and 
b/.venv/lib/python3.11/site-packages/proto/__pycache__/primitives.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/proto/__pycache__/utils.cpython-311.pyc b/.venv/lib/python3.11/site-packages/proto/__pycache__/utils.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3649ef881f1462a9479bbe40b96422d281cad663 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/proto/__pycache__/utils.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/proto/__pycache__/version.cpython-311.pyc b/.venv/lib/python3.11/site-packages/proto/__pycache__/version.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bdaa0d13622a3c8102c4416bd6fdc39d68f64c88 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/proto/__pycache__/version.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/proto/marshal/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/proto/marshal/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b6860de5d4997422629d6d19250182e7a5d3886e Binary files /dev/null and b/.venv/lib/python3.11/site-packages/proto/marshal/__pycache__/__init__.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/proto/marshal/__pycache__/compat.cpython-311.pyc b/.venv/lib/python3.11/site-packages/proto/marshal/__pycache__/compat.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c53e095b9187954499f3d2d227e0f02772aa27f3 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/proto/marshal/__pycache__/compat.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/proto/marshal/__pycache__/marshal.cpython-311.pyc b/.venv/lib/python3.11/site-packages/proto/marshal/__pycache__/marshal.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ea0a06043ab31e0fc6ec13639a1520191d94a295 
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/proto/marshal/__pycache__/marshal.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/proto/marshal/collections/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/proto/marshal/collections/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8f2569b0e382b0b2f2f0d31cb18099993ba82dbc Binary files /dev/null and b/.venv/lib/python3.11/site-packages/proto/marshal/collections/__pycache__/__init__.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/referencing-0.36.2.dist-info/INSTALLER b/.venv/lib/python3.11/site-packages/referencing-0.36.2.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/referencing-0.36.2.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/lib/python3.11/site-packages/referencing-0.36.2.dist-info/METADATA b/.venv/lib/python3.11/site-packages/referencing-0.36.2.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..78cde34ad1f681da6482410e949c877f2ab71b77 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/referencing-0.36.2.dist-info/METADATA @@ -0,0 +1,64 @@ +Metadata-Version: 2.4 +Name: referencing +Version: 0.36.2 +Summary: JSON Referencing + Python +Project-URL: Documentation, https://referencing.readthedocs.io/ +Project-URL: Homepage, https://github.com/python-jsonschema/referencing +Project-URL: Issues, https://github.com/python-jsonschema/referencing/issues/ +Project-URL: Funding, https://github.com/sponsors/Julian +Project-URL: Tidelift, https://tidelift.com/subscription/pkg/pypi-referencing?utm_source=pypi-referencing&utm_medium=referral&utm_campaign=pypi-link +Project-URL: Changelog, https://referencing.readthedocs.io/en/stable/changes/ +Project-URL: Source, 
https://github.com/python-jsonschema/referencing +Author-email: Julian Berman +License-Expression: MIT +License-File: COPYING +Keywords: asyncapi,json,jsonschema,openapi,referencing +Classifier: Development Status :: 3 - Alpha +Classifier: Intended Audience :: Developers +Classifier: Operating System :: OS Independent +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 3.9 +Classifier: Programming Language :: Python :: 3.10 +Classifier: Programming Language :: Python :: 3.11 +Classifier: Programming Language :: Python :: 3.12 +Classifier: Programming Language :: Python :: 3.13 +Classifier: Programming Language :: Python :: Implementation :: CPython +Classifier: Programming Language :: Python :: Implementation :: PyPy +Classifier: Topic :: File Formats :: JSON +Classifier: Topic :: File Formats :: JSON :: JSON Schema +Requires-Python: >=3.9 +Requires-Dist: attrs>=22.2.0 +Requires-Dist: rpds-py>=0.7.0 +Requires-Dist: typing-extensions>=4.4.0; python_version < '3.13' +Description-Content-Type: text/x-rst + +=============== +``referencing`` +=============== + +|PyPI| |Pythons| |CI| |ReadTheDocs| |pre-commit| + +.. |PyPI| image:: https://img.shields.io/pypi/v/referencing.svg + :alt: PyPI version + :target: https://pypi.org/project/referencing/ + +.. |Pythons| image:: https://img.shields.io/pypi/pyversions/referencing.svg + :alt: Supported Python versions + :target: https://pypi.org/project/referencing/ + +.. |CI| image:: https://github.com/python-jsonschema/referencing/workflows/CI/badge.svg + :alt: Build status + :target: https://github.com/python-jsonschema/referencing/actions?query=workflow%3ACI + +.. |ReadTheDocs| image:: https://readthedocs.org/projects/referencing/badge/?version=stable&style=flat + :alt: ReadTheDocs status + :target: https://referencing.readthedocs.io/en/stable/ + +.. 
|pre-commit| image:: https://results.pre-commit.ci/badge/github/python-jsonschema/referencing/main.svg + :alt: pre-commit.ci status + :target: https://results.pre-commit.ci/latest/github/python-jsonschema/referencing/main + + +An implementation-agnostic implementation of JSON reference resolution. + +See `the documentation `_ for more details. diff --git a/.venv/lib/python3.11/site-packages/referencing-0.36.2.dist-info/RECORD b/.venv/lib/python3.11/site-packages/referencing-0.36.2.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..0f5daa705d7610fbe561819301c9b6c6d21f4a62 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/referencing-0.36.2.dist-info/RECORD @@ -0,0 +1,33 @@ +referencing-0.36.2.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +referencing-0.36.2.dist-info/METADATA,sha256=8eyM93pT0UngPkNw0ZxSgXU8d_JKvDRo0nxhSwW9cgY,2843 +referencing-0.36.2.dist-info/RECORD,, +referencing-0.36.2.dist-info/WHEEL,sha256=qtCwoSJWgHk21S1Kb4ihdzI2rlJ1ZKaIurTj_ngOhyQ,87 +referencing-0.36.2.dist-info/licenses/COPYING,sha256=QtzWNJX4e063x3V6-jebtVpT-Ur9el9lfZrfVyNuUVw,1057 +referencing/__init__.py,sha256=5IZKXaAH_FWyCJRkaTn1XcptLfg9cveLb9u5nYUxJKs,207 +referencing/__pycache__/__init__.cpython-311.pyc,, +referencing/__pycache__/_attrs.cpython-311.pyc,, +referencing/__pycache__/_core.cpython-311.pyc,, +referencing/__pycache__/exceptions.cpython-311.pyc,, +referencing/__pycache__/jsonschema.cpython-311.pyc,, +referencing/__pycache__/retrieval.cpython-311.pyc,, +referencing/__pycache__/typing.cpython-311.pyc,, +referencing/_attrs.py,sha256=bgT-KMhDVLeGtWxM_SGKYeLaZBFzT2kUVFdAkOcXi8g,791 +referencing/_attrs.pyi,sha256=J6StMUKqixO4H7Eii9-TXNfCOfS8aHm-1ewimOA-8oo,559 +referencing/_core.py,sha256=0SJfZW68dOrLMaFdhMyuyYzb0Bi9d0BcPjGwijesf9E,24830 +referencing/exceptions.py,sha256=zFgaEg6WiKeT58MQuKNsgGDnHszp26c4oReC6sF9gHM,4176 +referencing/jsonschema.py,sha256=jFURIFOnuxtE4doPL1xSDeuQ4OhdxNzSn8MRRbTeVyk,18628 
+referencing/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +referencing/retrieval.py,sha256=QYlOvhiQeDI12XKwezhZ3XOUzqBTFE8b5TpfATamA7I,2697 +referencing/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +referencing/tests/__pycache__/__init__.cpython-311.pyc,, +referencing/tests/__pycache__/test_core.cpython-311.pyc,, +referencing/tests/__pycache__/test_exceptions.cpython-311.pyc,, +referencing/tests/__pycache__/test_jsonschema.cpython-311.pyc,, +referencing/tests/__pycache__/test_referencing_suite.cpython-311.pyc,, +referencing/tests/__pycache__/test_retrieval.cpython-311.pyc,, +referencing/tests/test_core.py,sha256=eap0CAaI23vjMIbVyEj92qLddp3iHH3AxC55CKUN4LU,37854 +referencing/tests/test_exceptions.py,sha256=7eOdHyobXMt7-h5AnnH7u8iw2uHPaH7U4Bs9JhLgjWo,934 +referencing/tests/test_jsonschema.py,sha256=4QnjUWOAMAn5yeA8ZtldJkhI54vwKWJWB0LDzNdx5xc,11687 +referencing/tests/test_referencing_suite.py,sha256=wD6veMfLsUu0s4MLjm7pS8cg4cIfL7FMBENngk73zCI,2335 +referencing/tests/test_retrieval.py,sha256=vcbnfA4TqVeqUzW073wO-nLeqVIv0rQZWNWv0z9km48,3719 +referencing/typing.py,sha256=WjUbnZ6jPAd31cnCFAaeWIVENzyHtHdJyOlelv1GY70,1445 diff --git a/.venv/lib/python3.11/site-packages/referencing-0.36.2.dist-info/WHEEL b/.venv/lib/python3.11/site-packages/referencing-0.36.2.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..12228d414b6cfed7c39d3781c85c63256a1d7fb5 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/referencing-0.36.2.dist-info/WHEEL @@ -0,0 +1,4 @@ +Wheel-Version: 1.0 +Generator: hatchling 1.27.0 +Root-Is-Purelib: true +Tag: py3-none-any diff --git a/.venv/lib/python3.11/site-packages/referencing-0.36.2.dist-info/licenses/COPYING b/.venv/lib/python3.11/site-packages/referencing-0.36.2.dist-info/licenses/COPYING new file mode 100644 index 0000000000000000000000000000000000000000..a9f853e43069b8e3f8a156a4af2b1198a004230d --- /dev/null +++ 
b/.venv/lib/python3.11/site-packages/referencing-0.36.2.dist-info/licenses/COPYING @@ -0,0 +1,19 @@ +Copyright (c) 2022 Julian Berman + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE.