diff --git a/.venv/lib/python3.11/site-packages/mpmath/calculus/__init__.py b/.venv/lib/python3.11/site-packages/mpmath/calculus/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..040a3806b968f75b8d1a88ae37a4979fe83d466a --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/calculus/__init__.py @@ -0,0 +1,6 @@ +from . import calculus +# XXX: hack to set methods +from . import approximation +from . import differentiation +from . import extrapolation +from . import polynomials diff --git a/.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce77c8ddae07425dfca661b690787b1542d636ce Binary files /dev/null and b/.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/__init__.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/approximation.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/approximation.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1129169feda470dbf6df70eaed9cb77e66106d4c Binary files /dev/null and b/.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/approximation.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/calculus.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/calculus.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b50303bac2964b3182e04f902f6b0d8fb02be80f Binary files /dev/null and b/.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/calculus.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/differentiation.cpython-311.pyc 
b/.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/differentiation.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..84f5f6d2d3c2c7ee9602b8c154a7dda4110d0b9e Binary files /dev/null and b/.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/differentiation.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/extrapolation.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/extrapolation.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd6e1992dca3d049031f82ac1610801aa372167d Binary files /dev/null and b/.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/extrapolation.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/inverselaplace.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/inverselaplace.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..de629a7e2dc7f79403b10df3542e4ae475f87391 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/inverselaplace.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/odes.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/odes.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5787cd0c1f5ff7c04d2d14099375071ffb9654e1 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/odes.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/optimization.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/optimization.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4ed146c7d5b9017dca62ffe4edd329348caffb17 Binary files /dev/null and 
b/.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/optimization.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/polynomials.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/polynomials.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..965e32c6cbfa54a5e642c1fefa00fbcafd8ca49d Binary files /dev/null and b/.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/polynomials.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/quadrature.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/quadrature.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..18111af7e943b03b4456df29de40b1066d4dbfb7 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/quadrature.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/calculus/approximation.py b/.venv/lib/python3.11/site-packages/mpmath/calculus/approximation.py new file mode 100644 index 0000000000000000000000000000000000000000..7ca5cc598fb53491cb6ae4a41a40477c58544d53 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/calculus/approximation.py @@ -0,0 +1,246 @@ +from ..libmp.backend import xrange +from .calculus import defun + +#----------------------------------------------------------------------------# +# Approximation methods # +#----------------------------------------------------------------------------# + +# The Chebyshev approximation formula is given at: +# http://mathworld.wolfram.com/ChebyshevApproximationFormula.html + +# The only major changes in the following code is that we return the +# expanded polynomial coefficients instead of Chebyshev coefficients, +# and that we automatically transform [a,b] -> [-1,1] and back +# for convenience. 
+ +# Coefficient in Chebyshev approximation +def chebcoeff(ctx,f,a,b,j,N): + s = ctx.mpf(0) + h = ctx.mpf(0.5) + for k in range(1, N+1): + t = ctx.cospi((k-h)/N) + s += f(t*(b-a)*h + (b+a)*h) * ctx.cospi(j*(k-h)/N) + return 2*s/N + +# Generate Chebyshev polynomials T_n(ax+b) in expanded form +def chebT(ctx, a=1, b=0): + Tb = [1] + yield Tb + Ta = [b, a] + while 1: + yield Ta + # Recurrence: T[n+1](ax+b) = 2*(ax+b)*T[n](ax+b) - T[n-1](ax+b) + Tmp = [0] + [2*a*t for t in Ta] + for i, c in enumerate(Ta): Tmp[i] += 2*b*c + for i, c in enumerate(Tb): Tmp[i] -= c + Ta, Tb = Tmp, Ta + +@defun +def chebyfit(ctx, f, interval, N, error=False): + r""" + Computes a polynomial of degree `N-1` that approximates the + given function `f` on the interval `[a, b]`. With ``error=True``, + :func:`~mpmath.chebyfit` also returns an accurate estimate of the + maximum absolute error; that is, the maximum value of + `|f(x) - P(x)|` for `x \in [a, b]`. + + :func:`~mpmath.chebyfit` uses the Chebyshev approximation formula, + which gives a nearly optimal solution: that is, the maximum + error of the approximating polynomial is very close to + the smallest possible for any polynomial of the same degree. + + Chebyshev approximation is very useful if one needs repeated + evaluation of an expensive function, such as function defined + implicitly by an integral or a differential equation. (For + example, it could be used to turn a slow mpmath function + into a fast machine-precision version of the same.) 
+ + **Examples** + + Here we use :func:`~mpmath.chebyfit` to generate a low-degree approximation + of `f(x) = \cos(x)`, valid on the interval `[1, 2]`:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> poly, err = chebyfit(cos, [1, 2], 5, error=True) + >>> nprint(poly) + [0.00291682, 0.146166, -0.732491, 0.174141, 0.949553] + >>> nprint(err, 12) + 1.61351758081e-5 + + The polynomial can be evaluated using ``polyval``:: + + >>> nprint(polyval(poly, 1.6), 12) + -0.0291858904138 + >>> nprint(cos(1.6), 12) + -0.0291995223013 + + Sampling the true error at 1000 points shows that the error + estimate generated by ``chebyfit`` is remarkably good:: + + >>> error = lambda x: abs(cos(x) - polyval(poly, x)) + >>> nprint(max([error(1+n/1000.) for n in range(1000)]), 12) + 1.61349954245e-5 + + **Choice of degree** + + The degree `N` can be set arbitrarily high, to obtain an + arbitrarily good approximation. As a rule of thumb, an + `N`-term Chebyshev approximation is good to `N/(b-a)` decimal + places on a unit interval (although this depends on how + well-behaved `f` is). The cost grows accordingly: ``chebyfit`` + evaluates the function `(N^2)/2` times to compute the + coefficients and an additional `N` times to estimate the error. + + **Possible issues** + + One should be careful to use a sufficiently high working + precision both when calling ``chebyfit`` and when evaluating + the resulting polynomial, as the polynomial is sometimes + ill-conditioned. It is for example difficult to reach + 15-digit accuracy when evaluating the polynomial using + machine precision floats, no matter the theoretical + accuracy of the polynomial. (The option to return the + coefficients in Chebyshev form should be made available + in the future.) + + It is important to note the Chebyshev approximation works + poorly if `f` is not smooth. 
A function containing singularities, + rapid oscillation, etc can be approximated more effectively by + multiplying it by a weight function that cancels out the + nonsmooth features, or by dividing the interval into several + segments. + """ + a, b = ctx._as_points(interval) + orig = ctx.prec + try: + ctx.prec = orig + int(N**0.5) + 20 + c = [chebcoeff(ctx,f,a,b,k,N) for k in range(N)] + d = [ctx.zero] * N + d[0] = -c[0]/2 + h = ctx.mpf(0.5) + T = chebT(ctx, ctx.mpf(2)/(b-a), ctx.mpf(-1)*(b+a)/(b-a)) + for (k, Tk) in zip(range(N), T): + for i in range(len(Tk)): + d[i] += c[k]*Tk[i] + d = d[::-1] + # Estimate maximum error + err = ctx.zero + for k in range(N): + x = ctx.cos(ctx.pi*k/N) * (b-a)*h + (b+a)*h + err = max(err, abs(f(x) - ctx.polyval(d, x))) + finally: + ctx.prec = orig + if error: + return d, +err + else: + return d + +@defun +def fourier(ctx, f, interval, N): + r""" + Computes the Fourier series of degree `N` of the given function + on the interval `[a, b]`. More precisely, :func:`~mpmath.fourier` returns + two lists `(c, s)` of coefficients (the cosine series and sine + series, respectively), such that + + .. math :: + + f(x) \sim \sum_{k=0}^N + c_k \cos(k m x) + s_k \sin(k m x) + + where `m = 2 \pi / (b-a)`. + + Note that many texts define the first coefficient as `2 c_0` instead + of `c_0`. The easiest way to evaluate the computed series correctly + is to pass it to :func:`~mpmath.fourierval`. + + **Examples** + + The function `f(x) = x` has a simple Fourier series on the standard + interval `[-\pi, \pi]`. 
The cosine coefficients are all zero (because + the function has odd symmetry), and the sine coefficients are + rational numbers:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> c, s = fourier(lambda x: x, [-pi, pi], 5) + >>> nprint(c) + [0.0, 0.0, 0.0, 0.0, 0.0, 0.0] + >>> nprint(s) + [0.0, 2.0, -1.0, 0.666667, -0.5, 0.4] + + This computes a Fourier series of a nonsymmetric function on + a nonstandard interval:: + + >>> I = [-1, 1.5] + >>> f = lambda x: x**2 - 4*x + 1 + >>> cs = fourier(f, I, 4) + >>> nprint(cs[0]) + [0.583333, 1.12479, -1.27552, 0.904708, -0.441296] + >>> nprint(cs[1]) + [0.0, -2.6255, 0.580905, 0.219974, -0.540057] + + It is instructive to plot a function along with its truncated + Fourier series:: + + >>> plot([f, lambda x: fourierval(cs, I, x)], I) #doctest: +SKIP + + Fourier series generally converge slowly (and may not converge + pointwise). For example, if `f(x) = \cosh(x)`, a 10-term Fourier + series gives an `L^2` error corresponding to 2-digit accuracy:: + + >>> I = [-1, 1] + >>> cs = fourier(cosh, I, 9) + >>> g = lambda x: (cosh(x) - fourierval(cs, I, x))**2 + >>> nprint(sqrt(quad(g, I))) + 0.00467963 + + :func:`~mpmath.fourier` uses numerical quadrature. 
For nonsmooth functions, + the accuracy (and speed) can be improved by including all singular + points in the interval specification:: + + >>> nprint(fourier(abs, [-1, 1], 0), 10) + ([0.5000441648], [0.0]) + >>> nprint(fourier(abs, [-1, 0, 1], 0), 10) + ([0.5], [0.0]) + + """ + interval = ctx._as_points(interval) + a = interval[0] + b = interval[-1] + L = b-a + cos_series = [] + sin_series = [] + cutoff = ctx.eps*10 + for n in xrange(N+1): + m = 2*n*ctx.pi/L + an = 2*ctx.quadgl(lambda t: f(t)*ctx.cos(m*t), interval)/L + bn = 2*ctx.quadgl(lambda t: f(t)*ctx.sin(m*t), interval)/L + if n == 0: + an /= 2 + if abs(an) < cutoff: an = ctx.zero + if abs(bn) < cutoff: bn = ctx.zero + cos_series.append(an) + sin_series.append(bn) + return cos_series, sin_series + +@defun +def fourierval(ctx, series, interval, x): + """ + Evaluates a Fourier series (in the format computed by + by :func:`~mpmath.fourier` for the given interval) at the point `x`. + + The series should be a pair `(c, s)` where `c` is the + cosine series and `s` is the sine series. The two lists + need not have the same length. 
+ """ + cs, ss = series + ab = ctx._as_points(interval) + a = interval[0] + b = interval[-1] + m = 2*ctx.pi/(ab[-1]-ab[0]) + s = ctx.zero + s += ctx.fsum(cs[n]*ctx.cos(m*n*x) for n in xrange(len(cs)) if cs[n]) + s += ctx.fsum(ss[n]*ctx.sin(m*n*x) for n in xrange(len(ss)) if ss[n]) + return s diff --git a/.venv/lib/python3.11/site-packages/mpmath/calculus/calculus.py b/.venv/lib/python3.11/site-packages/mpmath/calculus/calculus.py new file mode 100644 index 0000000000000000000000000000000000000000..24256f121d6c07e5ce954f2a5f5024f156f64016 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/calculus/calculus.py @@ -0,0 +1,6 @@ +class CalculusMethods(object): + pass + +def defun(f): + setattr(CalculusMethods, f.__name__, f) + return f diff --git a/.venv/lib/python3.11/site-packages/mpmath/calculus/extrapolation.py b/.venv/lib/python3.11/site-packages/mpmath/calculus/extrapolation.py new file mode 100644 index 0000000000000000000000000000000000000000..7df0fea3c62c9b71ee24d3f39fd9b7fd3318ed23 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/calculus/extrapolation.py @@ -0,0 +1,2115 @@ +try: + from itertools import izip +except ImportError: + izip = zip + +from ..libmp.backend import xrange +from .calculus import defun + +try: + next = next +except NameError: + next = lambda _: _.next() + +@defun +def richardson(ctx, seq): + r""" + Given a list ``seq`` of the first `N` elements of a slowly convergent + infinite sequence, :func:`~mpmath.richardson` computes the `N`-term + Richardson extrapolate for the limit. + + :func:`~mpmath.richardson` returns `(v, c)` where `v` is the estimated + limit and `c` is the magnitude of the largest weight used during the + computation. The weight provides an estimate of the precision + lost to cancellation. Due to cancellation effects, the sequence must + be typically be computed at a much higher precision than the target + accuracy of the extrapolation. 
+ + **Applicability and issues** + + The `N`-step Richardson extrapolation algorithm used by + :func:`~mpmath.richardson` is described in [1]. + + Richardson extrapolation only works for a specific type of sequence, + namely one converging like partial sums of + `P(1)/Q(1) + P(2)/Q(2) + \ldots` where `P` and `Q` are polynomials. + When the sequence does not convergence at such a rate + :func:`~mpmath.richardson` generally produces garbage. + + Richardson extrapolation has the advantage of being fast: the `N`-term + extrapolate requires only `O(N)` arithmetic operations, and usually + produces an estimate that is accurate to `O(N)` digits. Contrast with + the Shanks transformation (see :func:`~mpmath.shanks`), which requires + `O(N^2)` operations. + + :func:`~mpmath.richardson` is unable to produce an estimate for the + approximation error. One way to estimate the error is to perform + two extrapolations with slightly different `N` and comparing the + results. + + Richardson extrapolation does not work for oscillating sequences. + As a simple workaround, :func:`~mpmath.richardson` detects if the last + three elements do not differ monotonically, and in that case + applies extrapolation only to the even-index elements. + + **Example** + + Applying Richardson extrapolation to the Leibniz series for `\pi`:: + + >>> from mpmath import * + >>> mp.dps = 30; mp.pretty = True + >>> S = [4*sum(mpf(-1)**n/(2*n+1) for n in range(m)) + ... for m in range(1,30)] + >>> v, c = richardson(S[:10]) + >>> v + 3.2126984126984126984126984127 + >>> nprint([v-pi, c]) + [0.0711058, 2.0] + + >>> v, c = richardson(S[:30]) + >>> v + 3.14159265468624052829954206226 + >>> nprint([v-pi, c]) + [1.09645e-9, 20833.3] + + **References** + + 1. [BenderOrszag]_ pp. 
375-376 + + """ + if len(seq) < 3: + raise ValueError("seq should be of minimum length 3") + if ctx.sign(seq[-1]-seq[-2]) != ctx.sign(seq[-2]-seq[-3]): + seq = seq[::2] + N = len(seq)//2-1 + s = ctx.zero + # The general weight is c[k] = (N+k)**N * (-1)**(k+N) / k! / (N-k)! + # To avoid repeated factorials, we simplify the quotient + # of successive weights to obtain a recurrence relation + c = (-1)**N * N**N / ctx.mpf(ctx._ifac(N)) + maxc = 1 + for k in xrange(N+1): + s += c * seq[N+k] + maxc = max(abs(c), maxc) + c *= (k-N)*ctx.mpf(k+N+1)**N + c /= ((1+k)*ctx.mpf(k+N)**N) + return s, maxc + +@defun +def shanks(ctx, seq, table=None, randomized=False): + r""" + Given a list ``seq`` of the first `N` elements of a slowly + convergent infinite sequence `(A_k)`, :func:`~mpmath.shanks` computes the iterated + Shanks transformation `S(A), S(S(A)), \ldots, S^{N/2}(A)`. The Shanks + transformation often provides strong convergence acceleration, + especially if the sequence is oscillating. + + The iterated Shanks transformation is computed using the Wynn + epsilon algorithm (see [1]). :func:`~mpmath.shanks` returns the full + epsilon table generated by Wynn's algorithm, which can be read + off as follows: + + * The table is a list of lists forming a lower triangular matrix, + where higher row and column indices correspond to more accurate + values. + * The columns with even index hold dummy entries (required for the + computation) and the columns with odd index hold the actual + extrapolates. + * The last element in the last row is typically the most + accurate estimate of the limit. + * The difference to the third last element in the last row + provides an estimate of the approximation error. + * The magnitude of the second last element provides an estimate + of the numerical accuracy lost to cancellation. + + For convenience, so the extrapolation is stopped at an odd index + so that ``shanks(seq)[-1][-1]`` always gives an estimate of the + limit. 
+ + Optionally, an existing table can be passed to :func:`~mpmath.shanks`. + This can be used to efficiently extend a previous computation after + new elements have been appended to the sequence. The table will + then be updated in-place. + + **The Shanks transformation** + + The Shanks transformation is defined as follows (see [2]): given + the input sequence `(A_0, A_1, \ldots)`, the transformed sequence is + given by + + .. math :: + + S(A_k) = \frac{A_{k+1}A_{k-1}-A_k^2}{A_{k+1}+A_{k-1}-2 A_k} + + The Shanks transformation gives the exact limit `A_{\infty}` in a + single step if `A_k = A + a q^k`. Note in particular that it + extrapolates the exact sum of a geometric series in a single step. + + Applying the Shanks transformation once often improves convergence + substantially for an arbitrary sequence, but the optimal effect is + obtained by applying it iteratively: + `S(S(A_k)), S(S(S(A_k))), \ldots`. + + Wynn's epsilon algorithm provides an efficient way to generate + the table of iterated Shanks transformations. It reduces the + computation of each element to essentially a single division, at + the cost of requiring dummy elements in the table. See [1] for + details. + + **Precision issues** + + Due to cancellation effects, the sequence must be typically be + computed at a much higher precision than the target accuracy + of the extrapolation. + + If the Shanks transformation converges to the exact limit (such + as if the sequence is a geometric series), then a division by + zero occurs. By default, :func:`~mpmath.shanks` handles this case by + terminating the iteration and returning the table it has + generated so far. With *randomized=True*, it will instead + replace the zero by a pseudorandom number close to zero. + (TODO: find a better solution to this problem.) 
+ + **Examples** + + We illustrate by applying Shanks transformation to the Leibniz + series for `\pi`:: + + >>> from mpmath import * + >>> mp.dps = 50 + >>> S = [4*sum(mpf(-1)**n/(2*n+1) for n in range(m)) + ... for m in range(1,30)] + >>> + >>> T = shanks(S[:7]) + >>> for row in T: + ... nprint(row) + ... + [-0.75] + [1.25, 3.16667] + [-1.75, 3.13333, -28.75] + [2.25, 3.14524, 82.25, 3.14234] + [-2.75, 3.13968, -177.75, 3.14139, -969.937] + [3.25, 3.14271, 327.25, 3.14166, 3515.06, 3.14161] + + The extrapolated accuracy is about 4 digits, and about 4 digits + may have been lost due to cancellation:: + + >>> L = T[-1] + >>> nprint([abs(L[-1] - pi), abs(L[-1] - L[-3]), abs(L[-2])]) + [2.22532e-5, 4.78309e-5, 3515.06] + + Now we extend the computation:: + + >>> T = shanks(S[:25], T) + >>> L = T[-1] + >>> nprint([abs(L[-1] - pi), abs(L[-1] - L[-3]), abs(L[-2])]) + [3.75527e-19, 1.48478e-19, 2.96014e+17] + + The value for pi is now accurate to 18 digits. About 18 digits may + also have been lost to cancellation. + + Here is an example with a geometric series, where the convergence + is immediate (the sum is exactly 1):: + + >>> mp.dps = 15 + >>> for row in shanks([0.5, 0.75, 0.875, 0.9375, 0.96875]): + ... nprint(row) + [4.0] + [8.0, 1.0] + + **References** + + 1. [GravesMorris]_ + + 2. [BenderOrszag]_ pp. 
368-375 + + """ + if len(seq) < 2: + raise ValueError("seq should be of minimum length 2") + if table: + START = len(table) + else: + START = 0 + table = [] + STOP = len(seq) - 1 + if STOP & 1: + STOP -= 1 + one = ctx.one + eps = +ctx.eps + if randomized: + from random import Random + rnd = Random() + rnd.seed(START) + for i in xrange(START, STOP): + row = [] + for j in xrange(i+1): + if j == 0: + a, b = 0, seq[i+1]-seq[i] + else: + if j == 1: + a = seq[i] + else: + a = table[i-1][j-2] + b = row[j-1] - table[i-1][j-1] + if not b: + if randomized: + b = (1 + rnd.getrandbits(10))*eps + elif i & 1: + return table[:-1] + else: + return table + row.append(a + one/b) + table.append(row) + return table + + +class levin_class: + # levin: Copyright 2013 Timo Hartmann (thartmann15 at gmail.com) + r""" + This interface implements Levin's (nonlinear) sequence transformation for + convergence acceleration and summation of divergent series. It performs + better than the Shanks/Wynn-epsilon algorithm for logarithmic convergent + or alternating divergent series. + + Let *A* be the series we want to sum: + + .. math :: + + A = \sum_{k=0}^{\infty} a_k + + Attention: all `a_k` must be non-zero! + + Let `s_n` be the partial sums of this series: + + .. math :: + + s_n = \sum_{k=0}^n a_k. + + **Methods** + + Calling ``levin`` returns an object with the following methods. + + ``update(...)`` works with the list of individual terms `a_k` of *A*, and + ``update_step(...)`` works with the list of partial sums `s_k` of *A*: + + .. code :: + + v, e = ...update([a_0, a_1,..., a_k]) + v, e = ...update_psum([s_0, s_1,..., s_k]) + + ``step(...)`` works with the individual terms `a_k` and ``step_psum(...)`` + works with the partial sums `s_k`: + + .. code :: + + v, e = ...step(a_k) + v, e = ...step_psum(s_k) + + *v* is the current estimate for *A*, and *e* is an error estimate which is + simply the difference between the current estimate and the last estimate. 
+ One should not mix ``update``, ``update_psum``, ``step`` and ``step_psum``. + + **A word of caution** + + One can only hope for good results (i.e. convergence acceleration or + resummation) if the `s_n` have some well defind asymptotic behavior for + large `n` and are not erratic or random. Furthermore one usually needs very + high working precision because of the numerical cancellation. If the working + precision is insufficient, levin may produce silently numerical garbage. + Furthermore even if the Levin-transformation converges, in the general case + there is no proof that the result is mathematically sound. Only for very + special classes of problems one can prove that the Levin-transformation + converges to the expected result (for example Stieltjes-type integrals). + Furthermore the Levin-transform is quite expensive (i.e. slow) in comparison + to Shanks/Wynn-epsilon, Richardson & co. + In summary one can say that the Levin-transformation is powerful but + unreliable and that it may need a copious amount of working precision. + + The Levin transform has several variants differing in the choice of weights. + Some variants are better suited for the possible flavours of convergence + behaviour of *A* than other variants: + + .. code :: + + convergence behaviour levin-u levin-t levin-v shanks/wynn-epsilon + + logarithmic + - + - + linear + + + + + alternating divergent + + + + + + "+" means the variant is suitable,"-" means the variant is not suitable; + for comparison the Shanks/Wynn-epsilon transform is listed, too. + + The variant is controlled though the variant keyword (i.e. ``variant="u"``, + ``variant="t"`` or ``variant="v"``). Overall "u" is probably the best choice. + + Finally it is possible to use the Sidi-S transform instead of the Levin transform + by using the keyword ``method='sidi'``. The Sidi-S transform works better than the + Levin transformation for some divergent series (see the examples). + + Parameters: + + .. 
code :: + + method "levin" or "sidi" chooses either the Levin or the Sidi-S transformation + variant "u","t" or "v" chooses the weight variant. + + The Levin transform is also accessible through the nsum interface. + ``method="l"`` or ``method="levin"`` select the normal Levin transform while + ``method="sidi"`` + selects the Sidi-S transform. The variant is in both cases selected through the + levin_variant keyword. The stepsize in :func:`~mpmath.nsum` must not be chosen too large, otherwise + it will miss the point where the Levin transform converges resulting in numerical + overflow/garbage. For highly divergent series a copious amount of working precision + must be chosen. + + **Examples** + + First we sum the zeta function:: + + >>> from mpmath import mp + >>> mp.prec = 53 + >>> eps = mp.mpf(mp.eps) + >>> with mp.extraprec(2 * mp.prec): # levin needs a high working precision + ... L = mp.levin(method = "levin", variant = "u") + ... S, s, n = [], 0, 1 + ... while 1: + ... s += mp.one / (n * n) + ... n += 1 + ... S.append(s) + ... v, e = L.update_psum(S) + ... if e < eps: + ... break + ... if n > 1000: raise RuntimeError("iteration limit exceeded") + >>> print(mp.chop(v - mp.pi ** 2 / 6)) + 0.0 + >>> w = mp.nsum(lambda n: 1 / (n*n), [1, mp.inf], method = "levin", levin_variant = "u") + >>> print(mp.chop(v - w)) + 0.0 + + Now we sum the zeta function outside its range of convergence + (attention: This does not work at the negative integers!):: + + >>> eps = mp.mpf(mp.eps) + >>> with mp.extraprec(2 * mp.prec): # levin needs a high working precision + ... L = mp.levin(method = "levin", variant = "v") + ... A, n = [], 1 + ... while 1: + ... s = mp.mpf(n) ** (2 + 3j) + ... n += 1 + ... A.append(s) + ... v, e = L.update(A) + ... if e < eps: + ... break + ... 
if n > 1000: raise RuntimeError("iteration limit exceeded") + >>> print(mp.chop(v - mp.zeta(-2-3j))) + 0.0 + >>> w = mp.nsum(lambda n: n ** (2 + 3j), [1, mp.inf], method = "levin", levin_variant = "v") + >>> print(mp.chop(v - w)) + 0.0 + + Now we sum the divergent asymptotic expansion of an integral related to the + exponential integral (see also [2] p.373). The Sidi-S transform works best here:: + + >>> z = mp.mpf(10) + >>> exact = mp.quad(lambda x: mp.exp(-x)/(1+x/z),[0,mp.inf]) + >>> # exact = z * mp.exp(z) * mp.expint(1,z) # this is the symbolic expression for the integral + >>> eps = mp.mpf(mp.eps) + >>> with mp.extraprec(2 * mp.prec): # high working precisions are mandatory for divergent resummation + ... L = mp.levin(method = "sidi", variant = "t") + ... n = 0 + ... while 1: + ... s = (-1)**n * mp.fac(n) * z ** (-n) + ... v, e = L.step(s) + ... n += 1 + ... if e < eps: + ... break + ... if n > 1000: raise RuntimeError("iteration limit exceeded") + >>> print(mp.chop(v - exact)) + 0.0 + >>> w = mp.nsum(lambda n: (-1) ** n * mp.fac(n) * z ** (-n), [0, mp.inf], method = "sidi", levin_variant = "t") + >>> print(mp.chop(v - w)) + 0.0 + + Another highly divergent integral is also summable:: + + >>> z = mp.mpf(2) + >>> eps = mp.mpf(mp.eps) + >>> exact = mp.quad(lambda x: mp.exp( -x * x / 2 - z * x ** 4), [0,mp.inf]) * 2 / mp.sqrt(2 * mp.pi) + >>> # exact = mp.exp(mp.one / (32 * z)) * mp.besselk(mp.one / 4, mp.one / (32 * z)) / (4 * mp.sqrt(z * mp.pi)) # this is the symbolic expression for the integral + >>> with mp.extraprec(7 * mp.prec): # we need copious amount of precision to sum this highly divergent series + ... L = mp.levin(method = "levin", variant = "t") + ... n, s = 0, 0 + ... while 1: + ... s += (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)) + ... n += 1 + ... v, e = L.step_psum(s) + ... if e < eps: + ... break + ... 
if n > 1000: raise RuntimeError("iteration limit exceeded") + >>> print(mp.chop(v - exact)) + 0.0 + >>> w = mp.nsum(lambda n: (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)), + ... [0, mp.inf], method = "levin", levin_variant = "t", workprec = 8*mp.prec, steps = [2] + [1 for x in xrange(1000)]) + >>> print(mp.chop(v - w)) + 0.0 + + These examples run with 15-20 decimal digits precision. For higher precision the + working precision must be raised. + + **Examples for nsum** + + Here we calculate Euler's constant as the constant term in the Laurent + expansion of `\zeta(s)` at `s=1`. This sum converges extremly slowly because of + the logarithmic convergence behaviour of the Dirichlet series for zeta:: + + >>> mp.dps = 30 + >>> z = mp.mpf(10) ** (-10) + >>> a = mp.nsum(lambda n: n**(-(1+z)), [1, mp.inf], method = "l") - 1 / z + >>> print(mp.chop(a - mp.euler, tol = 1e-10)) + 0.0 + + The Sidi-S transform performs excellently for the alternating series of `\log(2)`:: + + >>> a = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "sidi") + >>> print(mp.chop(a - mp.log(2))) + 0.0 + + Hypergeometric series can also be summed outside their range of convergence. + The stepsize in :func:`~mpmath.nsum` must not be chosen too large, otherwise it will miss the + point where the Levin transform converges resulting in numerical overflow/garbage:: + + >>> z = 2 + 1j + >>> exact = mp.hyp2f1(2 / mp.mpf(3), 4 / mp.mpf(3), 1 / mp.mpf(3), z) + >>> f = lambda n: mp.rf(2 / mp.mpf(3), n) * mp.rf(4 / mp.mpf(3), n) * z**n / (mp.rf(1 / mp.mpf(3), n) * mp.fac(n)) + >>> v = mp.nsum(f, [0, mp.inf], method = "levin", steps = [10 for x in xrange(1000)]) + >>> print(mp.chop(exact-v)) + 0.0 + + References: + + [1] E.J. Weniger - "Nonlinear Sequence Transformations for the Acceleration of + Convergence and the Summation of Divergent Series" arXiv:math/0306302 + + [2] A. Sidi - "Pratical Extrapolation Methods" + + [3] H.H.H. 
Homeier - "Scalar Levin-Type Sequence Transformations" arXiv:math/0005209 + + """ + + def __init__(self, method = "levin", variant = "u"): + self.variant = variant + self.n = 0 + self.a0 = 0 + self.theta = 1 + self.A = [] + self.B = [] + self.last = 0 + self.last_s = False + + if method == "levin": + self.factor = self.factor_levin + elif method == "sidi": + self.factor = self.factor_sidi + else: + raise ValueError("levin: unknown method \"%s\"" % method) + + def factor_levin(self, i): + # original levin + # [1] p.50,e.7.5-7 (with n-j replaced by i) + return (self.theta + i) * (self.theta + self.n - 1) ** (self.n - i - 2) / self.ctx.mpf(self.theta + self.n) ** (self.n - i - 1) + + def factor_sidi(self, i): + # sidi analogon to levin (factorial series) + # [1] p.59,e.8.3-16 (with n-j replaced by i) + return (self.theta + self.n - 1) * (self.theta + self.n - 2) / self.ctx.mpf((self.theta + 2 * self.n - i - 2) * (self.theta + 2 * self.n - i - 3)) + + def run(self, s, a0, a1 = 0): + if self.variant=="t": + # levin t + w=a0 + elif self.variant=="u": + # levin u + w=a0*(self.theta+self.n) + elif self.variant=="v": + # levin v + w=a0*a1/(a0-a1) + else: + assert False, "unknown variant" + + if w==0: + raise ValueError("levin: zero weight") + + self.A.append(s/w) + self.B.append(1/w) + + for i in range(self.n-1,-1,-1): + if i==self.n-1: + f=1 + else: + f=self.factor(i) + + self.A[i]=self.A[i+1]-f*self.A[i] + self.B[i]=self.B[i+1]-f*self.B[i] + + self.n+=1 + + ########################################################################### + + def update_psum(self,S): + """ + This routine applies the convergence acceleration to the list of partial sums. + + A = sum(a_k, k = 0..infinity) + s_n = sum(a_k, k = 0..n) + + v, e = ...update_psum([s_0, s_1,..., s_k]) + + output: + v current estimate of the series A + e an error estimate which is simply the difference between the current + estimate and the last estimate. 
+ """ + + if self.variant!="v": + if self.n==0: + self.run(S[0],S[0]) + while self.n>> from mpmath import mp + >>> AC = mp.cohen_alt() + >>> S, s, n = [], 0, 1 + >>> while 1: + ... s += -((-1) ** n) * mp.one / (n * n) + ... n += 1 + ... S.append(s) + ... v, e = AC.update_psum(S) + ... if e < mp.eps: + ... break + ... if n > 1000: raise RuntimeError("iteration limit exceeded") + >>> print(mp.chop(v - mp.pi ** 2 / 12)) + 0.0 + + Here we compute the product `\prod_{n=1}^{\infty} \Gamma(1+1/(2n-1)) / \Gamma(1+1/(2n))`:: + + >>> A = [] + >>> AC = mp.cohen_alt() + >>> n = 1 + >>> while 1: + ... A.append( mp.loggamma(1 + mp.one / (2 * n - 1))) + ... A.append(-mp.loggamma(1 + mp.one / (2 * n))) + ... n += 1 + ... v, e = AC.update(A) + ... if e < mp.eps: + ... break + ... if n > 1000: raise RuntimeError("iteration limit exceeded") + >>> v = mp.exp(v) + >>> print(mp.chop(v - 1.06215090557106, tol = 1e-12)) + 0.0 + + ``cohen_alt`` is also accessible through the :func:`~mpmath.nsum` interface:: + + >>> v = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "a") + >>> print(mp.chop(v - mp.log(2))) + 0.0 + >>> v = mp.nsum(lambda n: (-1)**n / (2 * n + 1), [0, mp.inf], method = "a") + >>> print(mp.chop(v - mp.pi / 4)) + 0.0 + >>> v = mp.nsum(lambda n: (-1)**n * mp.log(n) * n, [1, mp.inf], method = "a") + >>> print(mp.chop(v - mp.diff(lambda s: mp.altzeta(s), -1))) + 0.0 + + """ + + def __init__(self): + self.last=0 + + def update(self, A): + """ + This routine applies the convergence acceleration to the list of individual terms. + + A = sum(a_k, k = 0..infinity) + + v, e = ...update([a_0, a_1,..., a_k]) + + output: + v current estimate of the series A + e an error estimate which is simply the difference between the current + estimate and the last estimate. 
+ """ + + n = len(A) + d = (3 + self.ctx.sqrt(8)) ** n + d = (d + 1 / d) / 2 + b = -self.ctx.one + c = -d + s = 0 + + for k in xrange(n): + c = b - c + if k % 2 == 0: + s = s + c * A[k] + else: + s = s - c * A[k] + b = 2 * (k + n) * (k - n) * b / ((2 * k + 1) * (k + self.ctx.one)) + + value = s / d + + err = abs(value - self.last) + self.last = value + + return value, err + + def update_psum(self, S): + """ + This routine applies the convergence acceleration to the list of partial sums. + + A = sum(a_k, k = 0..infinity) + s_n = sum(a_k ,k = 0..n) + + v, e = ...update_psum([s_0, s_1,..., s_k]) + + output: + v current estimate of the series A + e an error estimate which is simply the difference between the current + estimate and the last estimate. + """ + + n = len(S) + d = (3 + self.ctx.sqrt(8)) ** n + d = (d + 1 / d) / 2 + b = self.ctx.one + s = 0 + + for k in xrange(n): + b = 2 * (n + k) * (n - k) * b / ((2 * k + 1) * (k + self.ctx.one)) + s += b * S[k] + + value = s / d + + err = abs(value - self.last) + self.last = value + + return value, err + +def cohen_alt(ctx): + L = cohen_alt_class() + L.ctx = ctx + return L + +cohen_alt.__doc__ = cohen_alt_class.__doc__ +defun(cohen_alt) + + +@defun +def sumap(ctx, f, interval, integral=None, error=False): + r""" + Evaluates an infinite series of an analytic summand *f* using the + Abel-Plana formula + + .. math :: + + \sum_{k=0}^{\infty} f(k) = \int_0^{\infty} f(t) dt + \frac{1}{2} f(0) + + i \int_0^{\infty} \frac{f(it)-f(-it)}{e^{2\pi t}-1} dt. + + Unlike the Euler-Maclaurin formula (see :func:`~mpmath.sumem`), + the Abel-Plana formula does not require derivatives. However, + it only works when `|f(it)-f(-it)|` does not + increase too rapidly with `t`. 
+ + **Examples** + + The Abel-Plana formula is particularly useful when the summand + decreases like a power of `k`; for example when the sum is a pure + zeta function:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> sumap(lambda k: 1/k**2.5, [1,inf]) + 1.34148725725091717975677 + >>> zeta(2.5) + 1.34148725725091717975677 + >>> sumap(lambda k: 1/(k+1j)**(2.5+2.5j), [1,inf]) + (-3.385361068546473342286084 - 0.7432082105196321803869551j) + >>> zeta(2.5+2.5j, 1+1j) + (-3.385361068546473342286084 - 0.7432082105196321803869551j) + + If the series is alternating, numerical quadrature along the real + line is likely to give poor results, so it is better to evaluate + the first term symbolically whenever possible: + + >>> n=3; z=-0.75 + >>> I = expint(n,-log(z)) + >>> chop(sumap(lambda k: z**k / k**n, [1,inf], integral=I)) + -0.6917036036904594510141448 + >>> polylog(n,z) + -0.6917036036904594510141448 + + """ + prec = ctx.prec + try: + ctx.prec += 10 + a, b = interval + if b != ctx.inf: + raise ValueError("b should be equal to ctx.inf") + g = lambda x: f(x+a) + if integral is None: + i1, err1 = ctx.quad(g, [0,ctx.inf], error=True) + else: + i1, err1 = integral, 0 + j = ctx.j + p = ctx.pi * 2 + if ctx._is_real_type(i1): + h = lambda t: -2 * ctx.im(g(j*t)) / ctx.expm1(p*t) + else: + h = lambda t: j*(g(j*t)-g(-j*t)) / ctx.expm1(p*t) + i2, err2 = ctx.quad(h, [0,ctx.inf], error=True) + err = err1+err2 + v = i1+i2+0.5*g(ctx.mpf(0)) + finally: + ctx.prec = prec + if error: + return +v, err + return +v + + +@defun +def sumem(ctx, f, interval, tol=None, reject=10, integral=None, + adiffs=None, bdiffs=None, verbose=False, error=False, + _fast_abort=False): + r""" + Uses the Euler-Maclaurin formula to compute an approximation accurate + to within ``tol`` (which defaults to the present epsilon) of the sum + + .. math :: + + S = \sum_{k=a}^b f(k) + + where `(a,b)` are given by ``interval`` and `a` or `b` may be + infinite. The approximation is + + .. 
math :: + + S \sim \int_a^b f(x) \,dx + \frac{f(a)+f(b)}{2} + + \sum_{k=1}^{\infty} \frac{B_{2k}}{(2k)!} + \left(f^{(2k-1)}(b)-f^{(2k-1)}(a)\right). + + The last sum in the Euler-Maclaurin formula is not generally + convergent (a notable exception is if `f` is a polynomial, in + which case Euler-Maclaurin actually gives an exact result). + + The summation is stopped as soon as the quotient between two + consecutive terms falls below *reject*. That is, by default + (*reject* = 10), the summation is continued as long as each + term adds at least one decimal. + + Although not convergent, convergence to a given tolerance can + often be "forced" if `b = \infty` by summing up to `a+N` and then + applying the Euler-Maclaurin formula to the sum over the range + `(a+N+1, \ldots, \infty)`. This procedure is implemented by + :func:`~mpmath.nsum`. + + By default numerical quadrature and differentiation is used. + If the symbolic values of the integral and endpoint derivatives + are known, it is more efficient to pass the value of the + integral explicitly as ``integral`` and the derivatives + explicitly as ``adiffs`` and ``bdiffs``. The derivatives + should be given as iterables that yield + `f(a), f'(a), f''(a), \ldots` (and the equivalent for `b`). 
+ + **Examples** + + Summation of an infinite series, with automatic and symbolic + integral and derivative values (the second should be much faster):: + + >>> from mpmath import * + >>> mp.dps = 50; mp.pretty = True + >>> sumem(lambda n: 1/n**2, [32, inf]) + 0.03174336652030209012658168043874142714132886413417 + >>> I = mpf(1)/32 + >>> D = adiffs=((-1)**n*fac(n+1)*32**(-2-n) for n in range(999)) + >>> sumem(lambda n: 1/n**2, [32, inf], integral=I, adiffs=D) + 0.03174336652030209012658168043874142714132886413417 + + An exact evaluation of a finite polynomial sum:: + + >>> sumem(lambda n: n**5-12*n**2+3*n, [-100000, 200000]) + 10500155000624963999742499550000.0 + >>> print(sum(n**5-12*n**2+3*n for n in range(-100000, 200001))) + 10500155000624963999742499550000 + + """ + tol = tol or +ctx.eps + interval = ctx._as_points(interval) + a = ctx.convert(interval[0]) + b = ctx.convert(interval[-1]) + err = ctx.zero + prev = 0 + M = 10000 + if a == ctx.ninf: adiffs = (0 for n in xrange(M)) + else: adiffs = adiffs or ctx.diffs(f, a) + if b == ctx.inf: bdiffs = (0 for n in xrange(M)) + else: bdiffs = bdiffs or ctx.diffs(f, b) + orig = ctx.prec + #verbose = 1 + try: + ctx.prec += 10 + s = ctx.zero + for k, (da, db) in enumerate(izip(adiffs, bdiffs)): + if k & 1: + term = (db-da) * ctx.bernoulli(k+1) / ctx.factorial(k+1) + mag = abs(term) + if verbose: + print("term", k, "magnitude =", ctx.nstr(mag)) + if k > 4 and mag < tol: + s += term + break + elif k > 4 and abs(prev) / mag < reject: + err += mag + if _fast_abort: + return [s, (s, err)][error] + if verbose: + print("Failed to converge") + break + else: + s += term + prev = term + # Endpoint correction + if a != ctx.ninf: s += f(a)/2 + if b != ctx.inf: s += f(b)/2 + # Tail integral + if verbose: + print("Integrating f(x) from x = %s to %s" % (ctx.nstr(a), ctx.nstr(b))) + if integral: + s += integral + else: + integral, ierr = ctx.quad(f, interval, error=True) + if verbose: + print("Integration error:", ierr) + s += integral 
+ err += ierr + finally: + ctx.prec = orig + if error: + return s, err + else: + return s + +@defun +def adaptive_extrapolation(ctx, update, emfun, kwargs): + option = kwargs.get + if ctx._fixed_precision: + tol = option('tol', ctx.eps*2**10) + else: + tol = option('tol', ctx.eps/2**10) + verbose = option('verbose', False) + maxterms = option('maxterms', ctx.dps*10) + method = set(option('method', 'r+s').split('+')) + skip = option('skip', 0) + steps = iter(option('steps', xrange(10, 10**9, 10))) + strict = option('strict') + #steps = (10 for i in xrange(1000)) + summer=[] + if 'd' in method or 'direct' in method: + TRY_RICHARDSON = TRY_SHANKS = TRY_EULER_MACLAURIN = False + else: + TRY_RICHARDSON = ('r' in method) or ('richardson' in method) + TRY_SHANKS = ('s' in method) or ('shanks' in method) + TRY_EULER_MACLAURIN = ('e' in method) or \ + ('euler-maclaurin' in method) + + def init_levin(m): + variant = kwargs.get("levin_variant", "u") + if isinstance(variant, str): + if variant == "all": + variant = ["u", "v", "t"] + else: + variant = [variant] + for s in variant: + L = levin_class(method = m, variant = s) + L.ctx = ctx + L.name = m + "(" + s + ")" + summer.append(L) + + if ('l' in method) or ('levin' in method): + init_levin("levin") + + if ('sidi' in method): + init_levin("sidi") + + if ('a' in method) or ('alternating' in method): + L = cohen_alt_class() + L.ctx = ctx + L.name = "alternating" + summer.append(L) + + last_richardson_value = 0 + shanks_table = [] + index = 0 + step = 10 + partial = [] + best = ctx.zero + orig = ctx.prec + try: + if 'workprec' in kwargs: + ctx.prec = kwargs['workprec'] + elif TRY_RICHARDSON or TRY_SHANKS or len(summer)!=0: + ctx.prec = (ctx.prec+10) * 4 + else: + ctx.prec += 30 + while 1: + if index >= maxterms: + break + + # Get new batch of terms + try: + step = next(steps) + except StopIteration: + pass + if verbose: + print("-"*70) + print("Adding terms #%i-#%i" % (index, index+step)) + update(partial, xrange(index, 
index+step)) + index += step + + # Check direct error + best = partial[-1] + error = abs(best - partial[-2]) + if verbose: + print("Direct error: %s" % ctx.nstr(error)) + if error <= tol: + return best + + # Check each extrapolation method + if TRY_RICHARDSON: + value, maxc = ctx.richardson(partial) + # Convergence + richardson_error = abs(value - last_richardson_value) + if verbose: + print("Richardson error: %s" % ctx.nstr(richardson_error)) + # Convergence + if richardson_error <= tol: + return value + last_richardson_value = value + # Unreliable due to cancellation + if ctx.eps*maxc > tol: + if verbose: + print("Ran out of precision for Richardson") + TRY_RICHARDSON = False + if richardson_error < error: + error = richardson_error + best = value + if TRY_SHANKS: + shanks_table = ctx.shanks(partial, shanks_table, randomized=True) + row = shanks_table[-1] + if len(row) == 2: + est1 = row[-1] + shanks_error = 0 + else: + est1, maxc, est2 = row[-1], abs(row[-2]), row[-3] + shanks_error = abs(est1-est2) + if verbose: + print("Shanks error: %s" % ctx.nstr(shanks_error)) + if shanks_error <= tol: + return est1 + if ctx.eps*maxc > tol: + if verbose: + print("Ran out of precision for Shanks") + TRY_SHANKS = False + if shanks_error < error: + error = shanks_error + best = est1 + for L in summer: + est, lerror = L.update_psum(partial) + if verbose: + print("%s error: %s" % (L.name, ctx.nstr(lerror))) + if lerror <= tol: + return est + if lerror < error: + error = lerror + best = est + if TRY_EULER_MACLAURIN: + if ctx.almosteq(ctx.mpc(ctx.sign(partial[-1]) / ctx.sign(partial[-2])), -1): + if verbose: + print ("NOT using Euler-Maclaurin: the series appears" + " to be alternating, so numerical\n quadrature" + " will most likely fail") + TRY_EULER_MACLAURIN = False + else: + value, em_error = emfun(index, tol) + value += partial[-1] + if verbose: + print("Euler-Maclaurin error: %s" % ctx.nstr(em_error)) + if em_error <= tol: + return value + if em_error < error: + best = 
value + finally: + ctx.prec = orig + if strict: + raise ctx.NoConvergence + if verbose: + print("Warning: failed to converge to target accuracy") + return best + +@defun +def nsum(ctx, f, *intervals, **options): + r""" + Computes the sum + + .. math :: S = \sum_{k=a}^b f(k) + + where `(a, b)` = *interval*, and where `a = -\infty` and/or + `b = \infty` are allowed, or more generally + + .. math :: S = \sum_{k_1=a_1}^{b_1} \cdots + \sum_{k_n=a_n}^{b_n} f(k_1,\ldots,k_n) + + if multiple intervals are given. + + Two examples of infinite series that can be summed by :func:`~mpmath.nsum`, + where the first converges rapidly and the second converges slowly, + are:: + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> nsum(lambda n: 1/fac(n), [0, inf]) + 2.71828182845905 + >>> nsum(lambda n: 1/n**2, [1, inf]) + 1.64493406684823 + + When appropriate, :func:`~mpmath.nsum` applies convergence acceleration to + accurately estimate the sums of slowly convergent series. If the series is + finite, :func:`~mpmath.nsum` currently does not attempt to perform any + extrapolation, and simply calls :func:`~mpmath.fsum`. + + Multidimensional infinite series are reduced to a single-dimensional + series over expanding hypercubes; if both infinite and finite dimensions + are present, the finite ranges are moved innermost. For more advanced + control over the summation order, use nested calls to :func:`~mpmath.nsum`, + or manually rewrite the sum as a single-dimensional series. + + **Options** + + *tol* + Desired maximum final error. Defaults roughly to the + epsilon of the working precision. + + *method* + Which summation algorithm to use (described below). + Default: ``'richardson+shanks'``. + + *maxterms* + Cancel after at most this many terms. Default: 10*dps. + + *steps* + An iterable giving the number of terms to add between + each extrapolation attempt. The default sequence is + [10, 20, 30, 40, ...]. 
For example, if you know that + approximately 100 terms will be required, efficiency might be + improved by setting this to [100, 10]. Then the first + extrapolation will be performed after 100 terms, the second + after 110, etc. + + *verbose* + Print details about progress. + + *ignore* + If enabled, any term that raises ``ArithmeticError`` + or ``ValueError`` (e.g. through division by zero) is replaced + by a zero. This is convenient for lattice sums with + a singular term near the origin. + + **Methods** + + Unfortunately, an algorithm that can efficiently sum any infinite + series does not exist. :func:`~mpmath.nsum` implements several different + algorithms that each work well in different cases. The *method* + keyword argument selects a method. + + The default method is ``'r+s'``, i.e. both Richardson extrapolation + and Shanks transformation is attempted. A slower method that + handles more cases is ``'r+s+e'``. For very high precision + summation, or if the summation needs to be fast (for example if + multiple sums need to be evaluated), it is a good idea to + investigate which one method works best and only use that. + + ``'richardson'`` / ``'r'``: + Uses Richardson extrapolation. Provides useful extrapolation + when `f(k) \sim P(k)/Q(k)` or when `f(k) \sim (-1)^k P(k)/Q(k)` + for polynomials `P` and `Q`. See :func:`~mpmath.richardson` for + additional information. + + ``'shanks'`` / ``'s'``: + Uses Shanks transformation. Typically provides useful + extrapolation when `f(k) \sim c^k` or when successive terms + alternate signs. Is able to sum some divergent series. + See :func:`~mpmath.shanks` for additional information. + + ``'levin'`` / ``'l'``: + Uses the Levin transformation. It performs better than the Shanks + transformation for logarithmic convergent or alternating divergent + series. The ``'levin_variant'``-keyword selects the variant. 
Valid + choices are "u", "t", "v" and "all" whereby "all" uses all three + u,t and v simultanously (This is good for performance comparison in + conjunction with "verbose=True"). Instead of the Levin transform one can + also use the Sidi-S transform by selecting the method ``'sidi'``. + See :func:`~mpmath.levin` for additional details. + + ``'alternating'`` / ``'a'``: + This is the convergence acceleration of alternating series developped + by Cohen, Villegras and Zagier. + See :func:`~mpmath.cohen_alt` for additional details. + + ``'euler-maclaurin'`` / ``'e'``: + Uses the Euler-Maclaurin summation formula to approximate + the remainder sum by an integral. This requires high-order + numerical derivatives and numerical integration. The advantage + of this algorithm is that it works regardless of the + decay rate of `f`, as long as `f` is sufficiently smooth. + See :func:`~mpmath.sumem` for additional information. + + ``'direct'`` / ``'d'``: + Does not perform any extrapolation. This can be used + (and should only be used for) rapidly convergent series. + The summation automatically stops when the terms + decrease below the target tolerance. + + **Basic examples** + + A finite sum:: + + >>> nsum(lambda k: 1/k, [1, 6]) + 2.45 + + Summation of a series going to negative infinity and a doubly + infinite series:: + + >>> nsum(lambda k: 1/k**2, [-inf, -1]) + 1.64493406684823 + >>> nsum(lambda k: 1/(1+k**2), [-inf, inf]) + 3.15334809493716 + + :func:`~mpmath.nsum` handles sums of complex numbers:: + + >>> nsum(lambda k: (0.5+0.25j)**k, [0, inf]) + (1.6 + 0.8j) + + The following sum converges very rapidly, so it is most + efficient to sum it by disabling convergence acceleration:: + + >>> mp.dps = 1000 + >>> a = nsum(lambda k: -(-1)**k * k**2 / fac(2*k), [1, inf], + ... 
method='direct') + >>> b = (cos(1)+sin(1))/4 + >>> abs(a-b) < mpf('1e-998') + True + + **Examples with Richardson extrapolation** + + Richardson extrapolation works well for sums over rational + functions, as well as their alternating counterparts:: + + >>> mp.dps = 50 + >>> nsum(lambda k: 1 / k**3, [1, inf], + ... method='richardson') + 1.2020569031595942853997381615114499907649862923405 + >>> zeta(3) + 1.2020569031595942853997381615114499907649862923405 + + >>> nsum(lambda n: (n + 3)/(n**3 + n**2), [1, inf], + ... method='richardson') + 2.9348022005446793094172454999380755676568497036204 + >>> pi**2/2-2 + 2.9348022005446793094172454999380755676568497036204 + + >>> nsum(lambda k: (-1)**k / k**3, [1, inf], + ... method='richardson') + -0.90154267736969571404980362113358749307373971925537 + >>> -3*zeta(3)/4 + -0.90154267736969571404980362113358749307373971925538 + + **Examples with Shanks transformation** + + The Shanks transformation works well for geometric series + and typically provides excellent acceleration for Taylor + series near the border of their disk of convergence. + Here we apply it to a series for `\log(2)`, which can be + seen as the Taylor series for `\log(1+x)` with `x = 1`:: + + >>> nsum(lambda k: -(-1)**k/k, [1, inf], + ... method='shanks') + 0.69314718055994530941723212145817656807550013436025 + >>> log(2) + 0.69314718055994530941723212145817656807550013436025 + + Here we apply it to a slowly convergent geometric series:: + + >>> nsum(lambda k: mpf('0.995')**k, [0, inf], + ... method='shanks') + 200.0 + + Finally, Shanks' method works very well for alternating series + where `f(k) = (-1)^k g(k)`, and often does so regardless of + the exact decay rate of `g(k)`:: + + >>> mp.dps = 15 + >>> nsum(lambda k: (-1)**(k+1) / k**1.5, [1, inf], + ... method='shanks') + 0.765147024625408 + >>> (2-sqrt(2))*zeta(1.5)/2 + 0.765147024625408 + + The following slowly convergent alternating series has no known + closed-form value. 
Evaluating the sum a second time at higher + precision indicates that the value is probably correct:: + + >>> nsum(lambda k: (-1)**k / log(k), [2, inf], + ... method='shanks') + 0.924299897222939 + >>> mp.dps = 30 + >>> nsum(lambda k: (-1)**k / log(k), [2, inf], + ... method='shanks') + 0.92429989722293885595957018136 + + **Examples with Levin transformation** + + The following example calculates Euler's constant as the constant term in + the Laurent expansion of zeta(s) at s=1. This sum converges extremly slow + because of the logarithmic convergence behaviour of the Dirichlet series + for zeta. + + >>> mp.dps = 30 + >>> z = mp.mpf(10) ** (-10) + >>> a = mp.nsum(lambda n: n**(-(1+z)), [1, mp.inf], method = "levin") - 1 / z + >>> print(mp.chop(a - mp.euler, tol = 1e-10)) + 0.0 + + Now we sum the zeta function outside its range of convergence + (attention: This does not work at the negative integers!): + + >>> mp.dps = 15 + >>> w = mp.nsum(lambda n: n ** (2 + 3j), [1, mp.inf], method = "levin", levin_variant = "v") + >>> print(mp.chop(w - mp.zeta(-2-3j))) + 0.0 + + The next example resummates an asymptotic series expansion of an integral + related to the exponential integral. + + >>> mp.dps = 15 + >>> z = mp.mpf(10) + >>> # exact = mp.quad(lambda x: mp.exp(-x)/(1+x/z),[0,mp.inf]) + >>> exact = z * mp.exp(z) * mp.expint(1,z) # this is the symbolic expression for the integral + >>> w = mp.nsum(lambda n: (-1) ** n * mp.fac(n) * z ** (-n), [0, mp.inf], method = "sidi", levin_variant = "t") + >>> print(mp.chop(w - exact)) + 0.0 + + Following highly divergent asymptotic expansion needs some care. Firstly we + need copious amount of working precision. Secondly the stepsize must not be + chosen to large, otherwise nsum may miss the point where the Levin transform + converges and reach the point where only numerical garbage is produced due to + numerical cancellation. 
+ + >>> mp.dps = 15 + >>> z = mp.mpf(2) + >>> # exact = mp.quad(lambda x: mp.exp( -x * x / 2 - z * x ** 4), [0,mp.inf]) * 2 / mp.sqrt(2 * mp.pi) + >>> exact = mp.exp(mp.one / (32 * z)) * mp.besselk(mp.one / 4, mp.one / (32 * z)) / (4 * mp.sqrt(z * mp.pi)) # this is the symbolic expression for the integral + >>> w = mp.nsum(lambda n: (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)), + ... [0, mp.inf], method = "levin", levin_variant = "t", workprec = 8*mp.prec, steps = [2] + [1 for x in xrange(1000)]) + >>> print(mp.chop(w - exact)) + 0.0 + + The hypergeoemtric function can also be summed outside its range of convergence: + + >>> mp.dps = 15 + >>> z = 2 + 1j + >>> exact = mp.hyp2f1(2 / mp.mpf(3), 4 / mp.mpf(3), 1 / mp.mpf(3), z) + >>> f = lambda n: mp.rf(2 / mp.mpf(3), n) * mp.rf(4 / mp.mpf(3), n) * z**n / (mp.rf(1 / mp.mpf(3), n) * mp.fac(n)) + >>> v = mp.nsum(f, [0, mp.inf], method = "levin", steps = [10 for x in xrange(1000)]) + >>> print(mp.chop(exact-v)) + 0.0 + + **Examples with Cohen's alternating series resummation** + + The next example sums the alternating zeta function: + + >>> v = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "a") + >>> print(mp.chop(v - mp.log(2))) + 0.0 + + The derivate of the alternating zeta function outside its range of + convergence: + + >>> v = mp.nsum(lambda n: (-1)**n * mp.log(n) * n, [1, mp.inf], method = "a") + >>> print(mp.chop(v - mp.diff(lambda s: mp.altzeta(s), -1))) + 0.0 + + **Examples with Euler-Maclaurin summation** + + The sum in the following example has the wrong rate of convergence + for either Richardson or Shanks to be effective. 
+ + >>> f = lambda k: log(k)/k**2.5 + >>> mp.dps = 15 + >>> nsum(f, [1, inf], method='euler-maclaurin') + 0.38734195032621 + >>> -diff(zeta, 2.5) + 0.38734195032621 + + Increasing ``steps`` improves speed at higher precision:: + + >>> mp.dps = 50 + >>> nsum(f, [1, inf], method='euler-maclaurin', steps=[250]) + 0.38734195032620997271199237593105101319948228874688 + >>> -diff(zeta, 2.5) + 0.38734195032620997271199237593105101319948228874688 + + **Divergent series** + + The Shanks transformation is able to sum some *divergent* + series. In particular, it is often able to sum Taylor series + beyond their radius of convergence (this is due to a relation + between the Shanks transformation and Pade approximations; + see :func:`~mpmath.pade` for an alternative way to evaluate divergent + Taylor series). Furthermore the Levin-transform examples above + contain some divergent series resummation. + + Here we apply it to `\log(1+x)` far outside the region of + convergence:: + + >>> mp.dps = 50 + >>> nsum(lambda k: -(-9)**k/k, [1, inf], + ... method='shanks') + 2.3025850929940456840179914546843642076011014886288 + >>> log(10) + 2.3025850929940456840179914546843642076011014886288 + + A particular type of divergent series that can be summed + using the Shanks transformation is geometric series. + The result is the same as using the closed-form formula + for an infinite geometric series:: + + >>> mp.dps = 15 + >>> for n in range(-8, 8): + ... if n == 1: + ... continue + ... print("%s %s %s" % (mpf(n), mpf(1)/(1-n), + ... nsum(lambda k: n**k, [0, inf], method='shanks'))) + ... 
+ -8.0 0.111111111111111 0.111111111111111 + -7.0 0.125 0.125 + -6.0 0.142857142857143 0.142857142857143 + -5.0 0.166666666666667 0.166666666666667 + -4.0 0.2 0.2 + -3.0 0.25 0.25 + -2.0 0.333333333333333 0.333333333333333 + -1.0 0.5 0.5 + 0.0 1.0 1.0 + 2.0 -1.0 -1.0 + 3.0 -0.5 -0.5 + 4.0 -0.333333333333333 -0.333333333333333 + 5.0 -0.25 -0.25 + 6.0 -0.2 -0.2 + 7.0 -0.166666666666667 -0.166666666666667 + + **Multidimensional sums** + + Any combination of finite and infinite ranges is allowed for the + summation indices:: + + >>> mp.dps = 15 + >>> nsum(lambda x,y: x+y, [2,3], [4,5]) + 28.0 + >>> nsum(lambda x,y: x/2**y, [1,3], [1,inf]) + 6.0 + >>> nsum(lambda x,y: y/2**x, [1,inf], [1,3]) + 6.0 + >>> nsum(lambda x,y,z: z/(2**x*2**y), [1,inf], [1,inf], [3,4]) + 7.0 + >>> nsum(lambda x,y,z: y/(2**x*2**z), [1,inf], [3,4], [1,inf]) + 7.0 + >>> nsum(lambda x,y,z: x/(2**z*2**y), [3,4], [1,inf], [1,inf]) + 7.0 + + Some nice examples of double series with analytic solutions or + reductions to single-dimensional series (see [1]):: + + >>> nsum(lambda m, n: 1/2**(m*n), [1,inf], [1,inf]) + 1.60669515241529 + >>> nsum(lambda n: 1/(2**n-1), [1,inf]) + 1.60669515241529 + + >>> nsum(lambda i,j: (-1)**(i+j)/(i**2+j**2), [1,inf], [1,inf]) + 0.278070510848213 + >>> pi*(pi-3*ln2)/12 + 0.278070510848213 + + >>> nsum(lambda i,j: (-1)**(i+j)/(i+j)**2, [1,inf], [1,inf]) + 0.129319852864168 + >>> altzeta(2) - altzeta(1) + 0.129319852864168 + + >>> nsum(lambda i,j: (-1)**(i+j)/(i+j)**3, [1,inf], [1,inf]) + 0.0790756439455825 + >>> altzeta(3) - altzeta(2) + 0.0790756439455825 + + >>> nsum(lambda m,n: m**2*n/(3**m*(n*3**m+m*3**n)), + ... [1,inf], [1,inf]) + 0.28125 + >>> mpf(9)/32 + 0.28125 + + >>> nsum(lambda i,j: fac(i-1)*fac(j-1)/fac(i+j), + ... [1,inf], [1,inf], workprec=400) + 1.64493406684823 + >>> zeta(2) + 1.64493406684823 + + A hard example of a multidimensional sum is the Madelung constant + in three dimensions (see [2]). 
The defining sum converges very + slowly and only conditionally, so :func:`~mpmath.nsum` is lucky to + obtain an accurate value through convergence acceleration. The + second evaluation below uses a much more efficient, rapidly + convergent 2D sum:: + + >>> nsum(lambda x,y,z: (-1)**(x+y+z)/(x*x+y*y+z*z)**0.5, + ... [-inf,inf], [-inf,inf], [-inf,inf], ignore=True) + -1.74756459463318 + >>> nsum(lambda x,y: -12*pi*sech(0.5*pi * \ + ... sqrt((2*x+1)**2+(2*y+1)**2))**2, [0,inf], [0,inf]) + -1.74756459463318 + + Another example of a lattice sum in 2D:: + + >>> nsum(lambda x,y: (-1)**(x+y) / (x**2+y**2), [-inf,inf], + ... [-inf,inf], ignore=True) + -2.1775860903036 + >>> -pi*ln2 + -2.1775860903036 + + An example of an Eisenstein series:: + + >>> nsum(lambda m,n: (m+n*1j)**(-4), [-inf,inf], [-inf,inf], + ... ignore=True) + (3.1512120021539 + 0.0j) + + **References** + + 1. [Weisstein]_ http://mathworld.wolfram.com/DoubleSeries.html, + 2. [Weisstein]_ http://mathworld.wolfram.com/MadelungConstants.html + + """ + infinite, g = standardize(ctx, f, intervals, options) + if not infinite: + return +g() + + def update(partial_sums, indices): + if partial_sums: + psum = partial_sums[-1] + else: + psum = ctx.zero + for k in indices: + psum = psum + g(ctx.mpf(k)) + partial_sums.append(psum) + + prec = ctx.prec + + def emfun(point, tol): + workprec = ctx.prec + ctx.prec = prec + 10 + v = ctx.sumem(g, [point, ctx.inf], tol, error=1) + ctx.prec = workprec + return v + + return +ctx.adaptive_extrapolation(update, emfun, options) + + +def wrapsafe(f): + def g(*args): + try: + return f(*args) + except (ArithmeticError, ValueError): + return 0 + return g + +def standardize(ctx, f, intervals, options): + if options.get("ignore"): + f = wrapsafe(f) + finite = [] + infinite = [] + for k, points in enumerate(intervals): + a, b = ctx._as_points(points) + if b < a: + return False, (lambda: ctx.zero) + if a == ctx.ninf or b == ctx.inf: + infinite.append((k, (a,b))) + else: + finite.append((k, 
(int(a), int(b)))) + if finite: + f = fold_finite(ctx, f, finite) + if not infinite: + return False, lambda: f(*([0]*len(intervals))) + if infinite: + f = standardize_infinite(ctx, f, infinite) + f = fold_infinite(ctx, f, infinite) + args = [0] * len(intervals) + d = infinite[0][0] + def g(k): + args[d] = k + return f(*args) + return True, g + +# backwards compatible itertools.product +def cartesian_product(args): + pools = map(tuple, args) + result = [[]] + for pool in pools: + result = [x+[y] for x in result for y in pool] + for prod in result: + yield tuple(prod) + +def fold_finite(ctx, f, intervals): + if not intervals: + return f + indices = [v[0] for v in intervals] + points = [v[1] for v in intervals] + ranges = [xrange(a, b+1) for (a,b) in points] + def g(*args): + args = list(args) + s = ctx.zero + for xs in cartesian_product(ranges): + for dim, x in zip(indices, xs): + args[dim] = ctx.mpf(x) + s += f(*args) + return s + #print "Folded finite", indices + return g + +# Standardize each interval to [0,inf] +def standardize_infinite(ctx, f, intervals): + if not intervals: + return f + dim, [a,b] = intervals[-1] + if a == ctx.ninf: + if b == ctx.inf: + def g(*args): + args = list(args) + k = args[dim] + if k: + s = f(*args) + args[dim] = -k + s += f(*args) + return s + else: + return f(*args) + else: + def g(*args): + args = list(args) + args[dim] = b - args[dim] + return f(*args) + else: + def g(*args): + args = list(args) + args[dim] += a + return f(*args) + #print "Standardized infinity along dimension", dim, a, b + return standardize_infinite(ctx, g, intervals[:-1]) + +def fold_infinite(ctx, f, intervals): + if len(intervals) < 2: + return f + dim1 = intervals[-2][0] + dim2 = intervals[-1][0] + # Assume intervals are [0,inf] x [0,inf] x ... 
+ def g(*args): + args = list(args) + #args.insert(dim2, None) + n = int(args[dim1]) + s = ctx.zero + #y = ctx.mpf(n) + args[dim2] = ctx.mpf(n) #y + for x in xrange(n+1): + args[dim1] = ctx.mpf(x) + s += f(*args) + args[dim1] = ctx.mpf(n) #ctx.mpf(n) + for y in xrange(n): + args[dim2] = ctx.mpf(y) + s += f(*args) + return s + #print "Folded infinite from", len(intervals), "to", (len(intervals)-1) + return fold_infinite(ctx, g, intervals[:-1]) + +@defun +def nprod(ctx, f, interval, nsum=False, **kwargs): + r""" + Computes the product + + .. math :: + + P = \prod_{k=a}^b f(k) + + where `(a, b)` = *interval*, and where `a = -\infty` and/or + `b = \infty` are allowed. + + By default, :func:`~mpmath.nprod` uses the same extrapolation methods as + :func:`~mpmath.nsum`, except applied to the partial products rather than + partial sums, and the same keyword options as for :func:`~mpmath.nsum` are + supported. If ``nsum=True``, the product is instead computed via + :func:`~mpmath.nsum` as + + .. math :: + + P = \exp\left( \sum_{k=a}^b \log(f(k)) \right). + + This is slower, but can sometimes yield better results. It is + also required (and used automatically) when Euler-Maclaurin + summation is requested. + + **Examples** + + A simple finite product:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> nprod(lambda k: k, [1, 4]) + 24.0 + + A large number of infinite products have known exact values, + and can therefore be used as a reference. Most of the following + examples are taken from MathWorld [1]. 
+ + A few infinite products with simple values are:: + + >>> 2*nprod(lambda k: (4*k**2)/(4*k**2-1), [1, inf]) + 3.141592653589793238462643 + >>> nprod(lambda k: (1+1/k)**2/(1+2/k), [1, inf]) + 2.0 + >>> nprod(lambda k: (k**3-1)/(k**3+1), [2, inf]) + 0.6666666666666666666666667 + >>> nprod(lambda k: (1-1/k**2), [2, inf]) + 0.5 + + Next, several more infinite products with more complicated + values:: + + >>> nprod(lambda k: exp(1/k**2), [1, inf]); exp(pi**2/6) + 5.180668317897115748416626 + 5.180668317897115748416626 + + >>> nprod(lambda k: (k**2-1)/(k**2+1), [2, inf]); pi*csch(pi) + 0.2720290549821331629502366 + 0.2720290549821331629502366 + + >>> nprod(lambda k: (k**4-1)/(k**4+1), [2, inf]) + 0.8480540493529003921296502 + >>> pi*sinh(pi)/(cosh(sqrt(2)*pi)-cos(sqrt(2)*pi)) + 0.8480540493529003921296502 + + >>> nprod(lambda k: (1+1/k+1/k**2)**2/(1+2/k+3/k**2), [1, inf]) + 1.848936182858244485224927 + >>> 3*sqrt(2)*cosh(pi*sqrt(3)/2)**2*csch(pi*sqrt(2))/pi + 1.848936182858244485224927 + + >>> nprod(lambda k: (1-1/k**4), [2, inf]); sinh(pi)/(4*pi) + 0.9190194775937444301739244 + 0.9190194775937444301739244 + + >>> nprod(lambda k: (1-1/k**6), [2, inf]) + 0.9826842777421925183244759 + >>> (1+cosh(pi*sqrt(3)))/(12*pi**2) + 0.9826842777421925183244759 + + >>> nprod(lambda k: (1+1/k**2), [2, inf]); sinh(pi)/(2*pi) + 1.838038955187488860347849 + 1.838038955187488860347849 + + >>> nprod(lambda n: (1+1/n)**n * exp(1/(2*n)-1), [1, inf]) + 1.447255926890365298959138 + >>> exp(1+euler/2)/sqrt(2*pi) + 1.447255926890365298959138 + + The following two products are equivalent and can be evaluated in + terms of a Jacobi theta function. 
Pi can be replaced by any value + (as long as convergence is preserved):: + + >>> nprod(lambda k: (1-pi**-k)/(1+pi**-k), [1, inf]) + 0.3838451207481672404778686 + >>> nprod(lambda k: tanh(k*log(pi)/2), [1, inf]) + 0.3838451207481672404778686 + >>> jtheta(4,0,1/pi) + 0.3838451207481672404778686 + + This product does not have a known closed form value:: + + >>> nprod(lambda k: (1-1/2**k), [1, inf]) + 0.2887880950866024212788997 + + A product taken from `-\infty`:: + + >>> nprod(lambda k: 1-k**(-3), [-inf,-2]) + 0.8093965973662901095786805 + >>> cosh(pi*sqrt(3)/2)/(3*pi) + 0.8093965973662901095786805 + + A doubly infinite product:: + + >>> nprod(lambda k: exp(1/(1+k**2)), [-inf, inf]) + 23.41432688231864337420035 + >>> exp(pi/tanh(pi)) + 23.41432688231864337420035 + + A product requiring the use of Euler-Maclaurin summation to compute + an accurate value:: + + >>> nprod(lambda k: (1-1/k**2.5), [2, inf], method='e') + 0.696155111336231052898125 + + **References** + + 1. [Weisstein]_ http://mathworld.wolfram.com/InfiniteProduct.html + + """ + if nsum or ('e' in kwargs.get('method', '')): + orig = ctx.prec + try: + # TODO: we are evaluating log(1+eps) -> eps, which is + # inaccurate. This currently works because nsum greatly + # increases the working precision. But we should be + # more intelligent and handle the precision here. 
+ ctx.prec += 10 + v = ctx.nsum(lambda n: ctx.ln(f(n)), interval, **kwargs) + finally: + ctx.prec = orig + return +ctx.exp(v) + + a, b = ctx._as_points(interval) + if a == ctx.ninf: + if b == ctx.inf: + return f(0) * ctx.nprod(lambda k: f(-k) * f(k), [1, ctx.inf], **kwargs) + return ctx.nprod(f, [-b, ctx.inf], **kwargs) + elif b != ctx.inf: + return ctx.fprod(f(ctx.mpf(k)) for k in xrange(int(a), int(b)+1)) + + a = int(a) + + def update(partial_products, indices): + if partial_products: + pprod = partial_products[-1] + else: + pprod = ctx.one + for k in indices: + pprod = pprod * f(a + ctx.mpf(k)) + partial_products.append(pprod) + + return +ctx.adaptive_extrapolation(update, None, kwargs) + + +@defun +def limit(ctx, f, x, direction=1, exp=False, **kwargs): + r""" + Computes an estimate of the limit + + .. math :: + + \lim_{t \to x} f(t) + + where `x` may be finite or infinite. + + For finite `x`, :func:`~mpmath.limit` evaluates `f(x + d/n)` for + consecutive integer values of `n`, where the approach direction + `d` may be specified using the *direction* keyword argument. + For infinite `x`, :func:`~mpmath.limit` evaluates values of + `f(\mathrm{sign}(x) \cdot n)`. + + If the approach to the limit is not sufficiently fast to give + an accurate estimate directly, :func:`~mpmath.limit` attempts to find + the limit using Richardson extrapolation or the Shanks + transformation. You can select between these methods using + the *method* keyword (see documentation of :func:`~mpmath.nsum` for + more information). + + **Options** + + The following options are available with essentially the + same meaning as for :func:`~mpmath.nsum`: *tol*, *method*, *maxterms*, + *steps*, *verbose*. + + If the option *exp=True* is set, `f` will be + sampled at exponentially spaced points `n = 2^1, 2^2, 2^3, \ldots` + instead of the linearly spaced points `n = 1, 2, 3, \ldots`. 
+ This can sometimes improve the rate of convergence so that + :func:`~mpmath.limit` may return a more accurate answer (and faster). + However, do note that this can only be used if `f` + supports fast and accurate evaluation for arguments that + are extremely close to the limit point (or if infinite, + very large arguments). + + **Examples** + + A basic evaluation of a removable singularity:: + + >>> from mpmath import * + >>> mp.dps = 30; mp.pretty = True + >>> limit(lambda x: (x-sin(x))/x**3, 0) + 0.166666666666666666666666666667 + + Computing the exponential function using its limit definition:: + + >>> limit(lambda n: (1+3/n)**n, inf) + 20.0855369231876677409285296546 + >>> exp(3) + 20.0855369231876677409285296546 + + A limit for `\pi`:: + + >>> f = lambda n: 2**(4*n+1)*fac(n)**4/(2*n+1)/fac(2*n)**2 + >>> limit(f, inf) + 3.14159265358979323846264338328 + + Calculating the coefficient in Stirling's formula:: + + >>> limit(lambda n: fac(n) / (sqrt(n)*(n/e)**n), inf) + 2.50662827463100050241576528481 + >>> sqrt(2*pi) + 2.50662827463100050241576528481 + + Evaluating Euler's constant `\gamma` using the limit representation + + .. math :: + + \gamma = \lim_{n \rightarrow \infty } \left[ \left( + \sum_{k=1}^n \frac{1}{k} \right) - \log(n) \right] + + (which converges notoriously slowly):: + + >>> f = lambda n: sum([mpf(1)/k for k in range(1,int(n)+1)]) - log(n) + >>> limit(f, inf) + 0.577215664901532860606512090082 + >>> +euler + 0.577215664901532860606512090082 + + With default settings, the following limit converges too slowly + to be evaluated accurately. 
Changing to exponential sampling + however gives a perfect result:: + + >>> f = lambda x: sqrt(x**3+x**2)/(sqrt(x**3)+x) + >>> limit(f, inf) + 0.992831158558330281129249686491 + >>> limit(f, inf, exp=True) + 1.0 + + """ + + if ctx.isinf(x): + direction = ctx.sign(x) + g = lambda k: f(ctx.mpf(k+1)*direction) + else: + direction *= ctx.one + g = lambda k: f(x + direction/(k+1)) + if exp: + h = g + g = lambda k: h(2**k) + + def update(values, indices): + for k in indices: + values.append(g(k+1)) + + # XXX: steps used by nsum don't work well + if not 'steps' in kwargs: + kwargs['steps'] = [10] + + return +ctx.adaptive_extrapolation(update, None, kwargs) diff --git a/.venv/lib/python3.11/site-packages/mpmath/calculus/inverselaplace.py b/.venv/lib/python3.11/site-packages/mpmath/calculus/inverselaplace.py new file mode 100644 index 0000000000000000000000000000000000000000..d2206b05c1601ee781b09dcbedf3c0fcd89cfa59 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/calculus/inverselaplace.py @@ -0,0 +1,973 @@ +# contributed to mpmath by Kristopher L. Kuhlman, February 2017 +# contributed to mpmath by Guillermo Navas-Palencia, February 2022 + +class InverseLaplaceTransform(object): + r""" + Inverse Laplace transform methods are implemented using this + class, in order to simplify the code and provide a common + infrastructure. + + Implement a custom inverse Laplace transform algorithm by + subclassing :class:`InverseLaplaceTransform` and implementing the + appropriate methods. The subclass can then be used by + :func:`~mpmath.invertlaplace` by passing it as the *method* + argument. + """ + + def __init__(self, ctx): + self.ctx = ctx + + def calc_laplace_parameter(self, t, **kwargs): + r""" + Determine the vector of Laplace parameter values needed for an + algorithm, this will depend on the choice of algorithm (de + Hoog is default), the algorithm-specific parameters passed (or + default ones), and desired time. 
+ """ + raise NotImplementedError + + def calc_time_domain_solution(self, fp): + r""" + Compute the time domain solution, after computing the + Laplace-space function evaluations at the abscissa required + for the algorithm. Abscissa computed for one algorithm are + typically not useful for another algorithm. + """ + raise NotImplementedError + + +class FixedTalbot(InverseLaplaceTransform): + + def calc_laplace_parameter(self, t, **kwargs): + r"""The "fixed" Talbot method deforms the Bromwich contour towards + `-\infty` in the shape of a parabola. Traditionally the Talbot + algorithm has adjustable parameters, but the "fixed" version + does not. The `r` parameter could be passed in as a parameter, + if you want to override the default given by (Abate & Valko, + 2004). + + The Laplace parameter is sampled along a parabola opening + along the negative imaginary axis, with the base of the + parabola along the real axis at + `p=\frac{r}{t_\mathrm{max}}`. As the number of terms used in + the approximation (degree) grows, the abscissa required for + function evaluation tend towards `-\infty`, requiring high + precision to prevent overflow. If any poles, branch cuts or + other singularities exist such that the deformed Bromwich + contour lies to the left of the singularity, the method will + fail. + + **Optional arguments** + + :class:`~mpmath.calculus.inverselaplace.FixedTalbot.calc_laplace_parameter` + recognizes the following keywords + + *tmax* + maximum time associated with vector of times + (typically just the time requested) + *degree* + integer order of approximation (M = number of terms) + *r* + abscissa for `p_0` (otherwise computed using rule + of thumb `2M/5`) + + The working precision will be increased according to a rule of + thumb. If 'degree' is not specified, the working precision and + degree are chosen to hopefully achieve the dps of the calling + context. 
If 'degree' is specified, the working precision is + chosen to achieve maximum resulting precision for the + specified degree. + + .. math :: + + p_0=\frac{r}{t} + + .. math :: + + p_i=\frac{i r \pi}{Mt_\mathrm{max}}\left[\cot\left( + \frac{i\pi}{M}\right) + j \right] \qquad 1\le i 0: + self.degree += 1 + + M = self.degree + + # this is adjusting the dps of the calling context + # hopefully the caller doesn't monkey around with it + # between calling this routine and calc_time_domain_solution() + self.dps_orig = self.ctx.dps + self.ctx.dps = self.dps_goal + + self.V = self._coeff() + self.p = self.ctx.matrix(self.ctx.arange(1, M+1))*self.ctx.ln2/self.t + + # NB: p is real (mpf) + + def _coeff(self): + r"""Salzer summation weights (aka, "Stehfest coefficients") + only depend on the approximation order (M) and the precision""" + + M = self.degree + M2 = int(M/2) # checked earlier that M is even + + V = self.ctx.matrix(M, 1) + + # Salzer summation weights + # get very large in magnitude and oscillate in sign, + # if the precision is not high enough, there will be + # catastrophic cancellation + for k in range(1, M+1): + z = self.ctx.matrix(min(k, M2)+1, 1) + for j in range(int((k+1)/2), min(k, M2)+1): + z[j] = (self.ctx.power(j, M2)*self.ctx.fac(2*j)/ + (self.ctx.fac(M2-j)*self.ctx.fac(j)* + self.ctx.fac(j-1)*self.ctx.fac(k-j)* + self.ctx.fac(2*j-k))) + V[k-1] = self.ctx.power(-1, k+M2)*self.ctx.fsum(z) + + return V + + def calc_time_domain_solution(self, fp, t, manual_prec=False): + r"""Compute time-domain Stehfest algorithm solution. + + .. math :: + + f(t,M) = \frac{\log 2}{t} \sum_{k=1}^{M} V_k \bar{f}\left( + p_k \right) + + where + + .. math :: + + V_k = (-1)^{k + N/2} \sum^{\min(k,N/2)}_{i=\lfloor(k+1)/2 \rfloor} + \frac{i^{\frac{N}{2}}(2i)!}{\left(\frac{N}{2}-i \right)! \, i! \, + \left(i-1 \right)! \, \left(k-i\right)! 
\, \left(2i-k \right)!} + + As the degree increases, the abscissa (`p_k`) only increase + linearly towards `\infty`, but the Stehfest coefficients + (`V_k`) alternate in sign and increase rapidly in sign, + requiring high precision to prevent overflow or loss of + significance when evaluating the sum. + + **References** + + 1. Widder, D. (1941). *The Laplace Transform*. Princeton. + 2. Stehfest, H. (1970). Algorithm 368: numerical inversion of + Laplace transforms. *Communications of the ACM* 13(1):47-49, + http://dx.doi.org/10.1145/361953.361969 + + """ + + # required + self.t = self.ctx.convert(t) + + # assume fp was computed from p matrix returned from + # calc_laplace_parameter(), so is already + # a list or matrix of mpmath 'mpf' types + + result = self.ctx.fdot(self.V, fp)*self.ctx.ln2/self.t + + # setting dps back to value when calc_laplace_parameter was called + if not manual_prec: + self.ctx.dps = self.dps_orig + + # ignore any small imaginary part + return result.real + + +# **************************************** + +class deHoog(InverseLaplaceTransform): + + def calc_laplace_parameter(self, t, **kwargs): + r"""the de Hoog, Knight & Stokes algorithm is an + accelerated form of the Fourier series numerical + inverse Laplace transform algorithms. + + .. math :: + + p_k = \gamma + \frac{jk}{T} \qquad 0 \le k < 2M+1 + + where + + .. math :: + + \gamma = \alpha - \frac{\log \mathrm{tol}}{2T}, + + `j=\sqrt{-1}`, `T = 2t_\mathrm{max}` is a scaled time, + `\alpha=10^{-\mathrm{dps\_goal}}` is the real part of the + rightmost pole or singularity, which is chosen based on the + desired accuracy (assuming the rightmost singularity is 0), + and `\mathrm{tol}=10\alpha` is the desired tolerance, which is + chosen in relation to `\alpha`.` + + When increasing the degree, the abscissa increase towards + `j\infty`, but more slowly than the fixed Talbot + algorithm. The de Hoog et al. 
algorithm typically does better + with oscillatory functions of time, and less well-behaved + functions. The method tends to be slower than the Talbot and + Stehfest algorithsm, especially so at very high precision + (e.g., `>500` digits precision). + + """ + + # required + # ------------------------------ + self.t = self.ctx.convert(t) + + # optional + # ------------------------------ + self.tmax = kwargs.get('tmax', self.t) + + # empirical relationships used here based on a linear fit of + # requested and delivered dps for exponentially decaying time + # functions for requested dps up to 512. + + if 'degree' in kwargs: + self.degree = kwargs['degree'] + self.dps_goal = int(1.38*self.degree) + else: + self.dps_goal = int(self.ctx.dps*1.36) + self.degree = max(10, self.dps_goal) + + # 2*M+1 terms in approximation + M = self.degree + + # adjust alpha component of abscissa of convergence for higher + # precision + tmp = self.ctx.power(10.0, -self.dps_goal) + self.alpha = self.ctx.convert(kwargs.get('alpha', tmp)) + + # desired tolerance (here simply related to alpha) + self.tol = self.ctx.convert(kwargs.get('tol', self.alpha*10.0)) + self.np = 2*self.degree+1 # number of terms in approximation + + # this is adjusting the dps of the calling context + # hopefully the caller doesn't monkey around with it + # between calling this routine and calc_time_domain_solution() + self.dps_orig = self.ctx.dps + self.ctx.dps = self.dps_goal + + # scaling factor (likely tun-able, but 2 is typical) + self.scale = kwargs.get('scale', 2) + self.T = self.ctx.convert(kwargs.get('T', self.scale*self.tmax)) + + self.p = self.ctx.matrix(2*M+1, 1) + self.gamma = self.alpha - self.ctx.log(self.tol)/(self.scale*self.T) + self.p = (self.gamma + self.ctx.pi* + self.ctx.matrix(self.ctx.arange(self.np))/self.T*1j) + + # NB: p is complex (mpc) + + def calc_time_domain_solution(self, fp, t, manual_prec=False): + r"""Calculate time-domain solution for + de Hoog, Knight & Stokes algorithm. 
+ + The un-accelerated Fourier series approach is: + + .. math :: + + f(t,2M+1) = \frac{e^{\gamma t}}{T} \sum_{k=0}^{2M}{}^{'} + \Re\left[\bar{f}\left( p_k \right) + e^{i\pi t/T} \right], + + where the prime on the summation indicates the first term is halved. + + This simplistic approach requires so many function evaluations + that it is not practical. Non-linear acceleration is + accomplished via Pade-approximation and an analytic expression + for the remainder of the continued fraction. See the original + paper (reference 2 below) a detailed description of the + numerical approach. + + **References** + + 1. Davies, B. (2005). *Integral Transforms and their + Applications*, Third Edition. Springer. + 2. de Hoog, F., J. Knight, A. Stokes (1982). An improved + method for numerical inversion of Laplace transforms. *SIAM + Journal of Scientific and Statistical Computing* 3:357-366, + http://dx.doi.org/10.1137/0903022 + + """ + + M = self.degree + np = self.np + T = self.T + + self.t = self.ctx.convert(t) + + # would it be useful to try re-using + # space between e&q and A&B? 
+ e = self.ctx.zeros(np, M+1) + q = self.ctx.matrix(2*M, M) + d = self.ctx.matrix(np, 1) + A = self.ctx.zeros(np+1, 1) + B = self.ctx.ones(np+1, 1) + + # initialize Q-D table + e[:, 0] = 0.0 + 0j + q[0, 0] = fp[1]/(fp[0]/2) + for i in range(1, 2*M): + q[i, 0] = fp[i+1]/fp[i] + + # rhombus rule for filling triangular Q-D table (e & q) + for r in range(1, M+1): + # start with e, column 1, 0:2*M-2 + mr = 2*(M-r) + 1 + e[0:mr, r] = q[1:mr+1, r-1] - q[0:mr, r-1] + e[1:mr+1, r-1] + if not r == M: + rq = r+1 + mr = 2*(M-rq)+1 + 2 + for i in range(mr): + q[i, rq-1] = q[i+1, rq-2]*e[i+1, rq-1]/e[i, rq-1] + + # build up continued fraction coefficients (d) + d[0] = fp[0]/2 + for r in range(1, M+1): + d[2*r-1] = -q[0, r-1] # even terms + d[2*r] = -e[0, r] # odd terms + + # seed A and B for recurrence + A[0] = 0.0 + 0.0j + A[1] = d[0] + B[0:2] = 1.0 + 0.0j + + # base of the power series + z = self.ctx.expjpi(self.t/T) # i*pi is already in fcn + + # coefficients of Pade approximation (A & B) + # using recurrence for all but last term + for i in range(1, 2*M): + A[i+1] = A[i] + d[i]*A[i-1]*z + B[i+1] = B[i] + d[i]*B[i-1]*z + + # "improved remainder" to continued fraction + brem = (1 + (d[2*M-1] - d[2*M])*z)/2 + # powm1(x,y) computes x^y - 1 more accurately near zero + rem = brem*self.ctx.powm1(1 + d[2*M]*z/brem, + self.ctx.fraction(1, 2)) + + # last term of recurrence using new remainder + A[np] = A[2*M] + rem*A[2*M-1] + B[np] = B[2*M] + rem*B[2*M-1] + + # diagonal Pade approximation + # F=A/B represents accelerated trapezoid rule + result = self.ctx.exp(self.gamma*self.t)/T*(A[np]/B[np]).real + + # setting dps back to value when calc_laplace_parameter was called + if not manual_prec: + self.ctx.dps = self.dps_orig + + return result + + +# **************************************** + +class Cohen(InverseLaplaceTransform): + + def calc_laplace_parameter(self, t, **kwargs): + r"""The Cohen algorithm accelerates the convergence of the nearly + alternating series resulting from the 
application of the trapezoidal + rule to the Bromwich contour inversion integral. + + .. math :: + + p_k = \frac{\gamma}{2 t} + \frac{\pi i k}{t} \qquad 0 \le k < M + + where + + .. math :: + + \gamma = \frac{2}{3} (d + \log(10) + \log(2 t)), + + `d = \mathrm{dps\_goal}`, which is chosen based on the desired + accuracy using the method developed in [1] to improve numerical + stability. The Cohen algorithm shows robustness similar to the de Hoog + et al. algorithm, but it is faster than the fixed Talbot algorithm. + + **Optional arguments** + + *degree* + integer order of the approximation (M = number of terms) + *alpha* + abscissa for `p_0` (controls the discretization error) + + The working precision will be increased according to a rule of + thumb. If 'degree' is not specified, the working precision and + degree are chosen to hopefully achieve the dps of the calling + context. If 'degree' is specified, the working precision is + chosen to achieve maximum resulting precision for the + specified degree. + + **References** + + 1. P. Glasserman, J. Ruiz-Mata (2006). Computing the credit loss + distribution in the Gaussian copula model: a comparison of methods. 
+ *Journal of Credit Risk* 2(4):33-66, 10.21314/JCR.2006.057 + + """ + self.t = self.ctx.convert(t) + + if 'degree' in kwargs: + self.degree = kwargs['degree'] + self.dps_goal = int(1.5 * self.degree) + else: + self.dps_goal = int(self.ctx.dps * 1.74) + self.degree = max(22, int(1.31 * self.dps_goal)) + + M = self.degree + 1 + + # this is adjusting the dps of the calling context hopefully + # the caller doesn't monkey around with it between calling + # this routine and calc_time_domain_solution() + self.dps_orig = self.ctx.dps + self.ctx.dps = self.dps_goal + + ttwo = 2 * self.t + tmp = self.ctx.dps * self.ctx.log(10) + self.ctx.log(ttwo) + tmp = self.ctx.fraction(2, 3) * tmp + self.alpha = self.ctx.convert(kwargs.get('alpha', tmp)) + + # all but time-dependent part of p + a_t = self.alpha / ttwo + p_t = self.ctx.pi * 1j / self.t + + self.p = self.ctx.matrix(M, 1) + self.p[0] = a_t + + for i in range(1, M): + self.p[i] = a_t + i * p_t + + def calc_time_domain_solution(self, fp, t, manual_prec=False): + r"""Calculate time-domain solution for Cohen algorithm. + + The accelerated nearly alternating series is: + + .. math :: + + f(t, M) = \frac{e^{\gamma / 2}}{t} \left[\frac{1}{2} + \Re\left(\bar{f}\left(\frac{\gamma}{2t}\right) \right) - + \sum_{k=0}^{M-1}\frac{c_{M,k}}{d_M}\Re\left(\bar{f} + \left(\frac{\gamma + 2(k+1) \pi i}{2t}\right)\right)\right], + + where coefficients `\frac{c_{M, k}}{d_M}` are described in [1]. + + 1. H. Cohen, F. Rodriguez Villegas, D. Zagier (2000). Convergence + acceleration of alternating series. *Experiment. 
Math* 9(1):3-12 + + """ + self.t = self.ctx.convert(t) + + n = self.degree + M = n + 1 + + A = self.ctx.matrix(M, 1) + for i in range(M): + A[i] = fp[i].real + + d = (3 + self.ctx.sqrt(8)) ** n + d = (d + 1 / d) / 2 + b = -self.ctx.one + c = -d + s = 0 + + for k in range(n): + c = b - c + s = s + c * A[k + 1] + b = 2 * (k + n) * (k - n) * b / ((2 * k + 1) * (k + self.ctx.one)) + + result = self.ctx.exp(self.alpha / 2) / self.t * (A[0] / 2 - s / d) + + # setting dps back to value when calc_laplace_parameter was + # called, unless flag is set. + if not manual_prec: + self.ctx.dps = self.dps_orig + + return result + + +# **************************************** + +class LaplaceTransformInversionMethods(object): + def __init__(ctx, *args, **kwargs): + ctx._fixed_talbot = FixedTalbot(ctx) + ctx._stehfest = Stehfest(ctx) + ctx._de_hoog = deHoog(ctx) + ctx._cohen = Cohen(ctx) + + def invertlaplace(ctx, f, t, **kwargs): + r"""Computes the numerical inverse Laplace transform for a + Laplace-space function at a given time. The function being + evaluated is assumed to be a real-valued function of time. + + The user must supply a Laplace-space function `\bar{f}(p)`, + and a desired time at which to estimate the time-domain + solution `f(t)`. + + A few basic examples of Laplace-space functions with known + inverses (see references [1,2]) : + + .. math :: + + \mathcal{L}\left\lbrace f(t) \right\rbrace=\bar{f}(p) + + .. math :: + + \mathcal{L}^{-1}\left\lbrace \bar{f}(p) \right\rbrace = f(t) + + .. math :: + + \bar{f}(p) = \frac{1}{(p+1)^2} + + .. 
math :: + + f(t) = t e^{-t} + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = True + >>> tt = [0.001, 0.01, 0.1, 1, 10] + >>> fp = lambda p: 1/(p+1)**2 + >>> ft = lambda t: t*exp(-t) + >>> ft(tt[0]),ft(tt[0])-invertlaplace(fp,tt[0],method='talbot') + (0.000999000499833375, 8.57923043561212e-20) + >>> ft(tt[1]),ft(tt[1])-invertlaplace(fp,tt[1],method='talbot') + (0.00990049833749168, 3.27007646698047e-19) + >>> ft(tt[2]),ft(tt[2])-invertlaplace(fp,tt[2],method='talbot') + (0.090483741803596, -1.75215800052168e-18) + >>> ft(tt[3]),ft(tt[3])-invertlaplace(fp,tt[3],method='talbot') + (0.367879441171442, 1.2428864009344e-17) + >>> ft(tt[4]),ft(tt[4])-invertlaplace(fp,tt[4],method='talbot') + (0.000453999297624849, 4.04513489306658e-20) + + The methods also work for higher precision: + + >>> mp.dps = 100; mp.pretty = True + >>> nstr(ft(tt[0]),15),nstr(ft(tt[0])-invertlaplace(fp,tt[0],method='talbot'),15) + ('0.000999000499833375', '-4.96868310693356e-105') + >>> nstr(ft(tt[1]),15),nstr(ft(tt[1])-invertlaplace(fp,tt[1],method='talbot'),15) + ('0.00990049833749168', '1.23032291513122e-104') + + .. math :: + + \bar{f}(p) = \frac{1}{p^2+1} + + .. math :: + + f(t) = \mathrm{J}_0(t) + + >>> mp.dps = 15; mp.pretty = True + >>> fp = lambda p: 1/sqrt(p*p + 1) + >>> ft = lambda t: besselj(0,t) + >>> ft(tt[0]),ft(tt[0])-invertlaplace(fp,tt[0],method='dehoog') + (0.999999750000016, -6.09717765032273e-18) + >>> ft(tt[1]),ft(tt[1])-invertlaplace(fp,tt[1],method='dehoog') + (0.99997500015625, -5.61756281076169e-17) + + .. math :: + + \bar{f}(p) = \frac{\log p}{p} + + .. 
math :: + + f(t) = -\gamma -\log t + + >>> mp.dps = 15; mp.pretty = True + >>> fp = lambda p: log(p)/p + >>> ft = lambda t: -euler-log(t) + >>> ft(tt[0]),ft(tt[0])-invertlaplace(fp,tt[0],method='stehfest') + (6.3305396140806, -1.92126634837863e-16) + >>> ft(tt[1]),ft(tt[1])-invertlaplace(fp,tt[1],method='stehfest') + (4.02795452108656, -4.81486093200704e-16) + + **Options** + + :func:`~mpmath.invertlaplace` recognizes the following optional + keywords valid for all methods: + + *method* + Chooses numerical inverse Laplace transform algorithm + (described below). + *degree* + Number of terms used in the approximation + + **Algorithms** + + Mpmath implements four numerical inverse Laplace transform + algorithms, attributed to: Talbot, Stehfest, and de Hoog, + Knight and Stokes. These can be selected by using + *method='talbot'*, *method='stehfest'*, *method='dehoog'* or + *method='cohen'* or by passing the classes *method=FixedTalbot*, + *method=Stehfest*, *method=deHoog*, or *method=Cohen*. The functions + :func:`~mpmath.invlaptalbot`, :func:`~mpmath.invlapstehfest`, + :func:`~mpmath.invlapdehoog`, and :func:`~mpmath.invlapcohen` + are also available as shortcuts. + + All four algorithms implement a heuristic balance between the + requested precision and the precision used internally for the + calculations. This has been tuned for a typical exponentially + decaying function and precision up to few hundred decimal + digits. + + The Laplace transform converts the variable time (i.e., along + a line) into a parameter given by the right half of the + complex `p`-plane. Singularities, poles, and branch cuts in + the complex `p`-plane contain all the information regarding + the time behavior of the corresponding function. Any numerical + method must therefore sample `p`-plane "close enough" to the + singularities to accurately characterize them, while not + getting too close to have catastrophic cancellation, overflow, + or underflow issues. 
Most significantly, if one or more of the + singularities in the `p`-plane is not on the left side of the + Bromwich contour, its effects will be left out of the computed + solution, and the answer will be completely wrong. + + *Talbot* + + The fixed Talbot method is high accuracy and fast, but the + method can catastrophically fail for certain classes of time-domain + behavior, including a Heaviside step function for positive + time (e.g., `H(t-2)`), or some oscillatory behaviors. The + Talbot method usually has adjustable parameters, but the + "fixed" variety implemented here does not. This method + deforms the Bromwich integral contour in the shape of a + parabola towards `-\infty`, which leads to problems + when the solution has a decaying exponential in it (e.g., a + Heaviside step function is equivalent to multiplying by a + decaying exponential in Laplace space). + + *Stehfest* + + The Stehfest algorithm only uses abscissa along the real axis + of the complex `p`-plane to estimate the time-domain + function. Oscillatory time-domain functions have poles away + from the real axis, so this method does not work well with + oscillatory functions, especially high-frequency ones. This + method also depends on summation of terms in a series that + grows very large, and will have catastrophic cancellation + during summation if the working precision is too low. + + *de Hoog et al.* + + The de Hoog, Knight, and Stokes method is essentially a + Fourier-series quadrature-type approximation to the Bromwich + contour integral, with non-linear series acceleration and an + analytical expression for the remainder term. This method is + typically one of the most robust. This method also involves the + greatest amount of overhead, so it is typically the slowest of the + four methods at high precision. + + *Cohen* + + The Cohen method is a trapezoidal rule approximation to the Bromwich + contour integral, with linear acceleration for alternating + series. 
This method is as robust as the de Hoog et al method and the + fastest of the four methods at high precision, and is therefore the + default method. + + **Singularities** + + All numerical inverse Laplace transform methods have problems + at large time when the Laplace-space function has poles, + singularities, or branch cuts to the right of the origin in + the complex plane. For simple poles in `\bar{f}(p)` at the + `p`-plane origin, the time function is constant in time (e.g., + `\mathcal{L}\left\lbrace 1 \right\rbrace=1/p` has a pole at + `p=0`). A pole in `\bar{f}(p)` to the left of the origin is a + decreasing function of time (e.g., `\mathcal{L}\left\lbrace + e^{-t/2} \right\rbrace=1/(p+1/2)` has a pole at `p=-1/2`), and + a pole to the right of the origin leads to an increasing + function in time (e.g., `\mathcal{L}\left\lbrace t e^{t/4} + \right\rbrace = 1/(p-1/4)^2` has a pole at `p=1/4`). When + singularities occur off the real `p` axis, the time-domain + function is oscillatory. For example `\mathcal{L}\left\lbrace + \mathrm{J}_0(t) \right\rbrace=1/\sqrt{p^2+1}` has a branch cut + starting at `p=j=\sqrt{-1}` and is a decaying oscillatory + function, This range of behaviors is illustrated in Duffy [3] + Figure 4.10.4, p. 228. + + In general as `p \rightarrow \infty` `t \rightarrow 0` and + vice-versa. All numerical inverse Laplace transform methods + require their abscissa to shift closer to the origin for + larger times. If the abscissa shift left of the rightmost + singularity in the Laplace domain, the answer will be + completely wrong (the effect of singularities to the right of + the Bromwich contour are not included in the results). + + For example, the following exponentially growing function has + a pole at `p=3`: + + .. math :: + + \bar{f}(p)=\frac{1}{p^2-9} + + .. 
math :: + + f(t)=\frac{1}{3}\sinh 3t + + >>> mp.dps = 15; mp.pretty = True + >>> fp = lambda p: 1/(p*p-9) + >>> ft = lambda t: sinh(3*t)/3 + >>> tt = [0.01,0.1,1.0,10.0] + >>> ft(tt[0]),invertlaplace(fp,tt[0],method='talbot') + (0.0100015000675014, 0.0100015000675014) + >>> ft(tt[1]),invertlaplace(fp,tt[1],method='talbot') + (0.101506764482381, 0.101506764482381) + >>> ft(tt[2]),invertlaplace(fp,tt[2],method='talbot') + (3.33929164246997, 3.33929164246997) + >>> ft(tt[3]),invertlaplace(fp,tt[3],method='talbot') + (1781079096920.74, -1.61331069624091e-14) + + **References** + + 1. [DLMF]_ section 1.14 (http://dlmf.nist.gov/1.14T4) + 2. Cohen, A.M. (2007). Numerical Methods for Laplace Transform + Inversion, Springer. + 3. Duffy, D.G. (1998). Advanced Engineering Mathematics, CRC Press. + + **Numerical Inverse Laplace Transform Reviews** + + 1. Bellman, R., R.E. Kalaba, J.A. Lockett (1966). *Numerical + inversion of the Laplace transform: Applications to Biology, + Economics, Engineering, and Physics*. Elsevier. + 2. Davies, B., B. Martin (1979). Numerical inversion of the + Laplace transform: a survey and comparison of methods. *Journal + of Computational Physics* 33:1-32, + http://dx.doi.org/10.1016/0021-9991(79)90025-1 + 3. Duffy, D.G. (1993). On the numerical inversion of Laplace + transforms: Comparison of three new methods on characteristic + problems from applications. *ACM Transactions on Mathematical + Software* 19(3):333-359, http://dx.doi.org/10.1145/155743.155788 + 4. Kuhlman, K.L., (2013). Review of Inverse Laplace Transform + Algorithms for Laplace-Space Numerical Approaches, *Numerical + Algorithms*, 63(2):339-355. 
+ http://dx.doi.org/10.1007/s11075-012-9625-3 + + """ + + rule = kwargs.get('method', 'cohen') + if type(rule) is str: + lrule = rule.lower() + if lrule == 'talbot': + rule = ctx._fixed_talbot + elif lrule == 'stehfest': + rule = ctx._stehfest + elif lrule == 'dehoog': + rule = ctx._de_hoog + elif rule == 'cohen': + rule = ctx._cohen + else: + raise ValueError("unknown invlap algorithm: %s" % rule) + else: + rule = rule(ctx) + + # determine the vector of Laplace-space parameter + # needed for the requested method and desired time + rule.calc_laplace_parameter(t, **kwargs) + + # compute the Laplace-space function evalutations + # at the required abscissa. + fp = [f(p) for p in rule.p] + + # compute the time-domain solution from the + # Laplace-space function evaluations + return rule.calc_time_domain_solution(fp, t) + + # shortcuts for the above function for specific methods + def invlaptalbot(ctx, *args, **kwargs): + kwargs['method'] = 'talbot' + return ctx.invertlaplace(*args, **kwargs) + + def invlapstehfest(ctx, *args, **kwargs): + kwargs['method'] = 'stehfest' + return ctx.invertlaplace(*args, **kwargs) + + def invlapdehoog(ctx, *args, **kwargs): + kwargs['method'] = 'dehoog' + return ctx.invertlaplace(*args, **kwargs) + + def invlapcohen(ctx, *args, **kwargs): + kwargs['method'] = 'cohen' + return ctx.invertlaplace(*args, **kwargs) + + +# **************************************** + +if __name__ == '__main__': + import doctest + doctest.testmod() diff --git a/.venv/lib/python3.11/site-packages/mpmath/calculus/polynomials.py b/.venv/lib/python3.11/site-packages/mpmath/calculus/polynomials.py new file mode 100644 index 0000000000000000000000000000000000000000..ba75c1e88cbc5d40aa590a786c0af5229f193103 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/calculus/polynomials.py @@ -0,0 +1,213 @@ +from ..libmp.backend import xrange +from .calculus import defun + +#----------------------------------------------------------------------------# +# Polynomials # 
#----------------------------------------------------------------------------#
#                                 Polynomials                                #
#----------------------------------------------------------------------------#

# XXX: extra precision
@defun
def polyval(ctx, coeffs, x, derivative=False):
    r"""
    Evaluate the polynomial with coefficient list *coeffs* (highest
    power first) at the point *x*, using Horner's scheme:

    .. math ::

        P(x) = c_n x^n + \ldots + c_2 x^2 + c_1 x + c_0.

    If *derivative=True*, the derivative is accumulated alongside the
    value and the pair `(P(x), P'(x))` is returned.

    >>> from mpmath import *
    >>> mp.pretty = True
    >>> polyval([3, 0, 2], 0.5)
    2.75
    >>> polyval([3, 0, 2], 0.5, derivative=True)
    (2.75, 3.0)

    The coefficients and the evaluation point may be any combination
    of real or complex numbers.
    """
    if not coeffs:
        # Empty coefficient list: treat as the zero polynomial
        return ctx.zero
    value = ctx.convert(coeffs[0])
    slope = ctx.zero
    for coeff in coeffs[1:]:
        # Horner step; the derivative recurrence must use the value
        # from *before* this step, hence the update order.
        if derivative:
            slope = value + x*slope
        value = coeff + x*value
    if derivative:
        return value, slope
    return value

@defun
def polyroots(ctx, coeffs, maxsteps=50, cleanup=True, extraprec=10,
    error=False, roots_init=None):
    """
    Compute all roots (real or complex) of the polynomial whose
    coefficients are given in *coeffs*, highest power first (the format
    used by :func:`~mpmath.polyval`). The leading coefficient must be
    nonzero.

    The result is a sorted list: real roots first (ascending), then
    complex roots ordered so that conjugate pairs are adjacent. With
    *error=True*, a tuple ``(roots, err)`` is returned, *err* being an
    estimate of the maximum error among the computed roots. Initial
    guesses may be supplied via *roots_init*.

    Internally the Durand-Kerner method is used at the working precision
    extended by ``extraprec`` bits; if convergence is not reached within
    ``maxsteps`` iterations, ``NoConvergence`` is raised. A convergence
    study with respect to ``extraprec`` is recommended, since too little
    extra precision can converge to a wrong answer.

    **Examples**

    >>> from mpmath import *
    >>> mp.dps = 15; mp.pretty = True
    >>> nprint(polyroots([1,-1,-14,24]), 4)
    [-4.0, 2.0, 3.0]
    >>> roots, err = polyroots([4,3,2], error=True)
    >>> for r in roots:
    ...     print(r)
    ...
    (-0.375 + 0.59947894041409j)
    (-0.375 - 0.59947894041409j)

    **References**

    1. http://en.wikipedia.org/wiki/Durand-Kerner_method
    """
    if len(coeffs) <= 1:
        if not coeffs or not coeffs[0]:
            raise ValueError("Input to polyroots must not be the zero polynomial")
        # A nonzero constant polynomial has no roots
        return []

    orig = ctx.prec
    tol = +ctx.eps
    with ctx.extraprec(extraprec):
        deg = len(coeffs) - 1
        # Work with the monic polynomial (divide through by the
        # leading coefficient unless it is already 1)
        lead = ctx.convert(coeffs[0])
        if lead == 1:
            monic = [ctx.convert(c) for c in coeffs]
        else:
            monic = [c/lead for c in coeffs]
        evaluate = lambda pt: ctx.polyval(monic, pt)
        # Starting values: points spiraling out in the complex plane,
        # with any user-supplied guesses taking precedence
        roots = [ctx.mpc((0.4+0.9j)**k) for k in xrange(deg)]
        if roots_init is not None:
            known = min(deg, len(roots_init))
            roots[:known] = list(roots_init[:known])
        err = [ctx.one] * deg
        # Durand-Kerner iteration until convergence
        for step in xrange(maxsteps):
            if abs(max(err)) < tol:
                break
            for i in xrange(deg):
                p = roots[i]
                x = evaluate(p)
                for j in xrange(deg):
                    if i != j:
                        try:
                            x /= (p - roots[j])
                        except ZeroDivisionError:
                            # Two current approximations coincide;
                            # skip this factor and continue
                            continue
                roots[i] = p - x
                err[i] = abs(x)
        if abs(max(err)) >= tol:
            raise ctx.NoConvergence("Didn't converge in maxsteps=%d steps." \
                    % maxsteps)
        # Remove small real or imaginary parts
        if cleanup:
            for i in xrange(deg):
                if abs(roots[i]) < tol:
                    roots[i] = ctx.zero
                elif abs(ctx._im(roots[i])) < tol:
                    roots[i] = roots[i].real
                elif abs(ctx._re(roots[i])) < tol:
                    roots[i] = roots[i].imag * 1j
        # Real roots first (by value), then complex roots grouped so
        # conjugates are adjacent
        roots.sort(key=lambda z: (abs(ctx._im(z)), ctx._re(z)))
    if error:
        err = max(err)
        err = max(err, ctx.ldexp(1, -orig+1))
        return [+r for r in roots], +err
    else:
        return [+r for r in roots]
class MatrixCalculusMethods(object):
    """
    Mixin providing matrix functions (exp, cos, sin, sqrt, log, pow)
    for the mpmath context classes. All methods take the context as
    the first parameter (named ``ctx`` rather than ``self``).
    """

    def _exp_pade(ctx, a):
        """
        Exponential of a matrix using Pade approximants.

        See G. H. Golub, C. F. van Loan 'Matrix Computations',
        third Ed., page 572

        TODO:
         - find a good estimate for q
         - reduce the number of matrix multiplications to improve
           performance
        """
        # Truncation-error estimate for the (p,p) Pade approximant of exp
        def eps_pade(p):
            return ctx.mpf(2)**(3-2*p) * \
                ctx.factorial(p)**2/(ctx.factorial(2*p)**2 * (2*p + 1))
        q = 4
        extraq = 8
        # Increase the Pade order until the estimated error is below eps
        while 1:
            if eps_pade(q) < ctx.eps:
                break
            q += 1
        q += extraq
        # Scaling: divide by 2**j so the argument has small norm; the
        # scaling is undone by j squarings at the end
        j = int(max(1, ctx.mag(ctx.mnorm(a,'inf'))))
        extra = q
        prec = ctx.prec
        ctx.dps += extra + 3
        try:
            a = a/2**j
            na = a.rows
            den = ctx.eye(na)
            num = ctx.eye(na)
            x = ctx.eye(na)
            c = ctx.mpf(1)
            # Build numerator and denominator of the Pade approximant
            for k in range(1, q+1):
                c *= ctx.mpf(q - k + 1)/((2*q - k + 1) * k)
                x = a*x
                cx = c*x
                num += cx
                den += (-1)**k * cx
            f = ctx.lu_solve_mat(den, num)
            # Undo the scaling by repeated squaring
            for k in range(j):
                f = f*f
        finally:
            ctx.prec = prec
        # Multiplying by 1 rounds entries to the restored precision
        return f*1

    def expm(ctx, A, method='taylor'):
        r"""
        Computes the matrix exponential of a square matrix `A`, which is defined
        by the power series

        .. math ::

            \exp(A) = I + A + \frac{A^2}{2!} + \frac{A^3}{3!} + \ldots

        With method='taylor', the matrix exponential is computed
        using the Taylor series. With method='pade', Pade approximants
        are used instead.

        **Examples**

        Basic examples::

            >>> from mpmath import *
            >>> mp.dps = 15; mp.pretty = True
            >>> expm(zeros(3))
            [1.0 0.0 0.0]
            [0.0 1.0 0.0]
            [0.0 0.0 1.0]
            >>> expm(eye(3))
            [2.71828182845905 0.0 0.0]
            [ 0.0 2.71828182845905 0.0]
            [ 0.0 0.0 2.71828182845905]
            >>> expm([[1,1,0],[1,0,1],[0,1,0]])
            [ 3.86814500615414 2.26812870852145 0.841130841230196]
            [ 2.26812870852145 2.44114713886289 1.42699786729125]
            [0.841130841230196 1.42699786729125 1.6000162976327]
            >>> expm([[1,1,0],[1,0,1],[0,1,0]], method='pade')
            [ 3.86814500615414 2.26812870852145 0.841130841230196]
            [ 2.26812870852145 2.44114713886289 1.42699786729125]
            [0.841130841230196 1.42699786729125 1.6000162976327]
            >>> expm([[1+j, 0], [1+j,1]])
            [(1.46869393991589 + 2.28735528717884j) 0.0]
            [ (1.03776739863568 + 3.536943175722j) (2.71828182845905 + 0.0j)]

        Matrices with large entries are allowed::

            >>> expm(matrix([[1,2],[2,3]])**25)
            [5.65024064048415e+2050488462815550 9.14228140091932e+2050488462815550]
            [9.14228140091932e+2050488462815550 1.47925220414035e+2050488462815551]

        The identity `\exp(A+B) = \exp(A) \exp(B)` does not hold for
        noncommuting matrices::

            >>> A = hilbert(3)
            >>> B = A + eye(3)
            >>> chop(mnorm(A*B - B*A))
            0.0
            >>> chop(mnorm(expm(A+B) - expm(A)*expm(B)))
            0.0
            >>> B = A + ones(3)
            >>> mnorm(A*B - B*A)
            1.8
            >>> mnorm(expm(A+B) - expm(A)*expm(B))
            42.0927851137247

        """
        if method == 'pade':
            prec = ctx.prec
            try:
                A = ctx.matrix(A)
                ctx.prec += 2*A.rows
                res = ctx._exp_pade(A)
            finally:
                ctx.prec = prec
            return res
        A = ctx.matrix(A)
        prec = ctx.prec
        # Scaling-and-squaring: choose j from the matrix norm plus a
        # precision-dependent safety margin
        j = int(max(1, ctx.mag(ctx.mnorm(A,'inf'))))
        j += int(0.5*prec**0.5)
        try:
            ctx.prec += 10 + 2*j
            tol = +ctx.eps
            A = A/2**j
            T = A
            Y = A**0 + A
            k = 2
            # Sum the Taylor series until the term norm drops below tol
            while 1:
                T *= A * (1/ctx.mpf(k))
                if ctx.mnorm(T, 'inf') < tol:
                    break
                Y += T
                k += 1
            # Undo the scaling by j squarings
            for k in xrange(j):
                Y = Y*Y
        finally:
            ctx.prec = prec
        Y *= 1
        return Y

    def cosm(ctx, A):
        r"""
        Gives the cosine of a square matrix `A`, defined in analogy
        with the matrix exponential.

        Examples::

            >>> from mpmath import *
            >>> mp.dps = 15; mp.pretty = True
            >>> X = eye(3)
            >>> cosm(X)
            [0.54030230586814 0.0 0.0]
            [ 0.0 0.54030230586814 0.0]
            [ 0.0 0.0 0.54030230586814]
            >>> X = hilbert(3)
            >>> cosm(X)
            [ 0.424403834569555 -0.316643413047167 -0.221474945949293]
            [-0.316643413047167 0.820646708837824 -0.127183694770039]
            [-0.221474945949293 -0.127183694770039 0.909236687217541]
            >>> X = matrix([[1+j,-2],[0,-j]])
            >>> cosm(X)
            [(0.833730025131149 - 0.988897705762865j) (1.07485840848393 - 0.17192140544213j)]
            [ 0.0 (1.54308063481524 + 0.0j)]
        """
        # cos(A) = (exp(iA) + exp(-iA))/2
        B = 0.5 * (ctx.expm(A*ctx.j) + ctx.expm(A*(-ctx.j)))
        # For a real input the result is real; drop spurious imaginary parts
        if not sum(A.apply(ctx.im).apply(abs)):
            B = B.apply(ctx.re)
        return B

    def sinm(ctx, A):
        r"""
        Gives the sine of a square matrix `A`, defined in analogy
        with the matrix exponential.

        Examples::

            >>> from mpmath import *
            >>> mp.dps = 15; mp.pretty = True
            >>> X = eye(3)
            >>> sinm(X)
            [0.841470984807897 0.0 0.0]
            [ 0.0 0.841470984807897 0.0]
            [ 0.0 0.0 0.841470984807897]
            >>> X = hilbert(3)
            >>> sinm(X)
            [0.711608512150994 0.339783913247439 0.220742837314741]
            [0.339783913247439 0.244113865695532 0.187231271174372]
            [0.220742837314741 0.187231271174372 0.155816730769635]
            >>> X = matrix([[1+j,-2],[0,-j]])
            >>> sinm(X)
            [(1.29845758141598 + 0.634963914784736j) (-1.96751511930922 + 0.314700021761367j)]
            [ 0.0 (0.0 - 1.1752011936438j)]
        """
        # sin(A) = (exp(iA) - exp(-iA))/(2i)
        B = (-0.5j) * (ctx.expm(A*ctx.j) - ctx.expm(A*(-ctx.j)))
        # For a real input the result is real; drop spurious imaginary parts
        if not sum(A.apply(ctx.im).apply(abs)):
            B = B.apply(ctx.re)
        return B

    def _sqrtm_rot(ctx, A, _may_rotate):
        # If the iteration fails to converge, cheat by performing
        # a rotation by a complex number
        u = ctx.j**0.3
        return ctx.sqrtm(u*A, _may_rotate) / ctx.sqrt(u)

    def sqrtm(ctx, A, _may_rotate=2):
        r"""
        Computes a square root of the square matrix `A`, i.e. returns
        a matrix `B = A^{1/2}` such that `B^2 = A`. The square root
        of a matrix, if it exists, is not unique.

        **Examples**

        Square roots of some simple matrices::

            >>> from mpmath import *
            >>> mp.dps = 15; mp.pretty = True
            >>> sqrtm([[1,0], [0,1]])
            [1.0 0.0]
            [0.0 1.0]
            >>> sqrtm([[0,0], [0,0]])
            [0.0 0.0]
            [0.0 0.0]
            >>> sqrtm([[2,0],[0,1]])
            [1.4142135623731 0.0]
            [ 0.0 1.0]
            >>> sqrtm([[1,1],[1,0]])
            [ (0.920442065259926 - 0.21728689675164j) (0.568864481005783 + 0.351577584254143j)]
            [(0.568864481005783 + 0.351577584254143j) (0.351577584254143 - 0.568864481005783j)]
            >>> sqrtm([[1,0],[0,1]])
            [1.0 0.0]
            [0.0 1.0]
            >>> sqrtm([[-1,0],[0,1]])
            [(0.0 - 1.0j) 0.0]
            [ 0.0 (1.0 + 0.0j)]
            >>> sqrtm([[j,0],[0,j]])
            [(0.707106781186547 + 0.707106781186547j) 0.0]
            [ 0.0 (0.707106781186547 + 0.707106781186547j)]

        A square root of a rotation matrix, giving the corresponding
        half-angle rotation matrix::

            >>> t1 = 0.75
            >>> t2 = t1 * 0.5
            >>> A1 = matrix([[cos(t1), -sin(t1)], [sin(t1), cos(t1)]])
            >>> A2 = matrix([[cos(t2), -sin(t2)], [sin(t2), cos(t2)]])
            >>> sqrtm(A1)
            [0.930507621912314 -0.366272529086048]
            [0.366272529086048 0.930507621912314]
            >>> A2
            [0.930507621912314 -0.366272529086048]
            [0.366272529086048 0.930507621912314]

        The identity `(A^2)^{1/2} = A` does not necessarily hold::

            >>> A = matrix([[4,1,4],[7,8,9],[10,2,11]])
            >>> sqrtm(A**2)
            [ 4.0 1.0 4.0]
            [ 7.0 8.0 9.0]
            [10.0 2.0 11.0]
            >>> sqrtm(A)**2
            [ 4.0 1.0 4.0]
            [ 7.0 8.0 9.0]
            [10.0 2.0 11.0]
            >>> A = matrix([[-4,1,4],[7,-8,9],[10,2,11]])
            >>> sqrtm(A**2)
            [ 7.43715112194995 -0.324127569985474 1.8481718827526]
            [-0.251549715716942 9.32699765900402 2.48221180985147]
            [ 4.11609388833616 0.775751877098258 13.017955697342]
            >>> chop(sqrtm(A)**2)
            [-4.0 1.0 4.0]
            [ 7.0 -8.0 9.0]
            [10.0 2.0 11.0]

        For some matrices, a square root does not exist::

            >>> sqrtm([[0,1], [0,0]])
            Traceback (most recent call last):
              ...
            ZeroDivisionError: matrix is numerically singular

        Two examples from the documentation for Matlab's ``sqrtm``::

            >>> mp.dps = 15; mp.pretty = True
            >>> sqrtm([[7,10],[15,22]])
            [1.56669890360128 1.74077655955698]
            [2.61116483933547 4.17786374293675]
            >>>
            >>> X = matrix(\
            ... [[5,-4,1,0,0],
            ... [-4,6,-4,1,0],
            ... [1,-4,6,-4,1],
            ... [0,1,-4,6,-4],
            ... [0,0,1,-4,5]])
            >>> Y = matrix(\
            ... [[2,-1,-0,-0,-0],
            ... [-1,2,-1,0,-0],
            ... [0,-1,2,-1,0],
            ... [-0,0,-1,2,-1],
            ... [-0,-0,-0,-1,2]])
            >>> mnorm(sqrtm(X) - Y)
            4.53155328326114e-19

        """
        A = ctx.matrix(A)
        # Trivial
        if A*0 == A:
            return A
        prec = ctx.prec
        if _may_rotate:
            d = ctx.det(A)
            # A nearly-real negative determinant suggests the principal
            # branch is problematic; rotate pre-emptively
            if abs(ctx.im(d)) < 16*ctx.eps and ctx.re(d) < 0:
                return ctx._sqrtm_rot(A, _may_rotate-1)
        try:
            ctx.prec += 10
            tol = ctx.eps * 128
            Y = A
            Z = I = A**0
            k = 0
            # Denman-Beavers iteration
            while 1:
                Yprev = Y
                try:
                    Y, Z = 0.5*(Y+ctx.inverse(Z)), 0.5*(Z+ctx.inverse(Y))
                except ZeroDivisionError:
                    if _may_rotate:
                        Y = ctx._sqrtm_rot(A, _may_rotate-1)
                        break
                    else:
                        raise
                mag1 = ctx.mnorm(Y-Yprev, 'inf')
                mag2 = ctx.mnorm(Y, 'inf')
                if mag1 <= mag2*tol:
                    break
                # Slow progress after several steps: fall back to rotation
                if _may_rotate and k > 6 and not mag1 < mag2 * 0.001:
                    return ctx._sqrtm_rot(A, _may_rotate-1)
                k += 1
                if k > ctx.prec:
                    raise ctx.NoConvergence
        finally:
            ctx.prec = prec
        Y *= 1
        return Y

    def logm(ctx, A):
        r"""
        Computes a logarithm of the square matrix `A`, i.e. returns
        a matrix `B = \log(A)` such that `\exp(B) = A`. The logarithm
        of a matrix, if it exists, is not unique.

        **Examples**

        Logarithms of some simple matrices::

            >>> from mpmath import *
            >>> mp.dps = 15; mp.pretty = True
            >>> X = eye(3)
            >>> logm(X)
            [0.0 0.0 0.0]
            [0.0 0.0 0.0]
            [0.0 0.0 0.0]
            >>> logm(2*X)
            [0.693147180559945 0.0 0.0]
            [ 0.0 0.693147180559945 0.0]
            [ 0.0 0.0 0.693147180559945]
            >>> logm(expm(X))
            [1.0 0.0 0.0]
            [0.0 1.0 0.0]
            [0.0 0.0 1.0]

        A logarithm of a complex matrix::

            >>> X = matrix([[2+j, 1, 3], [1-j, 1-2*j, 1], [-4, -5, j]])
            >>> B = logm(X)
            >>> nprint(B)
            [ (0.808757 + 0.107759j) (2.20752 + 0.202762j) (1.07376 - 0.773874j)]
            [ (0.905709 - 0.107795j) (0.0287395 - 0.824993j) (0.111619 + 0.514272j)]
            [(-0.930151 + 0.399512j) (-2.06266 - 0.674397j) (0.791552 + 0.519839j)]
            >>> chop(expm(B))
            [(2.0 + 1.0j) 1.0 3.0]
            [(1.0 - 1.0j) (1.0 - 2.0j) 1.0]
            [ -4.0 -5.0 (0.0 + 1.0j)]

        A matrix `X` close to the identity matrix, for which
        `\log(\exp(X)) = \exp(\log(X)) = X` holds::

            >>> X = eye(3) + hilbert(3)/4
            >>> X
            [ 1.25 0.125 0.0833333333333333]
            [ 0.125 1.08333333333333 0.0625]
            [0.0833333333333333 0.0625 1.05]
            >>> logm(expm(X))
            [ 1.25 0.125 0.0833333333333333]
            [ 0.125 1.08333333333333 0.0625]
            [0.0833333333333333 0.0625 1.05]
            >>> expm(logm(X))
            [ 1.25 0.125 0.0833333333333333]
            [ 0.125 1.08333333333333 0.0625]
            [0.0833333333333333 0.0625 1.05]

        A logarithm of a rotation matrix, giving back the angle of
        the rotation::

            >>> t = 3.7
            >>> A = matrix([[cos(t),sin(t)],[-sin(t),cos(t)]])
            >>> chop(logm(A))
            [ 0.0 -2.58318530717959]
            [2.58318530717959 0.0]
            >>> (2*pi-t)
            2.58318530717959

        For some matrices, a logarithm does not exist::

            >>> logm([[1,0], [0,0]])
            Traceback (most recent call last):
              ...
            ZeroDivisionError: matrix is numerically singular

        Logarithm of a matrix with large entries::

            >>> logm(hilbert(3) * 10**20).apply(re)
            [ 45.5597513593433 1.27721006042799 0.317662687717978]
            [ 1.27721006042799 42.5222778973542 2.24003708791604]
            [0.317662687717978 2.24003708791604 42.395212822267]

        """
        A = ctx.matrix(A)
        prec = ctx.prec
        try:
            ctx.prec += 10
            tol = ctx.eps * 128
            I = A**0
            B = A
            n = 0
            # Inverse scaling-and-squaring: take repeated square roots
            # until B is close to the identity
            while 1:
                B = ctx.sqrtm(B)
                n += 1
                if ctx.mnorm(B-I, 'inf') < 0.125:
                    break
            # Mercator series log(I+X) = X - X^2/2 + X^3/3 - ...
            T = X = B-I
            L = X*0
            k = 1
            while 1:
                if k & 1:
                    L += T / k
                else:
                    L -= T / k
                T *= X
                if ctx.mnorm(T, 'inf') < tol:
                    break
                k += 1
                if k > ctx.prec:
                    raise ctx.NoConvergence
        finally:
            ctx.prec = prec
        # Undo the n square roots: log(A) = 2**n * log(A**(2**-n))
        L *= 2**n
        return L

    def powm(ctx, A, r):
        r"""
        Computes `A^r = \exp(r \log A)` for a matrix `A` and complex
        number `r`.

        **Examples**

        Powers and inverse powers of a matrix::

            >>> from mpmath import *
            >>> mp.dps = 15; mp.pretty = True
            >>> A = matrix([[4,1,4],[7,8,9],[10,2,11]])
            >>> powm(A, 2)
            [ 63.0 20.0 69.0]
            [174.0 89.0 199.0]
            [164.0 48.0 179.0]
            >>> chop(powm(powm(A, 4), 1/4.))
            [ 4.0 1.0 4.0]
            [ 7.0 8.0 9.0]
            [10.0 2.0 11.0]
            >>> powm(extraprec(20)(powm)(A, -4), -1/4.)
            [ 4.0 1.0 4.0]
            [ 7.0 8.0 9.0]
            [10.0 2.0 11.0]
            >>> chop(powm(powm(A, 1+0.5j), 1/(1+0.5j)))
            [ 4.0 1.0 4.0]
            [ 7.0 8.0 9.0]
            [10.0 2.0 11.0]
            >>> powm(extraprec(5)(powm)(A, -1.5), -1/(1.5))
            [ 4.0 1.0 4.0]
            [ 7.0 8.0 9.0]
            [10.0 2.0 11.0]

        A Fibonacci-generating matrix::

            >>> powm([[1,1],[1,0]], 10)
            [89.0 55.0]
            [55.0 34.0]
            >>> fib(10)
            55.0
            >>> powm([[1,1],[1,0]], 6.5)
            [(16.5166626964253 - 0.0121089837381789j) (10.2078589271083 + 0.0195927472575932j)]
            [(10.2078589271083 + 0.0195927472575932j) (6.30880376931698 - 0.0317017309957721j)]
            >>> (phi**6.5 - (1-phi)**6.5)/sqrt(5)
            (10.2078589271083 - 0.0195927472575932j)
            >>> powm([[1,1],[1,0]], 6.2)
            [ (14.3076953002666 - 0.008222855781077j) (8.81733464837593 + 0.0133048601383712j)]
            [(8.81733464837593 + 0.0133048601383712j) (5.49036065189071 - 0.0215277159194482j)]
            >>> (phi**6.2 - (1-phi)**6.2)/sqrt(5)
            (8.81733464837593 - 0.0133048601383712j)

        """
        A = ctx.matrix(A)
        r = ctx.convert(r)
        prec = ctx.prec
        try:
            ctx.prec += 10
            # Integer exponent: plain repeated multiplication
            if ctx.isint(r):
                v = A ** int(r)
            # Half-integer exponent: integer power of the square root
            elif ctx.isint(r*2):
                y = int(r*2)
                v = ctx.sqrtm(A) ** y
            # General exponent: exp(r*log(A))
            else:
                v = ctx.expm(r*ctx.logm(A))
        finally:
            ctx.prec = prec
        v *= 1
        return v
"""
The eigenvalue problem
----------------------

This file contains routines for the eigenvalue problem.

high level routines:

  hessenberg : reduction of a real or complex square matrix to upper Hessenberg form
  schur : reduction of a real or complex square matrix to upper Schur form
  eig : eigenvalues and eigenvectors of a real or complex square matrix

low level routines:

  hessenberg_reduce_0 : reduction of a real or complex square matrix to upper Hessenberg form
  hessenberg_reduce_1 : auxiliary routine to hessenberg_reduce_0
  qr_step : a single implicitly shifted QR step for an upper Hessenberg matrix
  hessenberg_qr : Schur decomposition of an upper Hessenberg matrix
  eig_tr_r : right eigenvectors of an upper triangular matrix
  eig_tr_l : left eigenvectors of an upper triangular matrix
"""

from ..libmp.backend import xrange

class Eigen(object):
    # Empty container class: the @defun decorator below attaches the
    # eigenvalue routines to it as methods, and the context classes
    # mix it in.
    pass

def defun(f):
    # Register f as a method of Eigen (and hence of contexts using it).
    setattr(Eigen, f.__name__, f)
    return f

def hessenberg_reduce_0(ctx, A, T):
    """
    This routine computes the (upper) Hessenberg decomposition of a square matrix A.
    Given A, an unitary matrix Q is calculated such that

        Q' A Q = H    and    Q' Q = Q Q' = 1

    where H is an upper Hessenberg matrix, meaning that it only contains zeros
    below the first subdiagonal. Here ' denotes the hermitian transpose (i.e.
    transposition and conjugation).

    parameters:
      A   (input/output) On input, A contains the square matrix A of
          dimension (n,n). On output, A contains a compressed representation
          of Q and H.
      T   (output) An array of length n containing the first elements of
          the Householder reflectors.
    """

    # internally we work with householder reflections from the right.
    # let u be a row vector (i.e. u[i]=A[i,:i]). then
    # Q is build up by reflectors of the type (1-v'v) where v is a suitable
    # modification of u. these reflectors are applyed to A from the right.
    # because we work with reflectors from the right we have to start with
    # the bottom row of A and work then upwards (this corresponds to
    # some kind of RQ decomposition).
    # the first part of the vectors v (i.e. A[i,:(i-1)]) are stored as row vectors
    # in the lower left part of A (excluding the diagonal and subdiagonal).
    # the last entry of v is stored in T.
    # the upper right part of A (including diagonal and subdiagonal) becomes H.


    n = A.rows
    if n <= 2: return

    for i in xrange(n-1, 1, -1):

        # scale the vector

        scale = 0
        for k in xrange(0, i):
            scale += abs(ctx.re(A[i,k])) + abs(ctx.im(A[i,k]))

        scale_inv = 0
        if scale != 0:
            scale_inv = 1 / scale

        if scale == 0 or ctx.isinf(scale_inv):
            # sadly there are floating point numbers not equal to zero whose reciprocal is infinity
            T[i] = 0
            A[i,i-1] = 0
            continue

        # calculate parameters for housholder transformation

        H = 0
        for k in xrange(0, i):
            A[i,k] *= scale_inv
            rr = ctx.re(A[i,k])
            ii = ctx.im(A[i,k])
            H += rr * rr + ii * ii

        F = A[i,i-1]
        f = abs(F)
        G = ctx.sqrt(H)
        A[i,i-1] = - G * scale

        if f == 0:
            T[i] = G
        else:
            # preserve the phase of the leading entry (complex case)
            ff = F / f
            T[i] = F + G * ff
            A[i,i-1] *= ff

        H += G * f
        H = 1 / ctx.sqrt(H)

        T[i] *= H
        for k in xrange(0, i - 1):
            A[i,k] *= H

        for j in xrange(0, i):
            # apply housholder transformation (from right)

            G = ctx.conj(T[i]) * A[j,i-1]
            for k in xrange(0, i-1):
                G += ctx.conj(A[i,k]) * A[j,k]

            A[j,i-1] -= G * T[i]
            for k in xrange(0, i-1):
                A[j,k] -= G * A[i,k]

        for j in xrange(0, n):
            # apply housholder transformation (from left)

            G = T[i] * A[i-1,j]
            for k in xrange(0, i-1):
                G += A[i,k] * A[k,j]

            A[i-1,j] -= G * ctx.conj(T[i])
            for k in xrange(0, i-1):
                A[k,j] -= G * ctx.conj(A[i,k])



def hessenberg_reduce_1(ctx, A, T):
    """
    This routine forms the unitary matrix Q described in hessenberg_reduce_0.

    parameters:
      A    (input/output) On input, A is the same matrix as delivered by
           hessenberg_reduce_0. On output, A is set to Q.

      T    (input) On input, T is the same array as delivered by hessenberg_reduce_0.
    """

    n = A.rows

    if n == 1:
        A[0,0] = 1
        return

    # the two topmost rows carry no reflector; start from the identity there
    A[0,0] = A[1,1] = 1
    A[0,1] = A[1,0] = 0

    for i in xrange(2, n):
        if T[i] != 0:

            for j in xrange(0, i):
                # accumulate the i-th Householder reflector into Q
                G = T[i] * A[i-1,j]
                for k in xrange(0, i-1):
                    G += A[i,k] * A[k,j]

                A[i-1,j] -= G * ctx.conj(T[i])
                for k in xrange(0, i-1):
                    A[k,j] -= G * ctx.conj(A[i,k])

        A[i,i] = 1
        for j in xrange(0, i):
            A[j,i] = A[i,j] = 0



@defun
def hessenberg(ctx, A, overwrite_a = False):
    """
    This routine computes the Hessenberg decomposition of a square matrix A.
    Given A, an unitary matrix Q is determined such that

        Q' A Q = H    and    Q' Q = Q Q' = 1

    where H is an upper right Hessenberg matrix. Here ' denotes the hermitian
    transpose (i.e. transposition and conjugation).

    input:
      A            : a real or complex square matrix
      overwrite_a  : if true, allows modification of A which may improve
                     performance. if false, A is not modified.

    output:
      Q : an unitary matrix
      H : an upper right Hessenberg matrix

    example:
      >>> from mpmath import mp
      >>> A = mp.matrix([[3, -1, 2], [2, 5, -5], [-2, -3, 7]])
      >>> Q, H = mp.hessenberg(A)
      >>> mp.nprint(H, 3) # doctest:+SKIP
      [ 3.15 2.23 4.44]
      [-0.769 4.85 3.05]
      [ 0.0 3.61 7.0]
      >>> print(mp.chop(A - Q * H * Q.transpose_conj()))
      [0.0 0.0 0.0]
      [0.0 0.0 0.0]
      [0.0 0.0 0.0]

    return value:   (Q, H)
    """

    n = A.rows

    if n == 1:
        return (ctx.matrix([[1]]), A)

    if not overwrite_a:
        A = A.copy()

    T = ctx.matrix(n, 1)

    # compute the compressed (Q, H) representation in place ...
    hessenberg_reduce_0(ctx, A, T)
    Q = A.copy()
    # ... then expand the reflectors into the explicit unitary Q
    hessenberg_reduce_1(ctx, Q, T)

    # zero out the stored reflector entries below the subdiagonal,
    # leaving A as the Hessenberg matrix H
    for x in xrange(n):
        for y in xrange(x+2, n):
            A[y,x] = 0

    return Q, A


###########################################################################
+ Kresser : Numerical Methods for General and Structured Eigenvalue Problems + """ + + # implicitly shifted and bulge chasing is explained at p.398/399 in "Stoer, Bulirsch - Introduction to Numerical Analysis" + # for bulge chasing see also "Watkins - The Matrix Eigenvalue Problem" sec.4.5,p.173 + + # the Givens rotation we used is determined as follows: let c,s be two complex + # numbers. then we have following relation: + # + # v = sqrt(|c|^2 + |s|^2) + # + # 1/v [ c~ s~] [c] = [v] + # [-s c ] [s] [0] + # + # the matrix on the left is our Givens rotation. + + n = A.rows + + # first step + + # calculate givens rotation + c = A[n0 ,n0] - shift + s = A[n0+1,n0] + + v = ctx.hypot(ctx.hypot(ctx.re(c), ctx.im(c)), ctx.hypot(ctx.re(s), ctx.im(s))) + + if v == 0: + v = 1 + c = 1 + s = 0 + else: + c /= v + s /= v + + cc = ctx.conj(c) + cs = ctx.conj(s) + + for k in xrange(n0, n): + # apply givens rotation from the left + x = A[n0 ,k] + y = A[n0+1,k] + A[n0 ,k] = cc * x + cs * y + A[n0+1,k] = c * y - s * x + + for k in xrange(min(n1, n0+3)): + # apply givens rotation from the right + x = A[k,n0 ] + y = A[k,n0+1] + A[k,n0 ] = c * x + s * y + A[k,n0+1] = cc * y - cs * x + + if not isinstance(Q, bool): + for k in xrange(n): + # eigenvectors + x = Q[k,n0 ] + y = Q[k,n0+1] + Q[k,n0 ] = c * x + s * y + Q[k,n0+1] = cc * y - cs * x + + # chase the bulge + + for j in xrange(n0, n1 - 2): + # calculate givens rotation + + c = A[j+1,j] + s = A[j+2,j] + + v = ctx.hypot(ctx.hypot(ctx.re(c), ctx.im(c)), ctx.hypot(ctx.re(s), ctx.im(s))) + + if v == 0: + A[j+1,j] = 0 + v = 1 + c = 1 + s = 0 + else: + A[j+1,j] = v + c /= v + s /= v + + A[j+2,j] = 0 + + cc = ctx.conj(c) + cs = ctx.conj(s) + + for k in xrange(j+1, n): + # apply givens rotation from the left + x = A[j+1,k] + y = A[j+2,k] + A[j+1,k] = cc * x + cs * y + A[j+2,k] = c * y - s * x + + for k in xrange(0, min(n1, j+4)): + # apply givens rotation from the right + x = A[k,j+1] + y = A[k,j+2] + A[k,j+1] = c * x + s * y + A[k,j+2] = cc * 
y - cs * x + + if not isinstance(Q, bool): + for k in xrange(0, n): + # eigenvectors + x = Q[k,j+1] + y = Q[k,j+2] + Q[k,j+1] = c * x + s * y + Q[k,j+2] = cc * y - cs * x + + + +def hessenberg_qr(ctx, A, Q): + """ + This routine computes the Schur decomposition of an upper Hessenberg matrix A. + Given A, an unitary matrix Q is determined such that + + Q' A Q = R and Q' Q = Q Q' = 1 + + where R is an upper right triangular matrix. Here ' denotes the hermitian + transpose (i.e. transposition and conjugation). + + parameters: + A (input/output) On input, A contains an upper Hessenberg matrix. + On output, A is replace by the upper right triangluar matrix R. + + Q (input/output) The parameter Q is multiplied by the unitary + matrix Q arising from the Schur decomposition. Q can also be + false, in which case the unitary matrix Q is not computated. + """ + + n = A.rows + + norm = 0 + for x in xrange(n): + for y in xrange(min(x+2, n)): + norm += ctx.re(A[y,x]) ** 2 + ctx.im(A[y,x]) ** 2 + norm = ctx.sqrt(norm) / n + + if norm == 0: + return + + n0 = 0 + n1 = n + + eps = ctx.eps / (100 * n) + maxits = ctx.dps * 4 + + its = totalits = 0 + + while 1: + # kressner p.32 algo 3 + # the active submatrix is A[n0:n1,n0:n1] + + k = n0 + + while k + 1 < n1: + s = abs(ctx.re(A[k,k])) + abs(ctx.im(A[k,k])) + abs(ctx.re(A[k+1,k+1])) + abs(ctx.im(A[k+1,k+1])) + if s < eps * norm: + s = norm + if abs(A[k+1,k]) < eps * s: + break + k += 1 + + if k + 1 < n1: + # deflation found at position (k+1, k) + + A[k+1,k] = 0 + n0 = k + 1 + + its = 0 + + if n0 + 1 >= n1: + # block of size at most two has converged + n0 = 0 + n1 = k + 1 + if n1 < 2: + # QR algorithm has converged + return + else: + if (its % 30) == 10: + # exceptional shift + shift = A[n1-1,n1-2] + elif (its % 30) == 20: + # exceptional shift + shift = abs(A[n1-1,n1-2]) + elif (its % 30) == 29: + # exceptional shift + shift = norm + else: + # A = [ a b ] det(x-A)=x*x-x*tr(A)+det(A) + # [ c d ] + # + # eigenvalues bad: 
(tr(A)+sqrt((tr(A))**2-4*det(A)))/2 + # bad because of cancellation if |c| is small and |a-d| is small, too. + # + # eigenvalues good: (a+d+sqrt((a-d)**2+4*b*c))/2 + + t = A[n1-2,n1-2] + A[n1-1,n1-1] + s = (A[n1-1,n1-1] - A[n1-2,n1-2]) ** 2 + 4 * A[n1-1,n1-2] * A[n1-2,n1-1] + if ctx.re(s) > 0: + s = ctx.sqrt(s) + else: + s = ctx.sqrt(-s) * 1j + a = (t + s) / 2 + b = (t - s) / 2 + if abs(A[n1-1,n1-1] - a) > abs(A[n1-1,n1-1] - b): + shift = b + else: + shift = a + + its += 1 + totalits += 1 + + qr_step(ctx, n0, n1, A, Q, shift) + + if its > maxits: + raise RuntimeError("qr: failed to converge after %d steps" % its) + + +@defun +def schur(ctx, A, overwrite_a = False): + """ + This routine computes the Schur decomposition of a square matrix A. + Given A, an unitary matrix Q is determined such that + + Q' A Q = R and Q' Q = Q Q' = 1 + + where R is an upper right triangular matrix. Here ' denotes the + hermitian transpose (i.e. transposition and conjugation). + + input: + A : a real or complex square matrix + overwrite_a : if true, allows modification of A which may improve + performance. if false, A is not modified. + + output: + Q : an unitary matrix + R : an upper right triangular matrix + + return value: (Q, R) + + example: + >>> from mpmath import mp + >>> A = mp.matrix([[3, -1, 2], [2, 5, -5], [-2, -3, 7]]) + >>> Q, R = mp.schur(A) + >>> mp.nprint(R, 3) # doctest:+SKIP + [2.0 0.417 -2.53] + [0.0 4.0 -4.74] + [0.0 0.0 9.0] + >>> print(mp.chop(A - Q * R * Q.transpose_conj())) + [0.0 0.0 0.0] + [0.0 0.0 0.0] + [0.0 0.0 0.0] + + warning: The Schur decomposition is not unique. 
+ """ + + n = A.rows + + if n == 1: + return (ctx.matrix([[1]]), A) + + if not overwrite_a: + A = A.copy() + + T = ctx.matrix(n, 1) + + hessenberg_reduce_0(ctx, A, T) + Q = A.copy() + hessenberg_reduce_1(ctx, Q, T) + + for x in xrange(n): + for y in xrange(x + 2, n): + A[y,x] = 0 + + hessenberg_qr(ctx, A, Q) + + return Q, A + + +def eig_tr_r(ctx, A): + """ + This routine calculates the right eigenvectors of an upper right triangular matrix. + + input: + A an upper right triangular matrix + + output: + ER a matrix whose columns form the right eigenvectors of A + + return value: ER + """ + + # this subroutine is inspired by the lapack routines ctrevc.f,clatrs.f + + n = A.rows + + ER = ctx.eye(n) + + eps = ctx.eps + + unfl = ctx.ldexp(ctx.one, -ctx.prec * 30) + # since mpmath effectively has no limits on the exponent, we simply scale doubles up + # original double has prec*20 + + smlnum = unfl * (n / eps) + simin = 1 / ctx.sqrt(eps) + + rmax = 1 + + for i in xrange(1, n): + s = A[i,i] + + smin = max(eps * abs(s), smlnum) + + for j in xrange(i - 1, -1, -1): + + r = 0 + for k in xrange(j + 1, i + 1): + r += A[j,k] * ER[k,i] + + t = A[j,j] - s + if abs(t) < smin: + t = smin + + r = -r / t + ER[j,i] = r + + rmax = max(rmax, abs(r)) + if rmax > simin: + for k in xrange(j, i+1): + ER[k,i] /= rmax + rmax = 1 + + if rmax != 1: + for k in xrange(0, i + 1): + ER[k,i] /= rmax + + return ER + +def eig_tr_l(ctx, A): + """ + This routine calculates the left eigenvectors of an upper right triangular matrix. 
+ + input: + A an upper right triangular matrix + + output: + EL a matrix whose rows form the left eigenvectors of A + + return value: EL + """ + + n = A.rows + + EL = ctx.eye(n) + + eps = ctx.eps + + unfl = ctx.ldexp(ctx.one, -ctx.prec * 30) + # since mpmath effectively has no limits on the exponent, we simply scale doubles up + # original double has prec*20 + + smlnum = unfl * (n / eps) + simin = 1 / ctx.sqrt(eps) + + rmax = 1 + + for i in xrange(0, n - 1): + s = A[i,i] + + smin = max(eps * abs(s), smlnum) + + for j in xrange(i + 1, n): + + r = 0 + for k in xrange(i, j): + r += EL[i,k] * A[k,j] + + t = A[j,j] - s + if abs(t) < smin: + t = smin + + r = -r / t + EL[i,j] = r + + rmax = max(rmax, abs(r)) + if rmax > simin: + for k in xrange(i, j + 1): + EL[i,k] /= rmax + rmax = 1 + + if rmax != 1: + for k in xrange(i, n): + EL[i,k] /= rmax + + return EL + +@defun +def eig(ctx, A, left = False, right = True, overwrite_a = False): + """ + This routine computes the eigenvalues and optionally the left and right + eigenvectors of a square matrix A. Given A, a vector E and matrices ER + and EL are calculated such that + + A ER[:,i] = E[i] ER[:,i] + EL[i,:] A = EL[i,:] E[i] + + E contains the eigenvalues of A. The columns of ER contain the right eigenvectors + of A whereas the rows of EL contain the left eigenvectors. + + + input: + A : a real or complex square matrix of shape (n, n) + left : if true, the left eigenvectors are calculated. + right : if true, the right eigenvectors are calculated. + overwrite_a : if true, allows modification of A which may improve + performance. if false, A is not modified. + + output: + E : a list of length n containing the eigenvalues of A. + ER : a matrix whose columns contain the right eigenvectors of A. + EL : a matrix whose rows contain the left eigenvectors of A. + + return values: + E if left and right are both false. + (E, ER) if right is true and left is false. + (E, EL) if left is true and right is false. 
+ (E, EL, ER) if left and right are true. + + + examples: + >>> from mpmath import mp + >>> A = mp.matrix([[3, -1, 2], [2, 5, -5], [-2, -3, 7]]) + >>> E, ER = mp.eig(A) + >>> print(mp.chop(A * ER[:,0] - E[0] * ER[:,0])) + [0.0] + [0.0] + [0.0] + + >>> E, EL, ER = mp.eig(A,left = True, right = True) + >>> E, EL, ER = mp.eig_sort(E, EL, ER) + >>> mp.nprint(E) + [2.0, 4.0, 9.0] + >>> print(mp.chop(A * ER[:,0] - E[0] * ER[:,0])) + [0.0] + [0.0] + [0.0] + >>> print(mp.chop( EL[0,:] * A - EL[0,:] * E[0])) + [0.0 0.0 0.0] + + warning: + - If there are multiple eigenvalues, the eigenvectors do not necessarily + span the whole vectorspace, i.e. ER and EL may have not full rank. + Furthermore in that case the eigenvectors are numerical ill-conditioned. + - In the general case the eigenvalues have no natural order. + + see also: + - eigh (or eigsy, eighe) for the symmetric eigenvalue problem. + - eig_sort for sorting of eigenvalues and eigenvectors + """ + + n = A.rows + + if n == 1: + if left and (not right): + return ([A[0]], ctx.matrix([[1]])) + + if right and (not left): + return ([A[0]], ctx.matrix([[1]])) + + return ([A[0]], ctx.matrix([[1]]), ctx.matrix([[1]])) + + if not overwrite_a: + A = A.copy() + + T = ctx.zeros(n, 1) + + hessenberg_reduce_0(ctx, A, T) + + if left or right: + Q = A.copy() + hessenberg_reduce_1(ctx, Q, T) + else: + Q = False + + for x in xrange(n): + for y in xrange(x + 2, n): + A[y,x] = 0 + + hessenberg_qr(ctx, A, Q) + + E = [0 for i in xrange(n)] + for i in xrange(n): + E[i] = A[i,i] + + if not (left or right): + return E + + if left: + EL = eig_tr_l(ctx, A) + EL = EL * Q.transpose_conj() + + if right: + ER = eig_tr_r(ctx, A) + ER = Q * ER + + if left and (not right): + return (E, EL) + + if right and (not left): + return (E, ER) + + return (E, EL, ER) + +@defun +def eig_sort(ctx, E, EL = False, ER = False, f = "real"): + """ + This routine sorts the eigenvalues and eigenvectors delivered by ``eig``. 
+ + parameters: + E : the eigenvalues as delivered by eig + EL : the left eigenvectors as delivered by eig, or false + ER : the right eigenvectors as delivered by eig, or false + f : either a string ("real" sort by increasing real part, "imag" sort by + increasing imag part, "abs" sort by absolute value) or a function + mapping complexs to the reals, i.e. ``f = lambda x: -mp.re(x) `` + would sort the eigenvalues by decreasing real part. + + return values: + E if EL and ER are both false. + (E, ER) if ER is not false and left is false. + (E, EL) if EL is not false and right is false. + (E, EL, ER) if EL and ER are not false. + + example: + >>> from mpmath import mp + >>> A = mp.matrix([[3, -1, 2], [2, 5, -5], [-2, -3, 7]]) + >>> E, EL, ER = mp.eig(A,left = True, right = True) + >>> E, EL, ER = mp.eig_sort(E, EL, ER) + >>> mp.nprint(E) + [2.0, 4.0, 9.0] + >>> E, EL, ER = mp.eig_sort(E, EL, ER,f = lambda x: -mp.re(x)) + >>> mp.nprint(E) + [9.0, 4.0, 2.0] + >>> print(mp.chop(A * ER[:,0] - E[0] * ER[:,0])) + [0.0] + [0.0] + [0.0] + >>> print(mp.chop( EL[0,:] * A - EL[0,:] * E[0])) + [0.0 0.0 0.0] + """ + + if isinstance(f, str): + if f == "real": + f = ctx.re + elif f == "imag": + f = ctx.im + elif f == "abs": + f = abs + else: + raise RuntimeError("unknown function %s" % f) + + n = len(E) + + # Sort eigenvalues (bubble-sort) + + for i in xrange(n): + imax = i + s = f(E[i]) # s is the current maximal element + + for j in xrange(i + 1, n): + c = f(E[j]) + if c < s: + s = c + imax = j + + if imax != i: + # swap eigenvalues + + z = E[i] + E[i] = E[imax] + E[imax] = z + + if not isinstance(EL, bool): + for j in xrange(n): + z = EL[i,j] + EL[i,j] = EL[imax,j] + EL[imax,j] = z + + if not isinstance(ER, bool): + for j in xrange(n): + z = ER[j,i] + ER[j,i] = ER[j,imax] + ER[j,imax] = z + + if isinstance(EL, bool) and isinstance(ER, bool): + return E + + if isinstance(EL, bool) and not(isinstance(ER, bool)): + return (E, ER) + + if isinstance(ER, bool) and not(isinstance(EL, 
bool)): + return (E, EL) + + return (E, EL, ER) diff --git a/.venv/lib/python3.11/site-packages/mpmath/matrices/eigen_symmetric.py b/.venv/lib/python3.11/site-packages/mpmath/matrices/eigen_symmetric.py new file mode 100644 index 0000000000000000000000000000000000000000..c82c0bb061d22c37a89f82a0b9bdab3e9ba7ddde --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/matrices/eigen_symmetric.py @@ -0,0 +1,1807 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +################################################################################################## +# module for the symmetric eigenvalue problem +# Copyright 2013 Timo Hartmann (thartmann15 at gmail.com) +# +# todo: +# - implement balancing +# +################################################################################################## + +""" +The symmetric eigenvalue problem. +--------------------------------- + +This file contains routines for the symmetric eigenvalue problem. + +high level routines: + + eigsy : real symmetric (ordinary) eigenvalue problem + eighe : complex hermitian (ordinary) eigenvalue problem + eigh : unified interface for eigsy and eighe + svd_r : singular value decomposition for real matrices + svd_c : singular value decomposition for complex matrices + svd : unified interface for svd_r and svd_c + + +low level routines: + + r_sy_tridiag : reduction of real symmetric matrix to real symmetric tridiagonal matrix + c_he_tridiag_0 : reduction of complex hermitian matrix to real symmetric tridiagonal matrix + c_he_tridiag_1 : auxiliary routine to c_he_tridiag_0 + c_he_tridiag_2 : auxiliary routine to c_he_tridiag_0 + tridiag_eigen : solves the real symmetric tridiagonal matrix eigenvalue problem + svd_r_raw : raw singular value decomposition for real matrices + svd_c_raw : raw singular value decomposition for complex matrices +""" + +from ..libmp.backend import xrange +from .eigen import defun + + +def r_sy_tridiag(ctx, A, D, E, calc_ev = True): + """ + This routine transforms a 
real symmetric matrix A to a real symmetric + tridiagonal matrix T using an orthogonal similarity transformation: + Q' * A * Q = T (here ' denotes the matrix transpose). + The orthogonal matrix Q is build up from Householder reflectors. + + parameters: + A (input/output) On input, A contains the real symmetric matrix of + dimension (n,n). On output, if calc_ev is true, A contains the + orthogonal matrix Q, otherwise A is destroyed. + + D (output) real array of length n, contains the diagonal elements + of the tridiagonal matrix + + E (output) real array of length n, contains the offdiagonal elements + of the tridiagonal matrix in E[0:(n-1)] where is the dimension of + the matrix A. E[n-1] is undefined. + + calc_ev (input) If calc_ev is true, this routine explicitly calculates the + orthogonal matrix Q which is then returned in A. If calc_ev is + false, Q is not explicitly calculated resulting in a shorter run time. + + This routine is a python translation of the fortran routine tred2.f in the + software library EISPACK (see netlib.org) which itself is based on the algol + procedure tred2 described in: + - Num. Math. 11, p.181-195 (1968) by Martin, Reinsch and Wilkonson + - Handbook for auto. comp., Vol II, Linear Algebra, p.212-226 (1971) + + For a good introduction to Householder reflections, see also + Stoer, Bulirsch - Introduction to Numerical Analysis. 
+ """ + + # note : the vector v of the i-th houshoulder reflector is stored in a[(i+1):,i] + # whereas v/ is stored in a[i,(i+1):] + + n = A.rows + for i in xrange(n - 1, 0, -1): + # scale the vector + + scale = 0 + for k in xrange(0, i): + scale += abs(A[k,i]) + + scale_inv = 0 + if scale != 0: + scale_inv = 1/scale + + # sadly there are floating point numbers not equal to zero whose reciprocal is infinity + + if i == 1 or scale == 0 or ctx.isinf(scale_inv): + E[i] = A[i-1,i] # nothing to do + D[i] = 0 + continue + + # calculate parameters for housholder transformation + + H = 0 + for k in xrange(0, i): + A[k,i] *= scale_inv + H += A[k,i] * A[k,i] + + F = A[i-1,i] + G = ctx.sqrt(H) + if F > 0: + G = -G + E[i] = scale * G + H -= F * G + A[i-1,i] = F - G + F = 0 + + # apply housholder transformation + + for j in xrange(0, i): + if calc_ev: + A[i,j] = A[j,i] / H + + G = 0 # calculate A*U + for k in xrange(0, j + 1): + G += A[k,j] * A[k,i] + for k in xrange(j + 1, i): + G += A[j,k] * A[k,i] + + E[j] = G / H # calculate P + F += E[j] * A[j,i] + + HH = F / (2 * H) + + for j in xrange(0, i): # calculate reduced A + F = A[j,i] + G = E[j] - HH * F # calculate Q + E[j] = G + + for k in xrange(0, j + 1): + A[k,j] -= F * E[k] + G * A[k,i] + + D[i] = H + + for i in xrange(1, n): # better for compatibility + E[i-1] = E[i] + E[n-1] = 0 + + if calc_ev: + D[0] = 0 + for i in xrange(0, n): + if D[i] != 0: + for j in xrange(0, i): # accumulate transformation matrices + G = 0 + for k in xrange(0, i): + G += A[i,k] * A[k,j] + for k in xrange(0, i): + A[k,j] -= G * A[k,i] + + D[i] = A[i,i] + A[i,i] = 1 + + for j in xrange(0, i): + A[j,i] = A[i,j] = 0 + else: + for i in xrange(0, n): + D[i] = A[i,i] + + + + + +def c_he_tridiag_0(ctx, A, D, E, T): + """ + This routine transforms a complex hermitian matrix A to a real symmetric + tridiagonal matrix T using an unitary similarity transformation: + Q' * A * Q = T (here ' denotes the hermitian matrix transpose, + i.e. 
transposition und conjugation). + The unitary matrix Q is build up from Householder reflectors and + an unitary diagonal matrix. + + parameters: + A (input/output) On input, A contains the complex hermitian matrix + of dimension (n,n). On output, A contains the unitary matrix Q + in compressed form. + + D (output) real array of length n, contains the diagonal elements + of the tridiagonal matrix. + + E (output) real array of length n, contains the offdiagonal elements + of the tridiagonal matrix in E[0:(n-1)] where is the dimension of + the matrix A. E[n-1] is undefined. + + T (output) complex array of length n, contains a unitary diagonal + matrix. + + This routine is a python translation (in slightly modified form) of the fortran + routine htridi.f in the software library EISPACK (see netlib.org) which itself + is a complex version of the algol procedure tred1 described in: + - Num. Math. 11, p.181-195 (1968) by Martin, Reinsch and Wilkonson + - Handbook for auto. comp., Vol II, Linear Algebra, p.212-226 (1971) + + For a good introduction to Householder reflections, see also + Stoer, Bulirsch - Introduction to Numerical Analysis. 
+ """ + + n = A.rows + T[n-1] = 1 + for i in xrange(n - 1, 0, -1): + + # scale the vector + + scale = 0 + for k in xrange(0, i): + scale += abs(ctx.re(A[k,i])) + abs(ctx.im(A[k,i])) + + scale_inv = 0 + if scale != 0: + scale_inv = 1 / scale + + # sadly there are floating point numbers not equal to zero whose reciprocal is infinity + + if scale == 0 or ctx.isinf(scale_inv): + E[i] = 0 + D[i] = 0 + T[i-1] = 1 + continue + + if i == 1: + F = A[i-1,i] + f = abs(F) + E[i] = f + D[i] = 0 + if f != 0: + T[i-1] = T[i] * F / f + else: + T[i-1] = T[i] + continue + + # calculate parameters for housholder transformation + + H = 0 + for k in xrange(0, i): + A[k,i] *= scale_inv + rr = ctx.re(A[k,i]) + ii = ctx.im(A[k,i]) + H += rr * rr + ii * ii + + F = A[i-1,i] + f = abs(F) + G = ctx.sqrt(H) + H += G * f + E[i] = scale * G + if f != 0: + F = F / f + TZ = - T[i] * F # T[i-1]=-T[i]*F, but we need T[i-1] as temporary storage + G *= F + else: + TZ = -T[i] # T[i-1]=-T[i] + A[i-1,i] += G + F = 0 + + # apply housholder transformation + + for j in xrange(0, i): + A[i,j] = A[j,i] / H + + G = 0 # calculate A*U + for k in xrange(0, j + 1): + G += ctx.conj(A[k,j]) * A[k,i] + for k in xrange(j + 1, i): + G += A[j,k] * A[k,i] + + T[j] = G / H # calculate P + F += ctx.conj(T[j]) * A[j,i] + + HH = F / (2 * H) + + for j in xrange(0, i): # calculate reduced A + F = A[j,i] + G = T[j] - HH * F # calculate Q + T[j] = G + + for k in xrange(0, j + 1): + A[k,j] -= ctx.conj(F) * T[k] + ctx.conj(G) * A[k,i] + # as we use the lower left part for storage + # we have to use the transpose of the normal formula + + T[i-1] = TZ + D[i] = H + + for i in xrange(1, n): # better for compatibility + E[i-1] = E[i] + E[n-1] = 0 + + D[0] = 0 + for i in xrange(0, n): + zw = D[i] + D[i] = ctx.re(A[i,i]) + A[i,i] = zw + + + + + + + +def c_he_tridiag_1(ctx, A, T): + """ + This routine forms the unitary matrix Q described in c_he_tridiag_0. 
+ + parameters: + A (input/output) On input, A is the same matrix as delivered by + c_he_tridiag_0. On output, A is set to Q. + + T (input) On input, T is the same array as delivered by c_he_tridiag_0. + + """ + + n = A.rows + + for i in xrange(0, n): + if A[i,i] != 0: + for j in xrange(0, i): + G = 0 + for k in xrange(0, i): + G += ctx.conj(A[i,k]) * A[k,j] + for k in xrange(0, i): + A[k,j] -= G * A[k,i] + + A[i,i] = 1 + + for j in xrange(0, i): + A[j,i] = A[i,j] = 0 + + for i in xrange(0, n): + for k in xrange(0, n): + A[i,k] *= T[k] + + + + +def c_he_tridiag_2(ctx, A, T, B): + """ + This routine applied the unitary matrix Q described in c_he_tridiag_0 + onto the the matrix B, i.e. it forms Q*B. + + parameters: + A (input) On input, A is the same matrix as delivered by c_he_tridiag_0. + + T (input) On input, T is the same array as delivered by c_he_tridiag_0. + + B (input/output) On input, B is a complex matrix. On output B is replaced + by Q*B. + + This routine is a python translation of the fortran routine htribk.f in the + software library EISPACK (see netlib.org). See c_he_tridiag_0 for more + references. + """ + + n = A.rows + + for i in xrange(0, n): + for k in xrange(0, n): + B[k,i] *= T[k] + + for i in xrange(0, n): + if A[i,i] != 0: + for j in xrange(0, n): + G = 0 + for k in xrange(0, i): + G += ctx.conj(A[i,k]) * B[k,j] + for k in xrange(0, i): + B[k,j] -= G * A[k,i] + + + + + +def tridiag_eigen(ctx, d, e, z = False): + """ + This subroutine find the eigenvalues and the first components of the + eigenvectors of a real symmetric tridiagonal matrix using the implicit + QL method. + + parameters: + + d (input/output) real array of length n. on input, d contains the diagonal + elements of the input matrix. on output, d contains the eigenvalues in + ascending order. + + e (input) real array of length n. on input, e contains the offdiagonal + elements of the input matrix in e[0:(n-1)]. On output, e has been + destroyed. 
+ + z (input/output) If z is equal to False, no eigenvectors will be computed. + Otherwise on input z should have the format z[0:m,0:n] (i.e. a real or + complex matrix of dimension (m,n) ). On output this matrix will be + multiplied by the matrix of the eigenvectors (i.e. the columns of this + matrix are the eigenvectors): z --> z*EV + That means if z[i,j]={1 if j==j; 0 otherwise} on input, then on output + z will contain the first m components of the eigenvectors. That means + if m is equal to n, the i-th eigenvector will be z[:,i]. + + This routine is a python translation (in slightly modified form) of the + fortran routine imtql2.f in the software library EISPACK (see netlib.org) + which itself is based on the algol procudure imtql2 desribed in: + - num. math. 12, p. 377-383(1968) by matrin and wilkinson + - modified in num. math. 15, p. 450(1970) by dubrulle + - handbook for auto. comp., vol. II-linear algebra, p. 241-248 (1971) + See also the routine gaussq.f in netlog.org or acm algorithm 726. + """ + + n = len(d) + e[n-1] = 0 + iterlim = 2 * ctx.dps + + for l in xrange(n): + j = 0 + while 1: + m = l + while 1: + # look for a small subdiagonal element + if m + 1 == n: + break + if abs(e[m]) <= ctx.eps * (abs(d[m]) + abs(d[m + 1])): + break + m = m + 1 + if m == l: + break + + if j >= iterlim: + raise RuntimeError("tridiag_eigen: no convergence to an eigenvalue after %d iterations" % iterlim) + + j += 1 + + # form shift + + p = d[l] + g = (d[l + 1] - p) / (2 * e[l]) + r = ctx.hypot(g, 1) + + if g < 0: + s = g - r + else: + s = g + r + + g = d[m] - p + e[l] / s + + s, c, p = 1, 1, 0 + + for i in xrange(m - 1, l - 1, -1): + f = s * e[i] + b = c * e[i] + if abs(f) > abs(g): # this here is a slight improvement also used in gaussq.f or acm algorithm 726. 
+ c = g / f + r = ctx.hypot(c, 1) + e[i + 1] = f * r + s = 1 / r + c = c * s + else: + s = f / g + r = ctx.hypot(s, 1) + e[i + 1] = g * r + c = 1 / r + s = s * c + g = d[i + 1] - p + r = (d[i] - g) * s + 2 * c * b + p = s * r + d[i + 1] = g + p + g = c * r - b + + if not isinstance(z, bool): + # calculate eigenvectors + for w in xrange(z.rows): + f = z[w,i+1] + z[w,i+1] = s * z[w,i] + c * f + z[w,i ] = c * z[w,i] - s * f + + d[l] = d[l] - p + e[l] = g + e[m] = 0 + + for ii in xrange(1, n): + # sort eigenvalues and eigenvectors (bubble-sort) + i = ii - 1 + k = i + p = d[i] + for j in xrange(ii, n): + if d[j] >= p: + continue + k = j + p = d[k] + if k == i: + continue + d[k] = d[i] + d[i] = p + + if not isinstance(z, bool): + for w in xrange(z.rows): + p = z[w,i] + z[w,i] = z[w,k] + z[w,k] = p + +######################################################################################## + +@defun +def eigsy(ctx, A, eigvals_only = False, overwrite_a = False): + """ + This routine solves the (ordinary) eigenvalue problem for a real symmetric + square matrix A. Given A, an orthogonal matrix Q is calculated which + diagonalizes A: + + Q' A Q = diag(E) and Q Q' = Q' Q = 1 + + Here diag(E) is a diagonal matrix whose diagonal is E. + ' denotes the transpose. + + The columns of Q are the eigenvectors of A and E contains the eigenvalues: + + A Q[:,i] = E[i] Q[:,i] + + + input: + + A: real matrix of format (n,n) which is symmetric + (i.e. A=A' or A[i,j]=A[j,i]) + + eigvals_only: if true, calculates only the eigenvalues E. + if false, calculates both eigenvectors and eigenvalues. + + overwrite_a: if true, allows modification of A which may improve + performance. if false, A is not modified. + + output: + + E: vector of format (n). contains the eigenvalues of A in ascending order. + + Q: orthogonal matrix of format (n,n). contains the eigenvectors + of A as columns. 
+ + return value: + + E if eigvals_only is true + (E, Q) if eigvals_only is false + + example: + >>> from mpmath import mp + >>> A = mp.matrix([[3, 2], [2, 0]]) + >>> E = mp.eigsy(A, eigvals_only = True) + >>> print(E) + [-1.0] + [ 4.0] + + >>> A = mp.matrix([[1, 2], [2, 3]]) + >>> E, Q = mp.eigsy(A) + >>> print(mp.chop(A * Q[:,0] - E[0] * Q[:,0])) + [0.0] + [0.0] + + see also: eighe, eigh, eig + """ + + if not overwrite_a: + A = A.copy() + + d = ctx.zeros(A.rows, 1) + e = ctx.zeros(A.rows, 1) + + if eigvals_only: + r_sy_tridiag(ctx, A, d, e, calc_ev = False) + tridiag_eigen(ctx, d, e, False) + return d + else: + r_sy_tridiag(ctx, A, d, e, calc_ev = True) + tridiag_eigen(ctx, d, e, A) + return (d, A) + + +@defun +def eighe(ctx, A, eigvals_only = False, overwrite_a = False): + """ + This routine solves the (ordinary) eigenvalue problem for a complex + hermitian square matrix A. Given A, an unitary matrix Q is calculated which + diagonalizes A: + + Q' A Q = diag(E) and Q Q' = Q' Q = 1 + + Here diag(E) a is diagonal matrix whose diagonal is E. + ' denotes the hermitian transpose (i.e. ordinary transposition and + complex conjugation). + + The columns of Q are the eigenvectors of A and E contains the eigenvalues: + + A Q[:,i] = E[i] Q[:,i] + + + input: + + A: complex matrix of format (n,n) which is hermitian + (i.e. A=A' or A[i,j]=conj(A[j,i])) + + eigvals_only: if true, calculates only the eigenvalues E. + if false, calculates both eigenvectors and eigenvalues. + + overwrite_a: if true, allows modification of A which may improve + performance. if false, A is not modified. + + output: + + E: vector of format (n). contains the eigenvalues of A in ascending order. + + Q: unitary matrix of format (n,n). contains the eigenvectors + of A as columns. 
+ + return value: + + E if eigvals_only is true + (E, Q) if eigvals_only is false + + example: + >>> from mpmath import mp + >>> A = mp.matrix([[1, -3 - 1j], [-3 + 1j, -2]]) + >>> E = mp.eighe(A, eigvals_only = True) + >>> print(E) + [-4.0] + [ 3.0] + + >>> A = mp.matrix([[1, 2 + 5j], [2 - 5j, 3]]) + >>> E, Q = mp.eighe(A) + >>> print(mp.chop(A * Q[:,0] - E[0] * Q[:,0])) + [0.0] + [0.0] + + see also: eigsy, eigh, eig + """ + + if not overwrite_a: + A = A.copy() + + d = ctx.zeros(A.rows, 1) + e = ctx.zeros(A.rows, 1) + t = ctx.zeros(A.rows, 1) + + if eigvals_only: + c_he_tridiag_0(ctx, A, d, e, t) + tridiag_eigen(ctx, d, e, False) + return d + else: + c_he_tridiag_0(ctx, A, d, e, t) + B = ctx.eye(A.rows) + tridiag_eigen(ctx, d, e, B) + c_he_tridiag_2(ctx, A, t, B) + return (d, B) + +@defun +def eigh(ctx, A, eigvals_only = False, overwrite_a = False): + """ + "eigh" is a unified interface for "eigsy" and "eighe". Depending on + whether A is real or complex the appropriate function is called. + + This routine solves the (ordinary) eigenvalue problem for a real symmetric + or complex hermitian square matrix A. Given A, an orthogonal (A real) or + unitary (A complex) matrix Q is calculated which diagonalizes A: + + Q' A Q = diag(E) and Q Q' = Q' Q = 1 + + Here diag(E) a is diagonal matrix whose diagonal is E. + ' denotes the hermitian transpose (i.e. ordinary transposition and + complex conjugation). + + The columns of Q are the eigenvectors of A and E contains the eigenvalues: + + A Q[:,i] = E[i] Q[:,i] + + input: + + A: a real or complex square matrix of format (n,n) which is symmetric + (i.e. A[i,j]=A[j,i]) or hermitian (i.e. A[i,j]=conj(A[j,i])). + + eigvals_only: if true, calculates only the eigenvalues E. + if false, calculates both eigenvectors and eigenvalues. + + overwrite_a: if true, allows modification of A which may improve + performance. if false, A is not modified. + + output: + + E: vector of format (n). contains the eigenvalues of A in ascending order. 
+ + Q: an orthogonal or unitary matrix of format (n,n). contains the + eigenvectors of A as columns. + + return value: + + E if eigvals_only is true + (E, Q) if eigvals_only is false + + example: + >>> from mpmath import mp + >>> A = mp.matrix([[3, 2], [2, 0]]) + >>> E = mp.eigh(A, eigvals_only = True) + >>> print(E) + [-1.0] + [ 4.0] + + >>> A = mp.matrix([[1, 2], [2, 3]]) + >>> E, Q = mp.eigh(A) + >>> print(mp.chop(A * Q[:,0] - E[0] * Q[:,0])) + [0.0] + [0.0] + + >>> A = mp.matrix([[1, 2 + 5j], [2 - 5j, 3]]) + >>> E, Q = mp.eigh(A) + >>> print(mp.chop(A * Q[:,0] - E[0] * Q[:,0])) + [0.0] + [0.0] + + see also: eigsy, eighe, eig + """ + + iscomplex = any(type(x) is ctx.mpc for x in A) + + if iscomplex: + return ctx.eighe(A, eigvals_only = eigvals_only, overwrite_a = overwrite_a) + else: + return ctx.eigsy(A, eigvals_only = eigvals_only, overwrite_a = overwrite_a) + + +@defun +def gauss_quadrature(ctx, n, qtype = "legendre", alpha = 0, beta = 0): + """ + This routine calulates gaussian quadrature rules for different + families of orthogonal polynomials. Let (a, b) be an interval, + W(x) a positive weight function and n a positive integer. + Then the purpose of this routine is to calculate pairs (x_k, w_k) + for k=0, 1, 2, ... (n-1) which give + + int(W(x) * F(x), x = a..b) = sum(w_k * F(x_k),k = 0..(n-1)) + + exact for all polynomials F(x) of degree (strictly) less than 2*n. For all + integrable functions F(x) the sum is a (more or less) good approximation to + the integral. The x_k are called nodes (which are the zeros of the + related orthogonal polynomials) and the w_k are called the weights. + + parameters + n (input) The degree of the quadrature rule, i.e. its number of + nodes. + + qtype (input) The family of orthogonal polynmomials for which to + compute the quadrature rule. See the list below. + + alpha (input) real number, used as parameter for some orthogonal + polynomials + + beta (input) real number, used as parameter for some orthogonal + polynomials. 
+ + return value + + (X, W) a pair of two real arrays where x_k = X[k] and w_k = W[k]. + + + orthogonal polynomials: + + qtype polynomial + ----- ---------- + + "legendre" Legendre polynomials, W(x)=1 on the interval (-1, +1) + "legendre01" shifted Legendre polynomials, W(x)=1 on the interval (0, +1) + "hermite" Hermite polynomials, W(x)=exp(-x*x) on (-infinity,+infinity) + "laguerre" Laguerre polynomials, W(x)=exp(-x) on (0,+infinity) + "glaguerre" generalized Laguerre polynomials, W(x)=exp(-x)*x**alpha + on (0, +infinity) + "chebyshev1" Chebyshev polynomials of the first kind, W(x)=1/sqrt(1-x*x) + on (-1, +1) + "chebyshev2" Chebyshev polynomials of the second kind, W(x)=sqrt(1-x*x) + on (-1, +1) + "jacobi" Jacobi polynomials, W(x)=(1-x)**alpha * (1+x)**beta on (-1, +1) + with alpha>-1 and beta>-1 + + examples: + >>> from mpmath import mp + >>> f = lambda x: x**8 + 2 * x**6 - 3 * x**4 + 5 * x**2 - 7 + >>> X, W = mp.gauss_quadrature(5, "hermite") + >>> A = mp.fdot([(f(x), w) for x, w in zip(X, W)]) + >>> B = mp.sqrt(mp.pi) * 57 / 16 + >>> C = mp.quad(lambda x: mp.exp(- x * x) * f(x), [-mp.inf, +mp.inf]) + >>> mp.nprint((mp.chop(A-B, tol = 1e-10), mp.chop(A-C, tol = 1e-10))) + (0.0, 0.0) + + >>> f = lambda x: x**5 - 2 * x**4 + 3 * x**3 - 5 * x**2 + 7 * x - 11 + >>> X, W = mp.gauss_quadrature(3, "laguerre") + >>> A = mp.fdot([(f(x), w) for x, w in zip(X, W)]) + >>> B = 76 + >>> C = mp.quad(lambda x: mp.exp(-x) * f(x), [0, +mp.inf]) + >>> mp.nprint(mp.chop(A-B, tol = 1e-10), mp.chop(A-C, tol = 1e-10)) + .0 + + # orthogonality of the chebyshev polynomials: + >>> f = lambda x: mp.chebyt(3, x) * mp.chebyt(2, x) + >>> X, W = mp.gauss_quadrature(3, "chebyshev1") + >>> A = mp.fdot([(f(x), w) for x, w in zip(X, W)]) + >>> print(mp.chop(A, tol = 1e-10)) + 0.0 + + references: + - golub and welsch, "calculations of gaussian quadrature rules", mathematics of + computation 23, p. 221-230 (1969) + - golub, "some modified matrix eigenvalue problems", siam review 15, p. 
318-334 (1973) + - stroud and secrest, "gaussian quadrature formulas", prentice-hall (1966) + + See also the routine gaussq.f in netlog.org or ACM Transactions on + Mathematical Software algorithm 726. + """ + + d = ctx.zeros(n, 1) + e = ctx.zeros(n, 1) + z = ctx.zeros(1, n) + + z[0,0] = 1 + + if qtype == "legendre": + # legendre on the range -1 +1 , abramowitz, table 25.4, p.916 + w = 2 + for i in xrange(n): + j = i + 1 + e[i] = ctx.sqrt(j * j / (4 * j * j - ctx.mpf(1))) + elif qtype == "legendre01": + # legendre shifted to 0 1 , abramowitz, table 25.8, p.921 + w = 1 + for i in xrange(n): + d[i] = 1 / ctx.mpf(2) + j = i + 1 + e[i] = ctx.sqrt(j * j / (16 * j * j - ctx.mpf(4))) + elif qtype == "hermite": + # hermite on the range -inf +inf , abramowitz, table 25.10,p.924 + w = ctx.sqrt(ctx.pi) + for i in xrange(n): + j = i + 1 + e[i] = ctx.sqrt(j / ctx.mpf(2)) + elif qtype == "laguerre": + # laguerre on the range 0 +inf , abramowitz, table 25.9, p. 923 + w = 1 + for i in xrange(n): + j = i + 1 + d[i] = 2 * j - 1 + e[i] = j + elif qtype=="chebyshev1": + # chebyshev polynimials of the first kind + w = ctx.pi + for i in xrange(n): + e[i] = 1 / ctx.mpf(2) + e[0] = ctx.sqrt(1 / ctx.mpf(2)) + elif qtype == "chebyshev2": + # chebyshev polynimials of the second kind + w = ctx.pi / 2 + for i in xrange(n): + e[i] = 1 / ctx.mpf(2) + elif qtype == "glaguerre": + # generalized laguerre on the range 0 +inf + w = ctx.gamma(1 + alpha) + for i in xrange(n): + j = i + 1 + d[i] = 2 * j - 1 + alpha + e[i] = ctx.sqrt(j * (j + alpha)) + elif qtype == "jacobi": + # jacobi polynomials + alpha = ctx.mpf(alpha) + beta = ctx.mpf(beta) + ab = alpha + beta + abi = ab + 2 + w = (2**(ab+1)) * ctx.gamma(alpha + 1) * ctx.gamma(beta + 1) / ctx.gamma(abi) + d[0] = (beta - alpha) / abi + e[0] = ctx.sqrt(4 * (1 + alpha) * (1 + beta) / ((abi + 1) * (abi * abi))) + a2b2 = beta * beta - alpha * alpha + for i in xrange(1, n): + j = i + 1 + abi = 2 * j + ab + d[i] = a2b2 / ((abi - 2) * abi) + e[i] = 
ctx.sqrt(4 * j * (j + alpha) * (j + beta) * (j + ab) / ((abi * abi - 1) * abi * abi)) + elif isinstance(qtype, str): + raise ValueError("unknown quadrature rule \"%s\"" % qtype) + elif not isinstance(qtype, str): + w = qtype(d, e) + else: + assert 0 + + tridiag_eigen(ctx, d, e, z) + + for i in xrange(len(z)): + z[i] *= z[i] + + z = z.transpose() + return (d, w * z) + +################################################################################################## +################################################################################################## +################################################################################################## + +def svd_r_raw(ctx, A, V = False, calc_u = False): + """ + This routine computes the singular value decomposition of a matrix A. + Given A, two orthogonal matrices U and V are calculated such that + + A = U S V + + where S is a suitable shaped matrix whose off-diagonal elements are zero. + The diagonal elements of S are the singular values of A, i.e. the + squareroots of the eigenvalues of A' A or A A'. Here ' denotes the transpose. + Householder bidiagonalization and a variant of the QR algorithm is used. + + overview of the matrices : + + A : m*n A gets replaced by U + U : m*n U replaces A. If n>m then only the first m*m block of U is + non-zero. column-orthogonal: U' U = B + here B is a n*n matrix whose first min(m,n) diagonal + elements are 1 and all other elements are zero. + S : n*n diagonal matrix, only the diagonal elements are stored in + the array S. only the first min(m,n) diagonal elements are non-zero. + V : n*n orthogonal: V V' = V' V = 1 + + parameters: + A (input/output) On input, A contains a real matrix of shape m*n. + On output, if calc_u is true A contains the column-orthogonal + matrix U; otherwise A is simply used as workspace and thus destroyed. + + V (input/output) if false, the matrix V is not calculated. otherwise + V must be a matrix of shape n*n. 
+ + calc_u (input) If true, the matrix U is calculated and replaces A. + if false, U is not calculated and A is simply destroyed + + return value: + S an array of length n containing the singular values of A sorted by + decreasing magnitude. only the first min(m,n) elements are non-zero. + + This routine is a python translation of the fortran routine svd.f in the + software library EISPACK (see netlib.org) which itself is based on the + algol procedure svd described in: + - num. math. 14, 403-420(1970) by golub and reinsch. + - wilkinson/reinsch: handbook for auto. comp., vol ii-linear algebra, 134-151(1971). + + """ + + m, n = A.rows, A.cols + + S = ctx.zeros(n, 1) + + # work is a temporary array of size n + work = ctx.zeros(n, 1) + + g = scale = anorm = 0 + maxits = 3 * ctx.dps + + for i in xrange(n): # householder reduction to bidiagonal form + work[i] = scale*g + g = s = scale = 0 + if i < m: + for k in xrange(i, m): + scale += ctx.fabs(A[k,i]) + if scale != 0: + for k in xrange(i, m): + A[k,i] /= scale + s += A[k,i] * A[k,i] + f = A[i,i] + g = -ctx.sqrt(s) + if f < 0: + g = -g + h = f * g - s + A[i,i] = f - g + for j in xrange(i+1, n): + s = 0 + for k in xrange(i, m): + s += A[k,i] * A[k,j] + f = s / h + for k in xrange(i, m): + A[k,j] += f * A[k,i] + for k in xrange(i,m): + A[k,i] *= scale + + S[i] = scale * g + g = s = scale = 0 + + if i < m and i != n - 1: + for k in xrange(i+1, n): + scale += ctx.fabs(A[i,k]) + if scale: + for k in xrange(i+1, n): + A[i,k] /= scale + s += A[i,k] * A[i,k] + f = A[i,i+1] + g = -ctx.sqrt(s) + if f < 0: + g = -g + h = f * g - s + A[i,i+1] = f - g + + for k in xrange(i+1, n): + work[k] = A[i,k] / h + + for j in xrange(i+1, m): + s = 0 + for k in xrange(i+1, n): + s += A[j,k] * A[i,k] + for k in xrange(i+1, n): + A[j,k] += s * work[k] + + for k in xrange(i+1, n): + A[i,k] *= scale + + anorm = max(anorm, ctx.fabs(S[i]) + ctx.fabs(work[i])) + + if not isinstance(V, bool): + for i in xrange(n-2, -1, -1): # accumulation of right 
hand transformations + V[i+1,i+1] = 1 + + if work[i+1] != 0: + for j in xrange(i+1, n): + V[i,j] = (A[i,j] / A[i,i+1]) / work[i+1] + for j in xrange(i+1, n): + s = 0 + for k in xrange(i+1, n): + s += A[i,k] * V[j,k] + for k in xrange(i+1, n): + V[j,k] += s * V[i,k] + + for j in xrange(i+1, n): + V[j,i] = V[i,j] = 0 + + V[0,0] = 1 + + if m= maxits: + raise RuntimeError("svd: no convergence to an eigenvalue after %d iterations" % its) + + x = S[l] # shift from bottom 2 by 2 minor + nm = k-1 + y = S[nm] + g = work[nm] + h = work[k] + f = ((y - z) * (y + z) + (g - h) * (g + h))/(2 * h * y) + g = ctx.hypot(f, 1) + if f >= 0: f = ((x - z) * (x + z) + h * ((y / (f + g)) - h)) / x + else: f = ((x - z) * (x + z) + h * ((y / (f - g)) - h)) / x + + c = s = 1 # next qt transformation + + for j in xrange(l, nm + 1): + g = work[j+1] + y = S[j+1] + h = s * g + g = c * g + z = ctx.hypot(f, h) + work[j] = z + c = f / z + s = h / z + f = x * c + g * s + g = g * c - x * s + h = y * s + y *= c + if not isinstance(V, bool): + for jj in xrange(n): + x = V[j ,jj] + z = V[j+1,jj] + V[j ,jj]= x * c + z * s + V[j+1 ,jj]= z * c - x * s + z = ctx.hypot(f, h) + S[j] = z + if z != 0: # rotation can be arbitray if z=0 + z = 1 / z + c = f * z + s = h * z + f = c * g + s * y + x = c * y - s * g + + if calc_u: + for jj in xrange(m): + y = A[jj,j ] + z = A[jj,j+1] + A[jj,j ] = y * c + z * s + A[jj,j+1 ] = z * c - y * s + + work[l] = 0 + work[k] = f + S[k] = x + + ########################## + + # Sort singular values into decreasing order (bubble-sort) + + for i in xrange(n): + imax = i + s = ctx.fabs(S[i]) # s is the current maximal element + + for j in xrange(i + 1, n): + c = ctx.fabs(S[j]) + if c > s: + s = c + imax = j + + if imax != i: + # swap singular values + + z = S[i] + S[i] = S[imax] + S[imax] = z + + if calc_u: + for j in xrange(m): + z = A[j,i] + A[j,i] = A[j,imax] + A[j,imax] = z + + if not isinstance(V, bool): + for j in xrange(n): + z = V[i,j] + V[i,j] = V[imax,j] + V[imax,j] = z + + 
return S + +####################### + +def svd_c_raw(ctx, A, V = False, calc_u = False): + """ + This routine computes the singular value decomposition of a matrix A. + Given A, two unitary matrices U and V are calculated such that + + A = U S V + + where S is a suitable shaped matrix whose off-diagonal elements are zero. + The diagonal elements of S are the singular values of A, i.e. the + squareroots of the eigenvalues of A' A or A A'. Here ' denotes the hermitian + transpose (i.e. transposition and conjugation). Householder bidiagonalization + and a variant of the QR algorithm is used. + + overview of the matrices : + + A : m*n A gets replaced by U + U : m*n U replaces A. If n>m then only the first m*m block of U is + non-zero. column-unitary: U' U = B + here B is a n*n matrix whose first min(m,n) diagonal + elements are 1 and all other elements are zero. + S : n*n diagonal matrix, only the diagonal elements are stored in + the array S. only the first min(m,n) diagonal elements are non-zero. + V : n*n unitary: V V' = V' V = 1 + + parameters: + A (input/output) On input, A contains a complex matrix of shape m*n. + On output, if calc_u is true A contains the column-unitary + matrix U; otherwise A is simply used as workspace and thus destroyed. + + V (input/output) if false, the matrix V is not calculated. otherwise + V must be a matrix of shape n*n. + + calc_u (input) If true, the matrix U is calculated and replaces A. + if false, U is not calculated and A is simply destroyed + + return value: + S an array of length n containing the singular values of A sorted by + decreasing magnitude. only the first min(m,n) elements are non-zero. + + This routine is a python translation of the fortran routine svd.f in the + software library EISPACK (see netlib.org) which itself is based on the + algol procedure svd described in: + - num. math. 14, 403-420(1970) by golub and reinsch. + - wilkinson/reinsch: handbook for auto. comp., vol ii-linear algebra, 134-151(1971). 
+ + """ + + m, n = A.rows, A.cols + + S = ctx.zeros(n, 1) + + # work is a temporary array of size n + work = ctx.zeros(n, 1) + lbeta = ctx.zeros(n, 1) + rbeta = ctx.zeros(n, 1) + dwork = ctx.zeros(n, 1) + + g = scale = anorm = 0 + maxits = 3 * ctx.dps + + for i in xrange(n): # householder reduction to bidiagonal form + dwork[i] = scale * g # dwork are the side-diagonal elements + g = s = scale = 0 + if i < m: + for k in xrange(i, m): + scale += ctx.fabs(ctx.re(A[k,i])) + ctx.fabs(ctx.im(A[k,i])) + if scale != 0: + for k in xrange(i, m): + A[k,i] /= scale + ar = ctx.re(A[k,i]) + ai = ctx.im(A[k,i]) + s += ar * ar + ai * ai + f = A[i,i] + g = -ctx.sqrt(s) + if ctx.re(f) < 0: + beta = -g - ctx.conj(f) + g = -g + else: + beta = -g + ctx.conj(f) + beta /= ctx.conj(beta) + beta += 1 + h = 2 * (ctx.re(f) * g - s) + A[i,i] = f - g + beta /= h + lbeta[i] = (beta / scale) / scale + for j in xrange(i+1, n): + s = 0 + for k in xrange(i, m): + s += ctx.conj(A[k,i]) * A[k,j] + f = beta * s + for k in xrange(i, m): + A[k,j] += f * A[k,i] + for k in xrange(i, m): + A[k,i] *= scale + + S[i] = scale * g # S are the diagonal elements + g = s = scale = 0 + + if i < m and i != n - 1: + for k in xrange(i+1, n): + scale += ctx.fabs(ctx.re(A[i,k])) + ctx.fabs(ctx.im(A[i,k])) + if scale: + for k in xrange(i+1, n): + A[i,k] /= scale + ar = ctx.re(A[i,k]) + ai = ctx.im(A[i,k]) + s += ar * ar + ai * ai + f = A[i,i+1] + g = -ctx.sqrt(s) + if ctx.re(f) < 0: + beta = -g - ctx.conj(f) + g = -g + else: + beta = -g + ctx.conj(f) + + beta /= ctx.conj(beta) + beta += 1 + + h = 2 * (ctx.re(f) * g - s) + A[i,i+1] = f - g + + beta /= h + rbeta[i] = (beta / scale) / scale + + for k in xrange(i+1, n): + work[k] = A[i, k] + + for j in xrange(i+1, m): + s = 0 + for k in xrange(i+1, n): + s += ctx.conj(A[i,k]) * A[j,k] + f = s * beta + for k in xrange(i+1,n): + A[j,k] += f * work[k] + + for k in xrange(i+1, n): + A[i,k] *= scale + + anorm = max(anorm,ctx.fabs(S[i]) + ctx.fabs(dwork[i])) + + if not 
isinstance(V, bool): + for i in xrange(n-2, -1, -1): # accumulation of right hand transformations + V[i+1,i+1] = 1 + + if dwork[i+1] != 0: + f = ctx.conj(rbeta[i]) + for j in xrange(i+1, n): + V[i,j] = A[i,j] * f + for j in xrange(i+1, n): + s = 0 + for k in xrange(i+1, n): + s += ctx.conj(A[i,k]) * V[j,k] + for k in xrange(i+1, n): + V[j,k] += s * V[i,k] + + for j in xrange(i+1,n): + V[j,i] = V[i,j] = 0 + + V[0,0] = 1 + + if m < n : minnm = m + else : minnm = n + + if calc_u: + for i in xrange(minnm-1, -1, -1): # accumulation of left hand transformations + g = S[i] + for j in xrange(i+1, n): + A[i,j] = 0 + if g != 0: + g = 1 / g + for j in xrange(i+1, n): + s = 0 + for k in xrange(i+1, m): + s += ctx.conj(A[k,i]) * A[k,j] + f = s * ctx.conj(lbeta[i]) + for k in xrange(i, m): + A[k,j] += f * A[k,i] + for j in xrange(i, m): + A[j,i] *= g + else: + for j in xrange(i, m): + A[j,i] = 0 + A[i,i] += 1 + + for k in xrange(n-1, -1, -1): + # diagonalization of the bidiagonal form: + # loop over singular values, and over allowed itations + + its = 0 + while 1: + its += 1 + flag = True + + for l in xrange(k, -1, -1): + nm = l - 1 + + if ctx.fabs(dwork[l]) + anorm == anorm: + flag = False + break + + if ctx.fabs(S[nm]) + anorm == anorm: + break + + if flag: + c = 0 + s = 1 + for i in xrange(l, k+1): + f = s * dwork[i] + dwork[i] *= c + if ctx.fabs(f) + anorm == anorm: + break + g = S[i] + h = ctx.hypot(f, g) + S[i] = h + h = 1 / h + c = g * h + s = -f * h + + if calc_u: + for j in xrange(m): + y = A[j,nm] + z = A[j,i] + A[j,nm]= y * c + z * s + A[j,i] = z * c - y * s + + z = S[k] + + if l == k: # convergence + if z < 0: # singular value is made nonnegative + S[k] = -z + if not isinstance(V, bool): + for j in xrange(n): + V[k,j] = -V[k,j] + break + + if its >= maxits: + raise RuntimeError("svd: no convergence to an eigenvalue after %d iterations" % its) + + x = S[l] # shift from bottom 2 by 2 minor + nm = k-1 + y = S[nm] + g = dwork[nm] + h = dwork[k] + f = ((y - z) * (y + z) + 
(g - h) * (g + h)) / (2 * h * y) + g = ctx.hypot(f, 1) + if f >=0: f = (( x - z) *( x + z) + h *((y / (f + g)) - h)) / x + else: f = (( x - z) *( x + z) + h *((y / (f - g)) - h)) / x + + c = s = 1 # next qt transformation + + for j in xrange(l, nm + 1): + g = dwork[j+1] + y = S[j+1] + h = s * g + g = c * g + z = ctx.hypot(f, h) + dwork[j] = z + c = f / z + s = h / z + f = x * c + g * s + g = g * c - x * s + h = y * s + y *= c + if not isinstance(V, bool): + for jj in xrange(n): + x = V[j ,jj] + z = V[j+1,jj] + V[j ,jj]= x * c + z * s + V[j+1,jj ]= z * c - x * s + z = ctx.hypot(f, h) + S[j] = z + if z != 0: # rotation can be arbitray if z=0 + z = 1 / z + c = f * z + s = h * z + f = c * g + s * y + x = c * y - s * g + if calc_u: + for jj in xrange(m): + y = A[jj,j ] + z = A[jj,j+1] + A[jj,j ]= y * c + z * s + A[jj,j+1 ]= z * c - y * s + + dwork[l] = 0 + dwork[k] = f + S[k] = x + + ########################## + + # Sort singular values into decreasing order (bubble-sort) + + for i in xrange(n): + imax = i + s = ctx.fabs(S[i]) # s is the current maximal element + + for j in xrange(i + 1, n): + c = ctx.fabs(S[j]) + if c > s: + s = c + imax = j + + if imax != i: + # swap singular values + + z = S[i] + S[i] = S[imax] + S[imax] = z + + if calc_u: + for j in xrange(m): + z = A[j,i] + A[j,i] = A[j,imax] + A[j,imax] = z + + if not isinstance(V, bool): + for j in xrange(n): + z = V[i,j] + V[i,j] = V[imax,j] + V[imax,j] = z + + return S + +################################################################################################## + +@defun +def svd_r(ctx, A, full_matrices = False, compute_uv = True, overwrite_a = False): + """ + This routine computes the singular value decomposition of a matrix A. + Given A, two orthogonal matrices U and V are calculated such that + + A = U S V and U' U = 1 and V V' = 1 + + where S is a suitable shaped matrix whose off-diagonal elements are zero. + Here ' denotes the transpose. 
The diagonal elements of S are the singular + values of A, i.e. the squareroots of the eigenvalues of A' A or A A'. + + input: + A : a real matrix of shape (m, n) + full_matrices : if true, U and V are of shape (m, m) and (n, n). + if false, U and V are of shape (m, min(m, n)) and (min(m, n), n). + compute_uv : if true, U and V are calculated. if false, only S is calculated. + overwrite_a : if true, allows modification of A which may improve + performance. if false, A is not modified. + + output: + U : an orthogonal matrix: U' U = 1. if full_matrices is true, U is of + shape (m, m). ortherwise it is of shape (m, min(m, n)). + + S : an array of length min(m, n) containing the singular values of A sorted by + decreasing magnitude. + + V : an orthogonal matrix: V V' = 1. if full_matrices is true, V is of + shape (n, n). ortherwise it is of shape (min(m, n), n). + + return value: + + S if compute_uv is false + (U, S, V) if compute_uv is true + + overview of the matrices: + + full_matrices true: + A : m*n + U : m*m U' U = 1 + S as matrix : m*n + V : n*n V V' = 1 + + full_matrices false: + A : m*n + U : m*min(n,m) U' U = 1 + S as matrix : min(m,n)*min(m,n) + V : min(m,n)*n V V' = 1 + + examples: + + >>> from mpmath import mp + >>> A = mp.matrix([[2, -2, -1], [3, 4, -2], [-2, -2, 0]]) + >>> S = mp.svd_r(A, compute_uv = False) + >>> print(S) + [6.0] + [3.0] + [1.0] + + >>> U, S, V = mp.svd_r(A) + >>> print(mp.chop(A - U * mp.diag(S) * V)) + [0.0 0.0 0.0] + [0.0 0.0 0.0] + [0.0 0.0 0.0] + + + see also: svd, svd_c + """ + + m, n = A.rows, A.cols + + if not compute_uv: + if not overwrite_a: + A = A.copy() + S = svd_r_raw(ctx, A, V = False, calc_u = False) + S = S[:min(m,n)] + return S + + if full_matrices and n < m: + V = ctx.zeros(m, m) + A0 = ctx.zeros(m, m) + A0[:,:n] = A + S = svd_r_raw(ctx, A0, V, calc_u = True) + + S = S[:n] + V = V[:n,:n] + + return (A0, S, V) + else: + if not overwrite_a: + A = A.copy() + V = ctx.zeros(n, n) + S = svd_r_raw(ctx, A, V, calc_u = True) + 
+ if n > m: + if full_matrices == False: + V = V[:m,:] + + S = S[:m] + A = A[:,:m] + + return (A, S, V) + +############################## + +@defun +def svd_c(ctx, A, full_matrices = False, compute_uv = True, overwrite_a = False): + """ + This routine computes the singular value decomposition of a matrix A. + Given A, two unitary matrices U and V are calculated such that + + A = U S V and U' U = 1 and V V' = 1 + + where S is a suitable shaped matrix whose off-diagonal elements are zero. + Here ' denotes the hermitian transpose (i.e. transposition and complex + conjugation). The diagonal elements of S are the singular values of A, + i.e. the squareroots of the eigenvalues of A' A or A A'. + + input: + A : a complex matrix of shape (m, n) + full_matrices : if true, U and V are of shape (m, m) and (n, n). + if false, U and V are of shape (m, min(m, n)) and (min(m, n), n). + compute_uv : if true, U and V are calculated. if false, only S is calculated. + overwrite_a : if true, allows modification of A which may improve + performance. if false, A is not modified. + + output: + U : an unitary matrix: U' U = 1. if full_matrices is true, U is of + shape (m, m). ortherwise it is of shape (m, min(m, n)). + + S : an array of length min(m, n) containing the singular values of A sorted by + decreasing magnitude. + + V : an unitary matrix: V V' = 1. if full_matrices is true, V is of + shape (n, n). ortherwise it is of shape (min(m, n), n). 
+ + return value: + + S if compute_uv is false + (U, S, V) if compute_uv is true + + overview of the matrices: + + full_matrices true: + A : m*n + U : m*m U' U = 1 + S as matrix : m*n + V : n*n V V' = 1 + + full_matrices false: + A : m*n + U : m*min(n,m) U' U = 1 + S as matrix : min(m,n)*min(m,n) + V : min(m,n)*n V V' = 1 + + example: + >>> from mpmath import mp + >>> A = mp.matrix([[-2j, -1-3j, -2+2j], [2-2j, -1-3j, 1], [-3+1j,-2j,0]]) + >>> S = mp.svd_c(A, compute_uv = False) + >>> print(mp.chop(S - mp.matrix([mp.sqrt(34), mp.sqrt(15), mp.sqrt(6)]))) + [0.0] + [0.0] + [0.0] + + >>> U, S, V = mp.svd_c(A) + >>> print(mp.chop(A - U * mp.diag(S) * V)) + [0.0 0.0 0.0] + [0.0 0.0 0.0] + [0.0 0.0 0.0] + + see also: svd, svd_r + """ + + m, n = A.rows, A.cols + + if not compute_uv: + if not overwrite_a: + A = A.copy() + S = svd_c_raw(ctx, A, V = False, calc_u = False) + S = S[:min(m,n)] + return S + + if full_matrices and n < m: + V = ctx.zeros(m, m) + A0 = ctx.zeros(m, m) + A0[:,:n] = A + S = svd_c_raw(ctx, A0, V, calc_u = True) + + S = S[:n] + V = V[:n,:n] + + return (A0, S, V) + else: + if not overwrite_a: + A = A.copy() + V = ctx.zeros(n, n) + S = svd_c_raw(ctx, A, V, calc_u = True) + + if n > m: + if full_matrices == False: + V = V[:m,:] + + S = S[:m] + A = A[:,:m] + + return (A, S, V) + +@defun +def svd(ctx, A, full_matrices = False, compute_uv = True, overwrite_a = False): + """ + "svd" is a unified interface for "svd_r" and "svd_c". Depending on + whether A is real or complex the appropriate function is called. + + This routine computes the singular value decomposition of a matrix A. + Given A, two orthogonal (A real) or unitary (A complex) matrices U and V + are calculated such that + + A = U S V and U' U = 1 and V V' = 1 + + where S is a suitable shaped matrix whose off-diagonal elements are zero. + Here ' denotes the hermitian transpose (i.e. transposition and complex + conjugation). The diagonal elements of S are the singular values of A, + i.e. 
the squareroots of the eigenvalues of A' A or A A'. + + input: + A : a real or complex matrix of shape (m, n) + full_matrices : if true, U and V are of shape (m, m) and (n, n). + if false, U and V are of shape (m, min(m, n)) and (min(m, n), n). + compute_uv : if true, U and V are calculated. if false, only S is calculated. + overwrite_a : if true, allows modification of A which may improve + performance. if false, A is not modified. + + output: + U : an orthogonal or unitary matrix: U' U = 1. if full_matrices is true, U is of + shape (m, m). ortherwise it is of shape (m, min(m, n)). + + S : an array of length min(m, n) containing the singular values of A sorted by + decreasing magnitude. + + V : an orthogonal or unitary matrix: V V' = 1. if full_matrices is true, V is of + shape (n, n). ortherwise it is of shape (min(m, n), n). + + return value: + + S if compute_uv is false + (U, S, V) if compute_uv is true + + overview of the matrices: + + full_matrices true: + A : m*n + U : m*m U' U = 1 + S as matrix : m*n + V : n*n V V' = 1 + + full_matrices false: + A : m*n + U : m*min(n,m) U' U = 1 + S as matrix : min(m,n)*min(m,n) + V : min(m,n)*n V V' = 1 + + examples: + + >>> from mpmath import mp + >>> A = mp.matrix([[2, -2, -1], [3, 4, -2], [-2, -2, 0]]) + >>> S = mp.svd(A, compute_uv = False) + >>> print(S) + [6.0] + [3.0] + [1.0] + + >>> U, S, V = mp.svd(A) + >>> print(mp.chop(A - U * mp.diag(S) * V)) + [0.0 0.0 0.0] + [0.0 0.0 0.0] + [0.0 0.0 0.0] + + see also: svd_r, svd_c + """ + + iscomplex = any(type(x) is ctx.mpc for x in A) + + if iscomplex: + return ctx.svd_c(A, full_matrices = full_matrices, compute_uv = compute_uv, overwrite_a = overwrite_a) + else: + return ctx.svd_r(A, full_matrices = full_matrices, compute_uv = compute_uv, overwrite_a = overwrite_a) diff --git a/.venv/lib/python3.11/site-packages/mpmath/matrices/linalg.py b/.venv/lib/python3.11/site-packages/mpmath/matrices/linalg.py new file mode 100644 index 
0000000000000000000000000000000000000000..e2fe643e809822e3d05a52b73c965edb622f9af9 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/matrices/linalg.py @@ -0,0 +1,790 @@ +""" +Linear algebra +-------------- + +Linear equations +................ + +Basic linear algebra is implemented; you can for example solve the linear +equation system:: + + x + 2*y = -10 + 3*x + 4*y = 10 + +using ``lu_solve``:: + + >>> from mpmath import * + >>> mp.pretty = False + >>> A = matrix([[1, 2], [3, 4]]) + >>> b = matrix([-10, 10]) + >>> x = lu_solve(A, b) + >>> x + matrix( + [['30.0'], + ['-20.0']]) + +If you don't trust the result, use ``residual`` to calculate the residual ||A*x-b||:: + + >>> residual(A, x, b) + matrix( + [['3.46944695195361e-18'], + ['3.46944695195361e-18']]) + >>> str(eps) + '2.22044604925031e-16' + +As you can see, the solution is quite accurate. The error is caused by the +inaccuracy of the internal floating point arithmetic. Though, it's even smaller +than the current machine epsilon, which basically means you can trust the +result. + +If you need more speed, use NumPy, or ``fp.lu_solve`` for a floating-point computation. + + >>> fp.lu_solve(A, b) # doctest: +ELLIPSIS + matrix(...) + +``lu_solve`` accepts overdetermined systems. It is usually not possible to solve +such systems, so the residual is minimized instead. Internally this is done +using Cholesky decomposition to compute a least squares approximation. This means +that that ``lu_solve`` will square the errors. If you can't afford this, use +``qr_solve`` instead. It is twice as slow but more accurate, and it calculates +the residual automatically. + + +Matrix factorization +.................... 
+ +The function ``lu`` computes an explicit LU factorization of a matrix:: + + >>> P, L, U = lu(matrix([[0,2,3],[4,5,6],[7,8,9]])) + >>> print(P) + [0.0 0.0 1.0] + [1.0 0.0 0.0] + [0.0 1.0 0.0] + >>> print(L) + [ 1.0 0.0 0.0] + [ 0.0 1.0 0.0] + [0.571428571428571 0.214285714285714 1.0] + >>> print(U) + [7.0 8.0 9.0] + [0.0 2.0 3.0] + [0.0 0.0 0.214285714285714] + >>> print(P.T*L*U) + [0.0 2.0 3.0] + [4.0 5.0 6.0] + [7.0 8.0 9.0] + +Interval matrices +----------------- + +Matrices may contain interval elements. This allows one to perform +basic linear algebra operations such as matrix multiplication +and equation solving with rigorous error bounds:: + + >>> a = iv.matrix([['0.1','0.3','1.0'], + ... ['7.1','5.5','4.8'], + ... ['3.2','4.4','5.6']]) + >>> + >>> b = iv.matrix(['4','0.6','0.5']) + >>> c = iv.lu_solve(a, b) + >>> print(c) + [ [5.2582327113062568605927528666, 5.25823271130625686059275702219]] + [[-13.1550493962678375411635581388, -13.1550493962678375411635540152]] + [ [7.42069154774972557628979076189, 7.42069154774972557628979190734]] + >>> print(a*c) + [ [3.99999999999999999999999844904, 4.00000000000000000000000155096]] + [[0.599999999999999999999968898009, 0.600000000000000000000031763736]] + [[0.499999999999999999999979320485, 0.500000000000000000000020679515]] +""" + +# TODO: +# *implement high-level qr() +# *test unitvector +# *iterative solving + +from copy import copy + +from ..libmp.backend import xrange + +class LinearAlgebraMethods(object): + + def LU_decomp(ctx, A, overwrite=False, use_cache=True): + """ + LU-factorization of a n*n matrix using the Gauss algorithm. + Returns L and U in one matrix and the pivot indices. + + Use overwrite to specify whether A will be overwritten with L and U. 
+ """ + if not A.rows == A.cols: + raise ValueError('need n*n matrix') + # get from cache if possible + if use_cache and isinstance(A, ctx.matrix) and A._LU: + return A._LU + if not overwrite: + orig = A + A = A.copy() + tol = ctx.absmin(ctx.mnorm(A,1) * ctx.eps) # each pivot element has to be bigger + n = A.rows + p = [None]*(n - 1) + for j in xrange(n - 1): + # pivoting, choose max(abs(reciprocal row sum)*abs(pivot element)) + biggest = 0 + for k in xrange(j, n): + s = ctx.fsum([ctx.absmin(A[k,l]) for l in xrange(j, n)]) + if ctx.absmin(s) <= tol: + raise ZeroDivisionError('matrix is numerically singular') + current = 1/s * ctx.absmin(A[k,j]) + if current > biggest: # TODO: what if equal? + biggest = current + p[j] = k + # swap rows according to p + ctx.swap_row(A, j, p[j]) + if ctx.absmin(A[j,j]) <= tol: + raise ZeroDivisionError('matrix is numerically singular') + # calculate elimination factors and add rows + for i in xrange(j + 1, n): + A[i,j] /= A[j,j] + for k in xrange(j + 1, n): + A[i,k] -= A[i,j]*A[j,k] + if ctx.absmin(A[n - 1,n - 1]) <= tol: + raise ZeroDivisionError('matrix is numerically singular') + # cache decomposition + if not overwrite and isinstance(orig, ctx.matrix): + orig._LU = (A, p) + return A, p + + def L_solve(ctx, L, b, p=None): + """ + Solve the lower part of a LU factorized matrix for y. + """ + if L.rows != L.cols: + raise RuntimeError("need n*n matrix") + n = L.rows + if len(b) != n: + raise ValueError("Value should be equal to n") + b = copy(b) + if p: # swap b according to p + for k in xrange(0, len(p)): + ctx.swap_row(b, k, p[k]) + # solve + for i in xrange(1, n): + for j in xrange(i): + b[i] -= L[i,j] * b[j] + return b + + def U_solve(ctx, U, y): + """ + Solve the upper part of a LU factorized matrix for x. 
+ """ + if U.rows != U.cols: + raise RuntimeError("need n*n matrix") + n = U.rows + if len(y) != n: + raise ValueError("Value should be equal to n") + x = copy(y) + for i in xrange(n - 1, -1, -1): + for j in xrange(i + 1, n): + x[i] -= U[i,j] * x[j] + x[i] /= U[i,i] + return x + + def lu_solve(ctx, A, b, **kwargs): + """ + Ax = b => x + + Solve a determined or overdetermined linear equations system. + Fast LU decomposition is used, which is less accurate than QR decomposition + (especially for overdetermined systems), but it's twice as efficient. + Use qr_solve if you want more precision or have to solve a very ill- + conditioned system. + + If you specify real=True, it does not check for overdeterminded complex + systems. + """ + prec = ctx.prec + try: + ctx.prec += 10 + # do not overwrite A nor b + A, b = ctx.matrix(A, **kwargs).copy(), ctx.matrix(b, **kwargs).copy() + if A.rows < A.cols: + raise ValueError('cannot solve underdetermined system') + if A.rows > A.cols: + # use least-squares method if overdetermined + # (this increases errors) + AH = A.H + A = AH * A + b = AH * b + if (kwargs.get('real', False) or + not sum(type(i) is ctx.mpc for i in A)): + # TODO: necessary to check also b? + x = ctx.cholesky_solve(A, b) + else: + x = ctx.lu_solve(A, b) + else: + # LU factorization + A, p = ctx.LU_decomp(A) + b = ctx.L_solve(A, b, p) + x = ctx.U_solve(A, b) + finally: + ctx.prec = prec + return x + + def improve_solution(ctx, A, x, b, maxsteps=1): + """ + Improve a solution to a linear equation system iteratively. + + This re-uses the LU decomposition and is thus cheap. + Usually 3 up to 4 iterations are giving the maximal improvement. + """ + if A.rows != A.cols: + raise RuntimeError("need n*n matrix") # TODO: really? 
+ for _ in xrange(maxsteps): + r = ctx.residual(A, x, b) + if ctx.norm(r, 2) < 10*ctx.eps: + break + # this uses cached LU decomposition and is thus cheap + dx = ctx.lu_solve(A, -r) + x += dx + return x + + def lu(ctx, A): + """ + A -> P, L, U + + LU factorisation of a square matrix A. L is the lower, U the upper part. + P is the permutation matrix indicating the row swaps. + + P*A = L*U + + If you need efficiency, use the low-level method LU_decomp instead, it's + much more memory efficient. + """ + # get factorization + A, p = ctx.LU_decomp(A) + n = A.rows + L = ctx.matrix(n) + U = ctx.matrix(n) + for i in xrange(n): + for j in xrange(n): + if i > j: + L[i,j] = A[i,j] + elif i == j: + L[i,j] = 1 + U[i,j] = A[i,j] + else: + U[i,j] = A[i,j] + # calculate permutation matrix + P = ctx.eye(n) + for k in xrange(len(p)): + ctx.swap_row(P, k, p[k]) + return P, L, U + + def unitvector(ctx, n, i): + """ + Return the i-th n-dimensional unit vector. + """ + assert 0 < i <= n, 'this unit vector does not exist' + return [ctx.zero]*(i-1) + [ctx.one] + [ctx.zero]*(n-i) + + def inverse(ctx, A, **kwargs): + """ + Calculate the inverse of a matrix. + + If you want to solve an equation system Ax = b, it's recommended to use + solve(A, b) instead, it's about 3 times more efficient. 
+ """ + prec = ctx.prec + try: + ctx.prec += 10 + # do not overwrite A + A = ctx.matrix(A, **kwargs).copy() + n = A.rows + # get LU factorisation + A, p = ctx.LU_decomp(A) + cols = [] + # calculate unit vectors and solve corresponding system to get columns + for i in xrange(1, n + 1): + e = ctx.unitvector(n, i) + y = ctx.L_solve(A, e, p) + cols.append(ctx.U_solve(A, y)) + # convert columns to matrix + inv = [] + for i in xrange(n): + row = [] + for j in xrange(n): + row.append(cols[j][i]) + inv.append(row) + result = ctx.matrix(inv, **kwargs) + finally: + ctx.prec = prec + return result + + def householder(ctx, A): + """ + (A|b) -> H, p, x, res + + (A|b) is the coefficient matrix with left hand side of an optionally + overdetermined linear equation system. + H and p contain all information about the transformation matrices. + x is the solution, res the residual. + """ + if not isinstance(A, ctx.matrix): + raise TypeError("A should be a type of ctx.matrix") + m = A.rows + n = A.cols + if m < n - 1: + raise RuntimeError("Columns should not be less than rows") + # calculate Householder matrix + p = [] + for j in xrange(0, n - 1): + s = ctx.fsum(abs(A[i,j])**2 for i in xrange(j, m)) + if not abs(s) > ctx.eps: + raise ValueError('matrix is numerically singular') + p.append(-ctx.sign(ctx.re(A[j,j])) * ctx.sqrt(s)) + kappa = ctx.one / (s - p[j] * A[j,j]) + A[j,j] -= p[j] + for k in xrange(j+1, n): + y = ctx.fsum(ctx.conj(A[i,j]) * A[i,k] for i in xrange(j, m)) * kappa + for i in xrange(j, m): + A[i,k] -= A[i,j] * y + # solve Rx = c1 + x = [A[i,n - 1] for i in xrange(n - 1)] + for i in xrange(n - 2, -1, -1): + x[i] -= ctx.fsum(A[i,j] * x[j] for j in xrange(i + 1, n - 1)) + x[i] /= p[i] + # calculate residual + if not m == n - 1: + r = [A[m-1-i, n-1] for i in xrange(m - n + 1)] + else: + # determined system, residual should be 0 + r = [0]*m # maybe a bad idea, changing r[i] will change all elements + return A, p, x, r + + #def qr(ctx, A): + # """ + # A -> Q, R + # + # QR 
factorisation of a square matrix A using Householder decomposition. + # Q is orthogonal, this leads to very few numerical errors. + # + # A = Q*R + # """ + # H, p, x, res = householder(A) + # TODO: implement this + + def residual(ctx, A, x, b, **kwargs): + """ + Calculate the residual of a solution to a linear equation system. + + r = A*x - b for A*x = b + """ + oldprec = ctx.prec + try: + ctx.prec *= 2 + A, x, b = ctx.matrix(A, **kwargs), ctx.matrix(x, **kwargs), ctx.matrix(b, **kwargs) + return A*x - b + finally: + ctx.prec = oldprec + + def qr_solve(ctx, A, b, norm=None, **kwargs): + """ + Ax = b => x, ||Ax - b|| + + Solve a determined or overdetermined linear equations system and + calculate the norm of the residual (error). + QR decomposition using Householder factorization is applied, which gives very + accurate results even for ill-conditioned matrices. qr_solve is twice as + efficient. + """ + if norm is None: + norm = ctx.norm + prec = ctx.prec + try: + ctx.prec += 10 + # do not overwrite A nor b + A, b = ctx.matrix(A, **kwargs).copy(), ctx.matrix(b, **kwargs).copy() + if A.rows < A.cols: + raise ValueError('cannot solve underdetermined system') + H, p, x, r = ctx.householder(ctx.extend(A, b)) + res = ctx.norm(r) + # calculate residual "manually" for determined systems + if res == 0: + res = ctx.norm(ctx.residual(A, x, b)) + return ctx.matrix(x, **kwargs), res + finally: + ctx.prec = prec + + def cholesky(ctx, A, tol=None): + r""" + Cholesky decomposition of a symmetric positive-definite matrix `A`. + Returns a lower triangular matrix `L` such that `A = L \times L^T`. + More generally, for a complex Hermitian positive-definite matrix, + a Cholesky decomposition satisfying `A = L \times L^H` is returned. + + The Cholesky decomposition can be used to solve linear equation + systems twice as efficiently as LU decomposition, or to + test whether `A` is positive-definite. 
+ + The optional parameter ``tol`` determines the tolerance for + verifying positive-definiteness. + + **Examples** + + Cholesky decomposition of a positive-definite symmetric matrix:: + + >>> from mpmath import * + >>> mp.dps = 25; mp.pretty = True + >>> A = eye(3) + hilbert(3) + >>> nprint(A) + [ 2.0 0.5 0.333333] + [ 0.5 1.33333 0.25] + [0.333333 0.25 1.2] + >>> L = cholesky(A) + >>> nprint(L) + [ 1.41421 0.0 0.0] + [0.353553 1.09924 0.0] + [0.235702 0.15162 1.05899] + >>> chop(A - L*L.T) + [0.0 0.0 0.0] + [0.0 0.0 0.0] + [0.0 0.0 0.0] + + Cholesky decomposition of a Hermitian matrix:: + + >>> A = eye(3) + matrix([[0,0.25j,-0.5j],[-0.25j,0,0],[0.5j,0,0]]) + >>> L = cholesky(A) + >>> nprint(L) + [ 1.0 0.0 0.0] + [(0.0 - 0.25j) (0.968246 + 0.0j) 0.0] + [ (0.0 + 0.5j) (0.129099 + 0.0j) (0.856349 + 0.0j)] + >>> chop(A - L*L.H) + [0.0 0.0 0.0] + [0.0 0.0 0.0] + [0.0 0.0 0.0] + + Attempted Cholesky decomposition of a matrix that is not positive + definite:: + + >>> A = -eye(3) + hilbert(3) + >>> L = cholesky(A) + Traceback (most recent call last): + ... + ValueError: matrix is not positive-definite + + **References** + + 1. 
[Wikipedia]_ http://en.wikipedia.org/wiki/Cholesky_decomposition + + """ + if not isinstance(A, ctx.matrix): + raise RuntimeError("A should be a type of ctx.matrix") + if not A.rows == A.cols: + raise ValueError('need n*n matrix') + if tol is None: + tol = +ctx.eps + n = A.rows + L = ctx.matrix(n) + for j in xrange(n): + c = ctx.re(A[j,j]) + if abs(c-A[j,j]) > tol: + raise ValueError('matrix is not Hermitian') + s = c - ctx.fsum((L[j,k] for k in xrange(j)), + absolute=True, squared=True) + if s < tol: + raise ValueError('matrix is not positive-definite') + L[j,j] = ctx.sqrt(s) + for i in xrange(j, n): + it1 = (L[i,k] for k in xrange(j)) + it2 = (L[j,k] for k in xrange(j)) + t = ctx.fdot(it1, it2, conjugate=True) + L[i,j] = (A[i,j] - t) / L[j,j] + return L + + def cholesky_solve(ctx, A, b, **kwargs): + """ + Ax = b => x + + Solve a symmetric positive-definite linear equation system. + This is twice as efficient as lu_solve. + + Typical use cases: + * A.T*A + * Hessian matrix + * differential equations + """ + prec = ctx.prec + try: + ctx.prec += 10 + # do not overwrite A nor b + A, b = ctx.matrix(A, **kwargs).copy(), ctx.matrix(b, **kwargs).copy() + if A.rows != A.cols: + raise ValueError('can only solve determined system') + # Cholesky factorization + L = ctx.cholesky(A) + # solve + n = L.rows + if len(b) != n: + raise ValueError("Value should be equal to n") + for i in xrange(n): + b[i] -= ctx.fsum(L[i,j] * b[j] for j in xrange(i)) + b[i] /= L[i,i] + x = ctx.U_solve(L.T, b) + return x + finally: + ctx.prec = prec + + def det(ctx, A): + """ + Calculate the determinant of a matrix. 
+ """ + prec = ctx.prec + try: + # do not overwrite A + A = ctx.matrix(A).copy() + # use LU factorization to calculate determinant + try: + R, p = ctx.LU_decomp(A) + except ZeroDivisionError: + return 0 + z = 1 + for i, e in enumerate(p): + if i != e: + z *= -1 + for i in xrange(A.rows): + z *= R[i,i] + return z + finally: + ctx.prec = prec + + def cond(ctx, A, norm=None): + """ + Calculate the condition number of a matrix using a specified matrix norm. + + The condition number estimates the sensitivity of a matrix to errors. + Example: small input errors for ill-conditioned coefficient matrices + alter the solution of the system dramatically. + + For ill-conditioned matrices it's recommended to use qr_solve() instead + of lu_solve(). This does not help with input errors however, it just avoids + to add additional errors. + + Definition: cond(A) = ||A|| * ||A**-1|| + """ + if norm is None: + norm = lambda x: ctx.mnorm(x,1) + return norm(A) * norm(ctx.inverse(A)) + + def lu_solve_mat(ctx, a, b): + """Solve a * x = b where a and b are matrices.""" + r = ctx.matrix(a.rows, b.cols) + for i in range(b.cols): + c = ctx.lu_solve(a, b.column(i)) + for j in range(len(c)): + r[j, i] = c[j] + return r + + def qr(ctx, A, mode = 'full', edps = 10): + """ + Compute a QR factorization $A = QR$ where + A is an m x n matrix of real or complex numbers where m >= n + + mode has following meanings: + (1) mode = 'raw' returns two matrixes (A, tau) in the + internal format used by LAPACK + (2) mode = 'skinny' returns the leading n columns of Q + and n rows of R + (3) Any other value returns the leading m columns of Q + and m rows of R + + edps is the increase in mp precision used for calculations + + **Examples** + + >>> from mpmath import * + >>> mp.dps = 15 + >>> mp.pretty = True + >>> A = matrix([[1, 2], [3, 4], [1, 1]]) + >>> Q, R = qr(A) + >>> Q + [-0.301511344577764 0.861640436855329 0.408248290463863] + [-0.904534033733291 -0.123091490979333 -0.408248290463863] + 
[-0.301511344577764 -0.492365963917331 0.816496580927726] + >>> R + [-3.3166247903554 -4.52267016866645] + [ 0.0 0.738548945875996] + [ 0.0 0.0] + >>> Q * R + [1.0 2.0] + [3.0 4.0] + [1.0 1.0] + >>> chop(Q.T * Q) + [1.0 0.0 0.0] + [0.0 1.0 0.0] + [0.0 0.0 1.0] + >>> B = matrix([[1+0j, 2-3j], [3+j, 4+5j]]) + >>> Q, R = qr(B) + >>> nprint(Q) + [ (-0.301511 + 0.0j) (0.0695795 - 0.95092j)] + [(-0.904534 - 0.301511j) (-0.115966 + 0.278318j)] + >>> nprint(R) + [(-3.31662 + 0.0j) (-5.72872 - 2.41209j)] + [ 0.0 (3.91965 + 0.0j)] + >>> Q * R + [(1.0 + 0.0j) (2.0 - 3.0j)] + [(3.0 + 1.0j) (4.0 + 5.0j)] + >>> chop(Q.T * Q.conjugate()) + [1.0 0.0] + [0.0 1.0] + + """ + + # check values before continuing + assert isinstance(A, ctx.matrix) + m = A.rows + n = A.cols + assert n >= 0 + assert m >= n + assert edps >= 0 + + # check for complex data type + cmplx = any(type(x) is ctx.mpc for x in A) + + # temporarily increase the precision and initialize + with ctx.extradps(edps): + tau = ctx.matrix(n,1) + A = A.copy() + + # --------------- + # FACTOR MATRIX A + # --------------- + if cmplx: + one = ctx.mpc('1.0', '0.0') + zero = ctx.mpc('0.0', '0.0') + rzero = ctx.mpf('0.0') + + # main loop to factor A (complex) + for j in xrange(0, n): + alpha = A[j,j] + alphr = ctx.re(alpha) + alphi = ctx.im(alpha) + + if (m-j) >= 2: + xnorm = ctx.fsum( A[i,j]*ctx.conj(A[i,j]) for i in xrange(j+1, m) ) + xnorm = ctx.re( ctx.sqrt(xnorm) ) + else: + xnorm = rzero + + if (xnorm == rzero) and (alphi == rzero): + tau[j] = zero + continue + + if alphr < rzero: + beta = ctx.sqrt(alphr**2 + alphi**2 + xnorm**2) + else: + beta = -ctx.sqrt(alphr**2 + alphi**2 + xnorm**2) + + tau[j] = ctx.mpc( (beta - alphr) / beta, -alphi / beta ) + t = -ctx.conj(tau[j]) + za = one / (alpha - beta) + + for i in xrange(j+1, m): + A[i,j] *= za + + A[j,j] = one + for k in xrange(j+1, n): + y = ctx.fsum(A[i,j] * ctx.conj(A[i,k]) for i in xrange(j, m)) + temp = t * ctx.conj(y) + for i in xrange(j, m): + A[i,k] += A[i,j] * temp + + 
A[j,j] = ctx.mpc(beta, '0.0') + else: + one = ctx.mpf('1.0') + zero = ctx.mpf('0.0') + + # main loop to factor A (real) + for j in xrange(0, n): + alpha = A[j,j] + + if (m-j) > 2: + xnorm = ctx.fsum( (A[i,j])**2 for i in xrange(j+1, m) ) + xnorm = ctx.sqrt(xnorm) + elif (m-j) == 2: + xnorm = abs( A[m-1,j] ) + else: + xnorm = zero + + if xnorm == zero: + tau[j] = zero + continue + + if alpha < zero: + beta = ctx.sqrt(alpha**2 + xnorm**2) + else: + beta = -ctx.sqrt(alpha**2 + xnorm**2) + + tau[j] = (beta - alpha) / beta + t = -tau[j] + da = one / (alpha - beta) + + for i in xrange(j+1, m): + A[i,j] *= da + + A[j,j] = one + for k in xrange(j+1, n): + y = ctx.fsum( A[i,j] * A[i,k] for i in xrange(j, m) ) + temp = t * y + for i in xrange(j,m): + A[i,k] += A[i,j] * temp + + A[j,j] = beta + + # return factorization in same internal format as LAPACK + if (mode == 'raw') or (mode == 'RAW'): + return A, tau + + # ---------------------------------- + # FORM Q USING BACKWARD ACCUMULATION + # ---------------------------------- + + # form R before the values are overwritten + R = A.copy() + for j in xrange(0, n): + for i in xrange(j+1, m): + R[i,j] = zero + + # set the value of p (number of columns of Q to return) + p = m + if (mode == 'skinny') or (mode == 'SKINNY'): + p = n + + # add columns to A if needed and initialize + A.cols += (p-n) + for j in xrange(0, p): + A[j,j] = one + for i in xrange(0, j): + A[i,j] = zero + + # main loop to form Q + for j in xrange(n-1, -1, -1): + t = -tau[j] + A[j,j] += t + + for k in xrange(j+1, p): + if cmplx: + y = ctx.fsum(A[i,j] * ctx.conj(A[i,k]) for i in xrange(j+1, m)) + temp = t * ctx.conj(y) + else: + y = ctx.fsum(A[i,j] * A[i,k] for i in xrange(j+1, m)) + temp = t * y + A[j,k] = temp + for i in xrange(j+1, m): + A[i,k] += A[i,j] * temp + + for i in xrange(j+1, m): + A[i, j] *= t + + return A, R[0:p,0:n] + + # ------------------ + # END OF FUNCTION QR + # ------------------ diff --git 
a/.venv/lib/python3.11/site-packages/mpmath/matrices/matrices.py b/.venv/lib/python3.11/site-packages/mpmath/matrices/matrices.py new file mode 100644 index 0000000000000000000000000000000000000000..a97d5a9ca7e173195386dc7cb60860a826ab6a97 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/mpmath/matrices/matrices.py @@ -0,0 +1,1005 @@ +from ..libmp.backend import xrange +import warnings + +# TODO: interpret list as vectors (for multiplication) + +rowsep = '\n' +colsep = ' ' + +class _matrix(object): + """ + Numerical matrix. + + Specify the dimensions or the data as a nested list. + Elements default to zero. + Use a flat list to create a column vector easily. + + The datatype of the context (mpf for mp, mpi for iv, and float for fp) is used to store the data. + + Creating matrices + ----------------- + + Matrices in mpmath are implemented using dictionaries. Only non-zero values + are stored, so it is cheap to represent sparse matrices. + + The most basic way to create one is to use the ``matrix`` class directly. + You can create an empty matrix specifying the dimensions: + + >>> from mpmath import * + >>> mp.dps = 15 + >>> matrix(2) + matrix( + [['0.0', '0.0'], + ['0.0', '0.0']]) + >>> matrix(2, 3) + matrix( + [['0.0', '0.0', '0.0'], + ['0.0', '0.0', '0.0']]) + + Calling ``matrix`` with one dimension will create a square matrix. + + To access the dimensions of a matrix, use the ``rows`` or ``cols`` keyword: + + >>> A = matrix(3, 2) + >>> A + matrix( + [['0.0', '0.0'], + ['0.0', '0.0'], + ['0.0', '0.0']]) + >>> A.rows + 3 + >>> A.cols + 2 + + You can also change the dimension of an existing matrix. This will set the + new elements to 0. If the new dimension is smaller than before, the + concerning elements are discarded: + + >>> A.rows = 2 + >>> A + matrix( + [['0.0', '0.0'], + ['0.0', '0.0']]) + + Internally ``mpmathify`` is used every time an element is set. 
This + is done using the syntax A[row,column], counting from 0: + + >>> A = matrix(2) + >>> A[1,1] = 1 + 1j + >>> A + matrix( + [['0.0', '0.0'], + ['0.0', mpc(real='1.0', imag='1.0')]]) + + A more comfortable way to create a matrix lets you use nested lists: + + >>> matrix([[1, 2], [3, 4]]) + matrix( + [['1.0', '2.0'], + ['3.0', '4.0']]) + + Convenient advanced functions are available for creating various standard + matrices, see ``zeros``, ``ones``, ``diag``, ``eye``, ``randmatrix`` and + ``hilbert``. + + Vectors + ....... + + Vectors may also be represented by the ``matrix`` class (with rows = 1 or cols = 1). + For vectors there are some things which make life easier. A column vector can + be created using a flat list, a row vectors using an almost flat nested list:: + + >>> matrix([1, 2, 3]) + matrix( + [['1.0'], + ['2.0'], + ['3.0']]) + >>> matrix([[1, 2, 3]]) + matrix( + [['1.0', '2.0', '3.0']]) + + Optionally vectors can be accessed like lists, using only a single index:: + + >>> x = matrix([1, 2, 3]) + >>> x[1] + mpf('2.0') + >>> x[1,0] + mpf('2.0') + + Other + ..... 
+ + Like you probably expected, matrices can be printed:: + + >>> print randmatrix(3) # doctest:+SKIP + [ 0.782963853573023 0.802057689719883 0.427895717335467] + [0.0541876859348597 0.708243266653103 0.615134039977379] + [ 0.856151514955773 0.544759264818486 0.686210904770947] + + Use ``nstr`` or ``nprint`` to specify the number of digits to print:: + + >>> nprint(randmatrix(5), 3) # doctest:+SKIP + [2.07e-1 1.66e-1 5.06e-1 1.89e-1 8.29e-1] + [6.62e-1 6.55e-1 4.47e-1 4.82e-1 2.06e-2] + [4.33e-1 7.75e-1 6.93e-2 2.86e-1 5.71e-1] + [1.01e-1 2.53e-1 6.13e-1 3.32e-1 2.59e-1] + [1.56e-1 7.27e-2 6.05e-1 6.67e-2 2.79e-1] + + As matrices are mutable, you will need to copy them sometimes:: + + >>> A = matrix(2) + >>> A + matrix( + [['0.0', '0.0'], + ['0.0', '0.0']]) + >>> B = A.copy() + >>> B[0,0] = 1 + >>> B + matrix( + [['1.0', '0.0'], + ['0.0', '0.0']]) + >>> A + matrix( + [['0.0', '0.0'], + ['0.0', '0.0']]) + + Finally, it is possible to convert a matrix to a nested list. This is very useful, + as most Python libraries involving matrices or arrays (namely NumPy or SymPy) + support this format:: + + >>> B.tolist() + [[mpf('1.0'), mpf('0.0')], [mpf('0.0'), mpf('0.0')]] + + + Matrix operations + ----------------- + + You can add and subtract matrices of compatible dimensions:: + + >>> A = matrix([[1, 2], [3, 4]]) + >>> B = matrix([[-2, 4], [5, 9]]) + >>> A + B + matrix( + [['-1.0', '6.0'], + ['8.0', '13.0']]) + >>> A - B + matrix( + [['3.0', '-2.0'], + ['-2.0', '-5.0']]) + >>> A + ones(3) # doctest:+ELLIPSIS + Traceback (most recent call last): + ... + ValueError: incompatible dimensions for addition + + It is possible to multiply or add matrices and scalars. 
In the latter case the + operation will be done element-wise:: + + >>> A * 2 + matrix( + [['2.0', '4.0'], + ['6.0', '8.0']]) + >>> A / 4 + matrix( + [['0.25', '0.5'], + ['0.75', '1.0']]) + >>> A - 1 + matrix( + [['0.0', '1.0'], + ['2.0', '3.0']]) + + Of course you can perform matrix multiplication, if the dimensions are + compatible, using ``@`` (for Python >= 3.5) or ``*``. For clarity, ``@`` is + recommended (`PEP 465 `), because + the meaning of ``*`` is different in many other Python libraries such as NumPy. + + >>> A @ B # doctest:+SKIP + matrix( + [['8.0', '22.0'], + ['14.0', '48.0']]) + >>> A * B # same as A @ B + matrix( + [['8.0', '22.0'], + ['14.0', '48.0']]) + >>> matrix([[1, 2, 3]]) * matrix([[-6], [7], [-2]]) + matrix( + [['2.0']]) + + .. + COMMENT: TODO: the above "doctest:+SKIP" may be removed as soon as we + have dropped support for Python 3.5 and below. + + You can raise powers of square matrices:: + + >>> A**2 + matrix( + [['7.0', '10.0'], + ['15.0', '22.0']]) + + Negative powers will calculate the inverse:: + + >>> A**-1 + matrix( + [['-2.0', '1.0'], + ['1.5', '-0.5']]) + >>> A * A**-1 + matrix( + [['1.0', '1.0842021724855e-19'], + ['-2.16840434497101e-19', '1.0']]) + + + + Matrix transposition is straightforward:: + + >>> A = ones(2, 3) + >>> A + matrix( + [['1.0', '1.0', '1.0'], + ['1.0', '1.0', '1.0']]) + >>> A.T + matrix( + [['1.0', '1.0'], + ['1.0', '1.0'], + ['1.0', '1.0']]) + + Norms + ..... + + Sometimes you need to know how "large" a matrix or vector is. Due to their + multidimensional nature it's not possible to compare them, but there are + several functions to map a matrix or a vector to a positive real number, the + so called norms. + + For vectors the p-norm is intended, usually the 1-, the 2- and the oo-norm are + used. 
+ + >>> x = matrix([-10, 2, 100]) + >>> norm(x, 1) + mpf('112.0') + >>> norm(x, 2) + mpf('100.5186549850325') + >>> norm(x, inf) + mpf('100.0') + + Please note that the 2-norm is the most used one, though it is more expensive + to calculate than the 1- or oo-norm. + + It is possible to generalize some vector norms to matrix norm:: + + >>> A = matrix([[1, -1000], [100, 50]]) + >>> mnorm(A, 1) + mpf('1050.0') + >>> mnorm(A, inf) + mpf('1001.0') + >>> mnorm(A, 'F') + mpf('1006.2310867787777') + + The last norm (the "Frobenius-norm") is an approximation for the 2-norm, which + is hard to calculate and not available. The Frobenius-norm lacks some + mathematical properties you might expect from a norm. + """ + + def __init__(self, *args, **kwargs): + self.__data = {} + # LU decompostion cache, this is useful when solving the same system + # multiple times, when calculating the inverse and when calculating the + # determinant + self._LU = None + if "force_type" in kwargs: + warnings.warn("The force_type argument was removed, it did not work" + " properly anyway. If you want to force floating-point or" + " interval computations, use the respective methods from `fp`" + " or `mp` instead, e.g., `fp.matrix()` or `iv.matrix()`." + " If you want to truncate values to integer, use .apply(int) instead.") + if isinstance(args[0], (list, tuple)): + if isinstance(args[0][0], (list, tuple)): + # interpret nested list as matrix + A = args[0] + self.__rows = len(A) + self.__cols = len(A[0]) + for i, row in enumerate(A): + for j, a in enumerate(row): + # note: this will call __setitem__ which will call self.ctx.convert() to convert the datatype. 
+ self[i, j] = a + else: + # interpret list as row vector + v = args[0] + self.__rows = len(v) + self.__cols = 1 + for i, e in enumerate(v): + self[i, 0] = e + elif isinstance(args[0], int): + # create empty matrix of given dimensions + if len(args) == 1: + self.__rows = self.__cols = args[0] + else: + if not isinstance(args[1], int): + raise TypeError("expected int") + self.__rows = args[0] + self.__cols = args[1] + elif isinstance(args[0], _matrix): + A = args[0] + self.__rows = A._matrix__rows + self.__cols = A._matrix__cols + for i in xrange(A.__rows): + for j in xrange(A.__cols): + self[i, j] = A[i, j] + elif hasattr(args[0], 'tolist'): + A = self.ctx.matrix(args[0].tolist()) + self.__data = A._matrix__data + self.__rows = A._matrix__rows + self.__cols = A._matrix__cols + else: + raise TypeError('could not interpret given arguments') + + def apply(self, f): + """ + Return a copy of self with the function `f` applied elementwise. + """ + new = self.ctx.matrix(self.__rows, self.__cols) + for i in xrange(self.__rows): + for j in xrange(self.__cols): + new[i,j] = f(self[i,j]) + return new + + def __nstr__(self, n=None, **kwargs): + # Build table of string representations of the elements + res = [] + # Track per-column max lengths for pretty alignment + maxlen = [0] * self.cols + for i in range(self.rows): + res.append([]) + for j in range(self.cols): + if n: + string = self.ctx.nstr(self[i,j], n, **kwargs) + else: + string = str(self[i,j]) + res[-1].append(string) + maxlen[j] = max(len(string), maxlen[j]) + # Patch strings together + for i, row in enumerate(res): + for j, elem in enumerate(row): + # Pad each element up to maxlen so the columns line up + row[j] = elem.rjust(maxlen[j]) + res[i] = "[" + colsep.join(row) + "]" + return rowsep.join(res) + + def __str__(self): + return self.__nstr__() + + def _toliststr(self, avoid_type=False): + """ + Create a list string from a matrix. + + If avoid_type: avoid multiple 'mpf's. 
+ """ + # XXX: should be something like self.ctx._types + typ = self.ctx.mpf + s = '[' + for i in xrange(self.__rows): + s += '[' + for j in xrange(self.__cols): + if not avoid_type or not isinstance(self[i,j], typ): + a = repr(self[i,j]) + else: + a = "'" + str(self[i,j]) + "'" + s += a + ', ' + s = s[:-2] + s += '],\n ' + s = s[:-3] + s += ']' + return s + + def tolist(self): + """ + Convert the matrix to a nested list. + """ + return [[self[i,j] for j in range(self.__cols)] for i in range(self.__rows)] + + def __repr__(self): + if self.ctx.pretty: + return self.__str__() + s = 'matrix(\n' + s += self._toliststr(avoid_type=True) + ')' + return s + + def __get_element(self, key): + ''' + Fast extraction of the i,j element from the matrix + This function is for private use only because is unsafe: + 1. Does not check on the value of key it expects key to be a integer tuple (i,j) + 2. Does not check bounds + ''' + if key in self.__data: + return self.__data[key] + else: + return self.ctx.zero + + def __set_element(self, key, value): + ''' + Fast assignment of the i,j element in the matrix + This function is unsafe: + 1. Does not check on the value of key it expects key to be a integer tuple (i,j) + 2. Does not check bounds + 3. Does not check the value type + 4. 
Does not reset the LU cache + ''' + if value: # only store non-zeros + self.__data[key] = value + elif key in self.__data: + del self.__data[key] + + + def __getitem__(self, key): + ''' + Getitem function for mp matrix class with slice index enabled + it allows the following assingments + scalar to a slice of the matrix + B = A[:,2:6] + ''' + # Convert vector to matrix indexing + if isinstance(key, int) or isinstance(key,slice): + # only sufficent for vectors + if self.__rows == 1: + key = (0, key) + elif self.__cols == 1: + key = (key, 0) + else: + raise IndexError('insufficient indices for matrix') + + if isinstance(key[0],slice) or isinstance(key[1],slice): + + #Rows + if isinstance(key[0],slice): + #Check bounds + if (key[0].start is None or key[0].start >= 0) and \ + (key[0].stop is None or key[0].stop <= self.__rows+1): + # Generate indices + rows = xrange(*key[0].indices(self.__rows)) + else: + raise IndexError('Row index out of bounds') + else: + # Single row + rows = [key[0]] + + # Columns + if isinstance(key[1],slice): + # Check bounds + if (key[1].start is None or key[1].start >= 0) and \ + (key[1].stop is None or key[1].stop <= self.__cols+1): + # Generate indices + columns = xrange(*key[1].indices(self.__cols)) + else: + raise IndexError('Column index out of bounds') + + else: + # Single column + columns = [key[1]] + + # Create matrix slice + m = self.ctx.matrix(len(rows),len(columns)) + + # Assign elements to the output matrix + for i,x in enumerate(rows): + for j,y in enumerate(columns): + m.__set_element((i,j),self.__get_element((x,y))) + + return m + + else: + # single element extraction + if key[0] >= self.__rows or key[1] >= self.__cols: + raise IndexError('matrix index out of range') + if key in self.__data: + return self.__data[key] + else: + return self.ctx.zero + + def __setitem__(self, key, value): + # setitem function for mp matrix class with slice index enabled + # it allows the following assingments + # scalar to a slice of the matrix + # 
A[:,2:6] = 2.5 + # submatrix to matrix (the value matrix should be the same size as the slice size) + # A[3,:] = B where A is n x m and B is n x 1 + # Convert vector to matrix indexing + if isinstance(key, int) or isinstance(key,slice): + # only sufficent for vectors + if self.__rows == 1: + key = (0, key) + elif self.__cols == 1: + key = (key, 0) + else: + raise IndexError('insufficient indices for matrix') + # Slice indexing + if isinstance(key[0],slice) or isinstance(key[1],slice): + # Rows + if isinstance(key[0],slice): + # Check bounds + if (key[0].start is None or key[0].start >= 0) and \ + (key[0].stop is None or key[0].stop <= self.__rows+1): + # generate row indices + rows = xrange(*key[0].indices(self.__rows)) + else: + raise IndexError('Row index out of bounds') + else: + # Single row + rows = [key[0]] + # Columns + if isinstance(key[1],slice): + # Check bounds + if (key[1].start is None or key[1].start >= 0) and \ + (key[1].stop is None or key[1].stop <= self.__cols+1): + # Generate column indices + columns = xrange(*key[1].indices(self.__cols)) + else: + raise IndexError('Column index out of bounds') + else: + # Single column + columns = [key[1]] + # Assign slice with a scalar + if isinstance(value,self.ctx.matrix): + # Assign elements to matrix if input and output dimensions match + if len(rows) == value.rows and len(columns) == value.cols: + for i,x in enumerate(rows): + for j,y in enumerate(columns): + self.__set_element((x,y), value.__get_element((i,j))) + else: + raise ValueError('Dimensions do not match') + else: + # Assign slice with scalars + value = self.ctx.convert(value) + for i in rows: + for j in columns: + self.__set_element((i,j), value) + else: + # Single element assingment + # Check bounds + if key[0] >= self.__rows or key[1] >= self.__cols: + raise IndexError('matrix index out of range') + # Convert and store value + value = self.ctx.convert(value) + if value: # only store non-zeros + self.__data[key] = value + elif key in 
self.__data: + del self.__data[key] + + if self._LU: + self._LU = None + return + + def __iter__(self): + for i in xrange(self.__rows): + for j in xrange(self.__cols): + yield self[i,j] + + def __mul__(self, other): + if isinstance(other, self.ctx.matrix): + # dot multiplication + if self.__cols != other.__rows: + raise ValueError('dimensions not compatible for multiplication') + new = self.ctx.matrix(self.__rows, other.__cols) + self_zero = self.ctx.zero + self_get = self.__data.get + other_zero = other.ctx.zero + other_get = other.__data.get + for i in xrange(self.__rows): + for j in xrange(other.__cols): + new[i, j] = self.ctx.fdot((self_get((i,k), self_zero), other_get((k,j), other_zero)) + for k in xrange(other.__rows)) + return new + else: + # try scalar multiplication + new = self.ctx.matrix(self.__rows, self.__cols) + for i in xrange(self.__rows): + for j in xrange(self.__cols): + new[i, j] = other * self[i, j] + return new + + def __matmul__(self, other): + return self.__mul__(other) + + def __rmul__(self, other): + # assume other is scalar and thus commutative + if isinstance(other, self.ctx.matrix): + raise TypeError("other should not be type of ctx.matrix") + return self.__mul__(other) + + def __pow__(self, other): + # avoid cyclic import problems + #from linalg import inverse + if not isinstance(other, int): + raise ValueError('only integer exponents are supported') + if not self.__rows == self.__cols: + raise ValueError('only powers of square matrices are defined') + n = other + if n == 0: + return self.ctx.eye(self.__rows) + if n < 0: + n = -n + neg = True + else: + neg = False + i = n + y = 1 + z = self.copy() + while i != 0: + if i % 2 == 1: + y = y * z + z = z*z + i = i // 2 + if neg: + y = self.ctx.inverse(y) + return y + + def __div__(self, other): + # assume other is scalar and do element-wise divison + assert not isinstance(other, self.ctx.matrix) + new = self.ctx.matrix(self.__rows, self.__cols) + for i in xrange(self.__rows): + for j in 
xrange(self.__cols): + new[i,j] = self[i,j] / other + return new + + __truediv__ = __div__ + + def __add__(self, other): + if isinstance(other, self.ctx.matrix): + if not (self.__rows == other.__rows and self.__cols == other.__cols): + raise ValueError('incompatible dimensions for addition') + new = self.ctx.matrix(self.__rows, self.__cols) + for i in xrange(self.__rows): + for j in xrange(self.__cols): + new[i,j] = self[i,j] + other[i,j] + return new + else: + # assume other is scalar and add element-wise + new = self.ctx.matrix(self.__rows, self.__cols) + for i in xrange(self.__rows): + for j in xrange(self.__cols): + new[i,j] += self[i,j] + other + return new + + def __radd__(self, other): + return self.__add__(other) + + def __sub__(self, other): + if isinstance(other, self.ctx.matrix) and not (self.__rows == other.__rows + and self.__cols == other.__cols): + raise ValueError('incompatible dimensions for subtraction') + return self.__add__(other * (-1)) + + def __pos__(self): + """ + +M returns a copy of M, rounded to current working precision. 
+ """ + return (+1) * self + + def __neg__(self): + return (-1) * self + + def __rsub__(self, other): + return -self + other + + def __eq__(self, other): + return self.__rows == other.__rows and self.__cols == other.__cols \ + and self.__data == other.__data + + def __len__(self): + if self.rows == 1: + return self.cols + elif self.cols == 1: + return self.rows + else: + return self.rows # do it like numpy + + def __getrows(self): + return self.__rows + + def __setrows(self, value): + for key in self.__data.copy(): + if key[0] >= value: + del self.__data[key] + self.__rows = value + + rows = property(__getrows, __setrows, doc='number of rows') + + def __getcols(self): + return self.__cols + + def __setcols(self, value): + for key in self.__data.copy(): + if key[1] >= value: + del self.__data[key] + self.__cols = value + + cols = property(__getcols, __setcols, doc='number of columns') + + def transpose(self): + new = self.ctx.matrix(self.__cols, self.__rows) + for i in xrange(self.__rows): + for j in xrange(self.__cols): + new[j,i] = self[i,j] + return new + + T = property(transpose) + + def conjugate(self): + return self.apply(self.ctx.conj) + + def transpose_conj(self): + return self.conjugate().transpose() + + H = property(transpose_conj) + + def copy(self): + new = self.ctx.matrix(self.__rows, self.__cols) + new.__data = self.__data.copy() + return new + + __copy__ = copy + + def column(self, n): + m = self.ctx.matrix(self.rows, 1) + for i in range(self.rows): + m[i] = self[i,n] + return m + +class MatrixMethods(object): + + def __init__(ctx): + # XXX: subclass + ctx.matrix = type('matrix', (_matrix,), {}) + ctx.matrix.ctx = ctx + ctx.matrix.convert = ctx.convert + + def eye(ctx, n, **kwargs): + """ + Create square identity matrix n x n. + """ + A = ctx.matrix(n, **kwargs) + for i in xrange(n): + A[i,i] = 1 + return A + + def diag(ctx, diagonal, **kwargs): + """ + Create square diagonal matrix using given list. 
+ + Example: + >>> from mpmath import diag, mp + >>> mp.pretty = False + >>> diag([1, 2, 3]) + matrix( + [['1.0', '0.0', '0.0'], + ['0.0', '2.0', '0.0'], + ['0.0', '0.0', '3.0']]) + """ + A = ctx.matrix(len(diagonal), **kwargs) + for i in xrange(len(diagonal)): + A[i,i] = diagonal[i] + return A + + def zeros(ctx, *args, **kwargs): + """ + Create matrix m x n filled with zeros. + One given dimension will create square matrix n x n. + + Example: + >>> from mpmath import zeros, mp + >>> mp.pretty = False + >>> zeros(2) + matrix( + [['0.0', '0.0'], + ['0.0', '0.0']]) + """ + if len(args) == 1: + m = n = args[0] + elif len(args) == 2: + m = args[0] + n = args[1] + else: + raise TypeError('zeros expected at most 2 arguments, got %i' % len(args)) + A = ctx.matrix(m, n, **kwargs) + for i in xrange(m): + for j in xrange(n): + A[i,j] = 0 + return A + + def ones(ctx, *args, **kwargs): + """ + Create matrix m x n filled with ones. + One given dimension will create square matrix n x n. + + Example: + >>> from mpmath import ones, mp + >>> mp.pretty = False + >>> ones(2) + matrix( + [['1.0', '1.0'], + ['1.0', '1.0']]) + """ + if len(args) == 1: + m = n = args[0] + elif len(args) == 2: + m = args[0] + n = args[1] + else: + raise TypeError('ones expected at most 2 arguments, got %i' % len(args)) + A = ctx.matrix(m, n, **kwargs) + for i in xrange(m): + for j in xrange(n): + A[i,j] = 1 + return A + + def hilbert(ctx, m, n=None): + """ + Create (pseudo) hilbert matrix m x n. + One given dimension will create hilbert matrix n x n. + + The matrix is very ill-conditioned and symmetric, positive definite if + square. + """ + if n is None: + n = m + A = ctx.matrix(m, n) + for i in xrange(m): + for j in xrange(n): + A[i,j] = ctx.one / (i + j + 1) + return A + + def randmatrix(ctx, m, n=None, min=0, max=1, **kwargs): + """ + Create a random m x n matrix. 
+ + All values are >= min and >> from mpmath import randmatrix + >>> randmatrix(2) # doctest:+SKIP + matrix( + [['0.53491598236191806', '0.57195669543302752'], + ['0.85589992269513615', '0.82444367501382143']]) + """ + if not n: + n = m + A = ctx.matrix(m, n, **kwargs) + for i in xrange(m): + for j in xrange(n): + A[i,j] = ctx.rand() * (max - min) + min + return A + + def swap_row(ctx, A, i, j): + """ + Swap row i with row j. + """ + if i == j: + return + if isinstance(A, ctx.matrix): + for k in xrange(A.cols): + A[i,k], A[j,k] = A[j,k], A[i,k] + elif isinstance(A, list): + A[i], A[j] = A[j], A[i] + else: + raise TypeError('could not interpret type') + + def extend(ctx, A, b): + """ + Extend matrix A with column b and return result. + """ + if not isinstance(A, ctx.matrix): + raise TypeError("A should be a type of ctx.matrix") + if A.rows != len(b): + raise ValueError("Value should be equal to len(b)") + A = A.copy() + A.cols += 1 + for i in xrange(A.rows): + A[i, A.cols-1] = b[i] + return A + + def norm(ctx, x, p=2): + r""" + Gives the entrywise `p`-norm of an iterable *x*, i.e. the vector norm + `\left(\sum_k |x_k|^p\right)^{1/p}`, for any given `1 \le p \le \infty`. + + Special cases: + + If *x* is not iterable, this just returns ``absmax(x)``. + + ``p=1`` gives the sum of absolute values. + + ``p=2`` is the standard Euclidean vector norm. + + ``p=inf`` gives the magnitude of the largest element. + + For *x* a matrix, ``p=2`` is the Frobenius norm. + For operator matrix norms, use :func:`~mpmath.mnorm` instead. + + You can use the string 'inf' as well as float('inf') or mpf('inf') + to specify the infinity norm. 
+ + **Examples** + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> x = matrix([-10, 2, 100]) + >>> norm(x, 1) + mpf('112.0') + >>> norm(x, 2) + mpf('100.5186549850325') + >>> norm(x, inf) + mpf('100.0') + + """ + try: + iter(x) + except TypeError: + return ctx.absmax(x) + if type(p) is not int: + p = ctx.convert(p) + if p == ctx.inf: + return max(ctx.absmax(i) for i in x) + elif p == 1: + return ctx.fsum(x, absolute=1) + elif p == 2: + return ctx.sqrt(ctx.fsum(x, absolute=1, squared=1)) + elif p > 1: + return ctx.nthroot(ctx.fsum(abs(i)**p for i in x), p) + else: + raise ValueError('p has to be >= 1') + + def mnorm(ctx, A, p=1): + r""" + Gives the matrix (operator) `p`-norm of A. Currently ``p=1`` and ``p=inf`` + are supported: + + ``p=1`` gives the 1-norm (maximal column sum) + + ``p=inf`` gives the `\infty`-norm (maximal row sum). + You can use the string 'inf' as well as float('inf') or mpf('inf') + + ``p=2`` (not implemented) for a square matrix is the usual spectral + matrix norm, i.e. the largest singular value. + + ``p='f'`` (or 'F', 'fro', 'Frobenius, 'frobenius') gives the + Frobenius norm, which is the elementwise 2-norm. The Frobenius norm is an + approximation of the spectral norm and satisfies + + .. math :: + + \frac{1}{\sqrt{\mathrm{rank}(A)}} \|A\|_F \le \|A\|_2 \le \|A\|_F + + The Frobenius norm lacks some mathematical properties that might + be expected of a norm. + + For general elementwise `p`-norms, use :func:`~mpmath.norm` instead. 
+ + **Examples** + + >>> from mpmath import * + >>> mp.dps = 15; mp.pretty = False + >>> A = matrix([[1, -1000], [100, 50]]) + >>> mnorm(A, 1) + mpf('1050.0') + >>> mnorm(A, inf) + mpf('1001.0') + >>> mnorm(A, 'F') + mpf('1006.2310867787777') + + """ + A = ctx.matrix(A) + if type(p) is not int: + if type(p) is str and 'frobenius'.startswith(p.lower()): + return ctx.norm(A, 2) + p = ctx.convert(p) + m, n = A.rows, A.cols + if p == 1: + return max(ctx.fsum((A[i,j] for i in xrange(m)), absolute=1) for j in xrange(n)) + elif p == ctx.inf: + return max(ctx.fsum((A[i,j] for j in xrange(n)), absolute=1) for i in xrange(m)) + else: + raise NotImplementedError("matrix p-norm for arbitrary p") + +if __name__ == '__main__': + import doctest + doctest.testmod() diff --git a/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/runtests.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/runtests.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..65c78c3e0297d26cafa1b6df4bf55131ee327497 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/runtests.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_bitwise.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_bitwise.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c2280c51c98793200bb9b13cc3f3bf268b0093cf Binary files /dev/null and b/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_bitwise.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_compatibility.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_compatibility.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..526150d41cc3c49eb087fc05a19b8b32bc1ff7db Binary files /dev/null and 
b/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_compatibility.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_elliptic.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_elliptic.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ce13541bd157cb2d57cbb7ffb45218fac7fb5e67 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_elliptic.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_hp.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_hp.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b90a2e044e0c5f50e9274f46cb26bcc75b0f165e Binary files /dev/null and b/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_hp.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_linalg.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_linalg.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8e38423c311e7830d0a7fe5735613a3e4ebd6eb6 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_linalg.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_pickle.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_pickle.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..a5727f1f1def20357a27866590eba47eac5a9bfe Binary files /dev/null and b/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_pickle.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_special.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_special.cpython-311.pyc new file 
mode 100644 index 0000000000000000000000000000000000000000..cfd4dd5ebecdd025192e00ee9b3f2719d9c4033c Binary files /dev/null and b/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_special.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_visualization.cpython-311.pyc b/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_visualization.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f11b7ef6d9279f2b57e94b7808c3aceea94098bb Binary files /dev/null and b/.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_visualization.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/INSTALLER b/.venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/INSTALLER new file mode 100644 index 0000000000000000000000000000000000000000..a1b589e38a32041e49332e5e81c2d363dc418d68 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/INSTALLER @@ -0,0 +1 @@ +pip diff --git a/.venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/LICENSE b/.venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/LICENSE new file mode 100644 index 0000000000000000000000000000000000000000..456fa7caf4428efdaff443dfb99b4e426172376c --- /dev/null +++ b/.venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/.venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/METADATA b/.venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/METADATA new file mode 100644 index 0000000000000000000000000000000000000000..7c0ffb51814e3faf38304bdabd15cc58ed1a2286 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/METADATA @@ -0,0 +1,313 @@ +Metadata-Version: 2.1 +Name: opencensus +Version: 0.11.4 +Summary: A stats collection and distributed tracing framework +Home-page: https://github.com/census-instrumentation/opencensus-python +Author: OpenCensus Authors +Author-email: census-developers@googlegroups.com +License: Apache-2.0 +Platform: UNKNOWN +Classifier: Intended Audience :: Developers +Classifier: Development Status :: 4 - Beta +Classifier: Intended Audience :: Developers +Classifier: License :: OSI Approved :: Apache Software License +Classifier: Programming Language :: Python +Classifier: Programming Language :: Python :: 2 +Classifier: Programming Language :: Python :: 2.7 +Classifier: Programming Language :: Python :: 3 +Classifier: Programming Language :: Python :: 3.4 +Classifier: Programming Language :: Python :: 3.5 +Classifier: Programming Language :: Python :: 3.6 +Classifier: Programming Language :: Python :: 3.7 +Classifier: Programming Language :: Python :: 3.8 +Classifier: Programming Language :: Python :: 3.9 +Requires-Dist: opencensus-context (>=0.1.3) +Requires-Dist: six (~=1.16) +Requires-Dist: google-api-core (<2.0.0,>=1.0.0) ; python_version < "3.6" +Requires-Dist: google-api-core (<3.0.0,>=1.0.0) ; python_version >= "3.6" + +OpenCensus - A stats collection and distributed tracing framework +================================================================= + +|gitter| +|travisci| +|circleci| +|pypi| +|compat_check_pypi| +|compat_check_github| + + +.. 
|travisci| image:: https://travis-ci.org/census-instrumentation/opencensus-python.svg?branch=master + :target: https://travis-ci.org/census-instrumentation/opencensus-python +.. |circleci| image:: https://circleci.com/gh/census-instrumentation/opencensus-python.svg?style=shield + :target: https://circleci.com/gh/census-instrumentation/opencensus-python +.. |gitter| image:: https://badges.gitter.im/census-instrumentation/lobby.svg + :target: https://gitter.im/census-instrumentation/lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge +.. |pypi| image:: https://badge.fury.io/py/opencensus.svg + :target: https://pypi.org/project/opencensus/ +.. |compat_check_pypi| image:: https://python-compatibility-tools.appspot.com/one_badge_image?package=opencensus + :target: https://python-compatibility-tools.appspot.com/one_badge_target?package=opencensus +.. |compat_check_github| image:: https://python-compatibility-tools.appspot.com/one_badge_image?package=git%2Bgit%3A//github.com/census-instrumentation/opencensus-python.git + :target: https://python-compatibility-tools.appspot.com/one_badge_target?package=git%2Bgit%3A//github.com/census-instrumentation/opencensus-python.git + +`OpenCensus`_ for Python. OpenCensus provides a framework to measure a +server's resource usage and collect performance stats. This repository +contains Python related utilities and supporting software needed by +OpenCensus. + +.. _OpenCensus: https://github.com/census-instrumentation + +- `API Documentation`_ + +.. _API Documentation: https://opencensus.io/api/python/trace/usage.html + +-------- + Tracing +-------- + +Installation & basic usage +-------------------------- + +1. Install the opencensus package using `pip`_ or `pipenv`_: + + :: + + pip install opencensus + pipenv install opencensus + +2. Initialize a tracer for your application: + + .. 
code:: python + + from opencensus.trace.tracer import Tracer + from opencensus.trace.samplers import AlwaysOnSampler + + tracer = Tracer(sampler=AlwaysOnSampler()) + + .. _pip: https://pip.pypa.io + .. _pipenv: https://docs.pipenv.org/ + +3. Initialize a view_manager and a stats_recorder for your application: + + .. code:: python + + from opencensus.stats import stats as stats_module + + stats = stats_module.stats + view_manager = stats.view_manager + stats_recorder = stats.stats_recorder + + +Usage +----- + +You can collect traces using the ``Tracer`` `context manager`_: + +.. code:: python + + from opencensus.trace.tracer import Tracer + from opencensus.trace.samplers import AlwaysOnSampler + + # Initialize a tracer, by default using the `PrintExporter` + tracer = Tracer(sampler=AlwaysOnSampler()) + + # Example for creating nested spans + with tracer.span(name='span1'): + do_something_to_trace() + with tracer.span(name='span1_child1'): + do_something_to_trace() + with tracer.span(name='span1_child2'): + do_something_to_trace() + with tracer.span(name='span2'): + do_something_to_trace() + +OpenCensus will collect everything within the ``with`` statement as a single span. + +Alternatively, you can explicitly start and end a span: + +.. code:: python + + from opencensus.trace.tracer import Tracer + from opencensus.trace.samplers import AlwaysOnSampler + + # Initialize a tracer, by default using the `PrintExporter` + tracer = Tracer(sampler=AlwaysOnSampler()) + + tracer.start_span(name='span1') + do_something_to_trace() + tracer.end_span() + + +.. _context manager: https://docs.python.org/3/reference/datamodel.html#context-managers + + +Customization +------------- + +There are several things you can customize in OpenCensus: + +* **Excludelist**, which excludes certain hosts and paths from being tracked. + By default, the health check path for the App Engine flexible environment is + not tracked, you can turn it on by excluding it from the excludelist setting. 
+ +* **Exporter**, which sends the traces. + By default, the traces are printed to stdout in JSON format. You can choose + different exporters to send the traces to. There are three built-in exporters, + which are ``PrintExporter``, ``FileExporter`` and ``LoggingExporter``, the + other exporters are provided as `extensions <#trace-exporter>`__. + +* **Sampler**, which determines how traces are sampled. + The default sampler is the ``ProbabilitySampler``, which samples (i.e. + enables tracing for) a percentage of all requests. Sampling is deterministic + according to the trace ID. To force sampling for all requests, or to prevent + any request from being sampled, see ``AlwaysOnSampler`` and + ``AlwaysOffSampler``. + +* **Propagator**, which serializes and deserializes the + ``SpanContext`` and its headers. The default propagator is + ``TraceContextPropagator``, other propagators include + ``BinaryFormatPropagator``, ``GoogleCloudFormatPropagator`` and + ``TextFormatPropagator``. + + +You can customize while initializing a tracer. + +.. code:: python + + import requests + + from opencensus.trace import config_integration + from opencensus.trace import file_exporter + from opencensus.trace import tracer as tracer_module + from opencensus.trace.propagation import google_cloud_format + from opencensus.trace.samplers import ProbabilitySampler + + config_integration.trace_integrations(['httplib']) + + tracer = tracer_module.Tracer( + exporter=file_exporter.FileExporter(file_name='traces'), + propagator=google_cloud_format.GoogleCloudFormatPropagator(), + sampler=ProbabilitySampler(rate=0.5), + ) + + with tracer.span(name='parent'): + with tracer.span(name='child'): + response = requests.get('http://localhost:5000') + +You can use a configuration file for Flask/Django/Pyramid. For more +information, please read the +`individual integration documentation <#integration>`_. + +.. 
code:: python + + 'OPENCENSUS': { + 'TRACE': { + 'EXCLUDELIST_HOSTNAMES': ['localhost', '127.0.0.1'], + 'EXCLUDELIST_PATHS': ['_ah/health'], + 'SAMPLER': 'opencensus.trace.samplers.ProbabilitySampler(rate=1)', + 'EXPORTER': '''opencensus.ext.ocagent.trace_exporter.TraceExporter( + service_name='foobar', + )''', + 'PROPAGATOR': 'opencensus.trace.propagation.google_cloud_format.GoogleCloudFormatPropagator()', + } + } + +------------ + Extensions +------------ + +Integration +----------- + +OpenCensus supports integration with popular web frameworks, client libraries and built-in libraries. + +- `Django`_ +- `Flask`_ +- `gevent`_ +- `Google Cloud Client Libraries`_ +- `gRPC`_ +- `httplib`_ +- `httpx`_ +- `logging`_ +- `MySQL`_ +- `PostgreSQL`_ +- `pymongo`_ +- `PyMySQL`_ +- `Pyramid`_ +- `requests`_ +- `SQLAlchemy`_ +- `threading`_ + +Log Exporter +------------ + +- `Azure`_ + +Metrics Exporter +---------------- + +- `Azure`_ + +Stats Exporter +-------------- + +- `OCAgent`_ +- `Prometheus`_ +- `Stackdriver`_ + +Trace Exporter +-------------- + +- `Azure`_ +- `Datadog`_ +- `Jaeger`_ +- `OCAgent`_ +- `Stackdriver`_ +- `Zipkin`_ + +.. _Azure: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-azure +.. _Datadog: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-datadog +.. _Django: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-django +.. _Flask: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-flask +.. _FastAPI: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-fastapi +.. _gevent: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-gevent +.. _Google Cloud Client Libraries: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-google-cloud-clientlibs +.. 
_gRPC: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-grpc +.. _httplib: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-httplib +.. _httpx: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-httpx +.. _Jaeger: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-jaeger +.. _logging: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-logging +.. _MySQL: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-mysql +.. _OCAgent: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-ocagent +.. _PostgreSQL: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-postgresql +.. _Prometheus: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-prometheus +.. _pymongo: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-pymongo +.. _PyMySQL: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-pymysql +.. _Pyramid: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-pyramid +.. _requests: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-requests +.. _SQLAlchemy: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-sqlalchemy +.. _Stackdriver: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-stackdriver +.. _threading: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-threading +.. 
_Zipkin: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-zipkin + +------------ + Versioning +------------ + +This library follows `Semantic Versioning`_. + +**GA**: Libraries defined at a GA quality level are stable, and will not introduce +backwards-incompatible changes in any minor or patch releases. We will address issues and requests +with the highest priority. If we were to make a backwards-incompatible changes on an API, we will +first mark the existing API as deprecated and keep it for 18 months before removing it. + +**Beta**: Libraries defined at a Beta quality level are expected to be mostly stable and we're +working towards their release candidate. We will address issues and requests with a higher priority. +There may be backwards incompatible changes in a minor version release, though not in a patch +release. If an element is part of an API that is only meant to be used by exporters or other +opencensus libraries, then there is no deprecation period. Otherwise, we will deprecate it for 18 +months before removing it, if possible. + +.. 
_Semantic Versioning: https://semver.org/ + + diff --git a/.venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/RECORD b/.venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/RECORD new file mode 100644 index 0000000000000000000000000000000000000000..c113af3fcd2fd55d2f3ad8983c54bd22360c5d86 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/RECORD @@ -0,0 +1,193 @@ +opencensus-0.11.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4 +opencensus-0.11.4.dist-info/LICENSE,sha256=YmXwJVPDAw7MfsXq36r_DSbI55E92i-djKAC0Fa43ts,11553 +opencensus-0.11.4.dist-info/METADATA,sha256=M0nHiQlDaEteLWFmNwGGs6Xu1C3_YsczItcW70LZG4k,12351 +opencensus-0.11.4.dist-info/RECORD,, +opencensus-0.11.4.dist-info/WHEEL,sha256=Z-nyYpwrcSqxfdux5Mbn_DQ525iP7J2DG3JgGvOYyTQ,110 +opencensus-0.11.4.dist-info/namespace_packages.txt,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1 +opencensus-0.11.4.dist-info/top_level.txt,sha256=J24OU61lnFeMMuwOLPtTNywYsd2Bzp5KbBCPtAhDgaI,11 +opencensus/__init__.py,sha256=F0UD44DuZCpVvI1PX5rW4FcKQ004ORdqeOhf4JsheIY,66 +opencensus/__pycache__/__init__.cpython-311.pyc,, +opencensus/common/__init__.py,sha256=F0UD44DuZCpVvI1PX5rW4FcKQ004ORdqeOhf4JsheIY,66 +opencensus/common/__pycache__/__init__.cpython-311.pyc,, +opencensus/common/backports/__init__.py,sha256=ZfPWVZ8q7qcuRBDY9pGyH36kQ5mejJuMNDyOltEZqb0,2888 +opencensus/common/backports/__pycache__/__init__.cpython-311.pyc,, +opencensus/common/configuration/__init__.py,sha256=YIDloGhqI4TzgvMx8lIrTMephpwaW31xcoO0a6UGmTE,1443 +opencensus/common/configuration/__pycache__/__init__.cpython-311.pyc,, +opencensus/common/http_handler/__init__.py,sha256=CHqTDxGFN8pq1njv5f53aMVzdc1G-xYJEfqwJKR5QWc,1434 +opencensus/common/http_handler/__pycache__/__init__.cpython-311.pyc,, +opencensus/common/monitored_resource/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +opencensus/common/monitored_resource/__pycache__/__init__.cpython-311.pyc,, 
+opencensus/common/monitored_resource/__pycache__/aws_identity_doc_utils.cpython-311.pyc,, +opencensus/common/monitored_resource/__pycache__/gcp_metadata_config.cpython-311.pyc,, +opencensus/common/monitored_resource/__pycache__/k8s_utils.cpython-311.pyc,, +opencensus/common/monitored_resource/__pycache__/monitored_resource.cpython-311.pyc,, +opencensus/common/monitored_resource/aws_identity_doc_utils.py,sha256=v-7tuf9P554rhunHSoYmghszU14gI7i9ot7NuXJB42w,3243 +opencensus/common/monitored_resource/gcp_metadata_config.py,sha256=OufJ6HUxVTfRC-5_WFvs3SiywXxA2damyVyAtvUZsFs,4119 +opencensus/common/monitored_resource/k8s_utils.py,sha256=9RSsoqUdMSpS12P-tn3v7VtSXaIBM8KqqqBxL7MpqiM,2108 +opencensus/common/monitored_resource/monitored_resource.py,sha256=vbxUPe6kFufY7IL46QMBBOxwSI2AVHcnwZSVxqUf2U8,2389 +opencensus/common/resource/__init__.py,sha256=9c0T-2H2oHI1lXm11KvFdUjfSvsOMiowLQaGJqZ6NUY,6591 +opencensus/common/resource/__pycache__/__init__.cpython-311.pyc,, +opencensus/common/schedule/__init__.py,sha256=6o-WqHxUC075eKEMnqVb-omWzwGY7cxbPcukxkACPUk,4738 +opencensus/common/schedule/__pycache__/__init__.cpython-311.pyc,, +opencensus/common/transports/__init__.py,sha256=TcAW4NO62hdY2RC2-lLga_icFnrpYMrR9zcS2xcuz6U,596 +opencensus/common/transports/__pycache__/__init__.cpython-311.pyc,, +opencensus/common/transports/__pycache__/async_.cpython-311.pyc,, +opencensus/common/transports/__pycache__/base.cpython-311.pyc,, +opencensus/common/transports/__pycache__/sync.cpython-311.pyc,, +opencensus/common/transports/async_.py,sha256=yVabLF1avTBMMewrndVZ5Uet7WPVpH6wncfDpF4hXIA,8063 +opencensus/common/transports/base.py,sha256=MHpSA0DWEe3bd9S1Pqz85i3UOa5yjkP0I_b894VYW5A,1021 +opencensus/common/transports/sync.py,sha256=5cxYNnC3ntynQmM59ccER1aGHKWRWp1yNUZD7DNZKCE,1058 +opencensus/common/utils/__init__.py,sha256=JRpGoXBroCFQTlwm7a3sP2AihrPpfFL8Ots8Yp11eYU,3738 +opencensus/common/utils/__pycache__/__init__.cpython-311.pyc,, 
+opencensus/common/version/__init__.py,sha256=3A51Klomvw2AvHVZ7lV3G1qmldc3cfIPvHNl8C-N55s,622 +opencensus/common/version/__pycache__/__init__.cpython-311.pyc,, +opencensus/log/__init__.py,sha256=U3ULx0SpGhjuUlbaItEQv0bQLUKSlQEBjEgplNEyJcI,4159 +opencensus/log/__pycache__/__init__.cpython-311.pyc,, +opencensus/metrics/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +opencensus/metrics/__pycache__/__init__.cpython-311.pyc,, +opencensus/metrics/__pycache__/label_key.cpython-311.pyc,, +opencensus/metrics/__pycache__/label_value.cpython-311.pyc,, +opencensus/metrics/__pycache__/transport.cpython-311.pyc,, +opencensus/metrics/export/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0 +opencensus/metrics/export/__pycache__/__init__.cpython-311.pyc,, +opencensus/metrics/export/__pycache__/cumulative.cpython-311.pyc,, +opencensus/metrics/export/__pycache__/gauge.cpython-311.pyc,, +opencensus/metrics/export/__pycache__/metric.cpython-311.pyc,, +opencensus/metrics/export/__pycache__/metric_descriptor.cpython-311.pyc,, +opencensus/metrics/export/__pycache__/metric_producer.cpython-311.pyc,, +opencensus/metrics/export/__pycache__/point.cpython-311.pyc,, +opencensus/metrics/export/__pycache__/summary.cpython-311.pyc,, +opencensus/metrics/export/__pycache__/time_series.cpython-311.pyc,, +opencensus/metrics/export/__pycache__/value.cpython-311.pyc,, +opencensus/metrics/export/cumulative.py,sha256=pXuvoBu8dk97HlEH_kH5iAXVzMGVnkZB49HQREOPcmc,2958 +opencensus/metrics/export/gauge.py,sha256=XOLPsP2IEmSRaINvIwPcMjJI-07LdscYRqo2oxVeEmI,17770 +opencensus/metrics/export/metric.py,sha256=Uo6rJKw6D7-UcRwSpsqkGfhli8XU4KrvufmjQ1r38mc,3224 +opencensus/metrics/export/metric_descriptor.py,sha256=NhFPIeQn_G42GU0mxkPG5cRD6pH07y4bxXTGq4BUVrg,6174 +opencensus/metrics/export/metric_producer.py,sha256=8XSFwJpAiliqaIR7TTuaYp26sJ1GZhep4H9Ul_5DSjk,2764 +opencensus/metrics/export/point.py,sha256=S5P0dnPFMT5Waj-nIPlT3V0TBeTyCAiQ4Fx0gWw2QyI,1571 
+opencensus/metrics/export/summary.py,sha256=n30_7EsVZy5CEeNSfGaxVUe-l7bzvjbLTasRJJH48H8,4449 +opencensus/metrics/export/time_series.py,sha256=K-Va6EKfdS5b1ifO68r8d4rOVJRYhcDPp_w_hW18h7A,3278 +opencensus/metrics/export/value.py,sha256=uX9IrsAZirUspDJrxPWPqE4V2Y0REOoRjidPCsMGRUQ,9082 +opencensus/metrics/label_key.py,sha256=ZS0RSVW4VzQzLESLJho9yLRnV4Oi78Hum93mfMSf8CA,1630 +opencensus/metrics/label_value.py,sha256=lv1HbhzOyi4xOIvLFazzYqi8YPoAz1Saci4Se_T26F8,1289 +opencensus/metrics/transport.py,sha256=rVaKjRwPhdSy1Ik9ks1u6Z7CMwf9hbbBRDpBFV5YTts,4534 +opencensus/stats/__init__.py,sha256=TcAW4NO62hdY2RC2-lLga_icFnrpYMrR9zcS2xcuz6U,596 +opencensus/stats/__pycache__/__init__.cpython-311.pyc,, +opencensus/stats/__pycache__/aggregation.cpython-311.pyc,, +opencensus/stats/__pycache__/aggregation_data.cpython-311.pyc,, +opencensus/stats/__pycache__/base_exporter.cpython-311.pyc,, +opencensus/stats/__pycache__/bucket_boundaries.cpython-311.pyc,, +opencensus/stats/__pycache__/execution_context.cpython-311.pyc,, +opencensus/stats/__pycache__/measure.cpython-311.pyc,, +opencensus/stats/__pycache__/measure_to_view_map.cpython-311.pyc,, +opencensus/stats/__pycache__/measurement.cpython-311.pyc,, +opencensus/stats/__pycache__/measurement_map.cpython-311.pyc,, +opencensus/stats/__pycache__/metric_utils.cpython-311.pyc,, +opencensus/stats/__pycache__/stats.cpython-311.pyc,, +opencensus/stats/__pycache__/stats_recorder.cpython-311.pyc,, +opencensus/stats/__pycache__/view.cpython-311.pyc,, +opencensus/stats/__pycache__/view_data.cpython-311.pyc,, +opencensus/stats/__pycache__/view_manager.cpython-311.pyc,, +opencensus/stats/aggregation.py,sha256=M8PAxMf0RXMhezuItnOhcPvadmmRvxeX9xoMCWQexFY,5180 +opencensus/stats/aggregation_data.py,sha256=Q4_xZm5hqpVRZH0qW0hhByXAIBQjL3daVcMQBKruMnA,13885 +opencensus/stats/base_exporter.py,sha256=LlCB_0lLw2XMXChIUU4KOoPfvMWirX8U7-i7p7Fmg9w,1569 +opencensus/stats/bucket_boundaries.py,sha256=2e6lj0dbuRwQPELehsaAhnAO6hSThz8QerJ5tWTzswU,1407 
+opencensus/stats/execution_context.py,sha256=vSMam4Q_CgCfuJ4lVCPBtbBLTK4PCFCyWOhG6zVUtLc,1067 +opencensus/stats/measure.py,sha256=dCxrmRi88TqMgULZYz4amooB7hvMc91xanUB4aMGp0M,1928 +opencensus/stats/measure_to_view_map.py,sha256=mvEDs_p2NLuNnV6qKL0QB2k5AzxIO9eVpCqZzJJCu3w,6271 +opencensus/stats/measurement.py,sha256=DXSUTsSkabsF8y7kjZjuUN0n76weaJi86z1TRPwETw0,1649 +opencensus/stats/measurement_map.py,sha256=8nr-W2OzvKXpNDKXE0hCDNWWVuH3JykKHQ7269tn1KY,4821 +opencensus/stats/metric_utils.py,sha256=JHJWHFm8ztm4Yy1e7pP5qxeRh6_Jk4CmgbrPwmt7DQA,2858 +opencensus/stats/stats.py,sha256=4nBU5aIQXbuCYNfpdwcmH19tS37_VUSJktoxMArq08c,1545 +opencensus/stats/stats_recorder.py,sha256=xoYyGI__Ml-NiKinYH4q9wyT9Z7jXPbsPH5UH5wYAAw,1378 +opencensus/stats/view.py,sha256=-7wzEVJEw3OnC67pi3FVzr2JM-aqhJTXaKnaAA2gaIc,3638 +opencensus/stats/view_data.py,sha256=mlj-tzvvCHnQjgUF_2Rpyfh4BGUDq8uwdX6TgE5BytA,3273 +opencensus/stats/view_manager.py,sha256=ePgy60hsjWf-eR6FNp1biQYbD8umxYtnpMK8FIjDUV0,2216 +opencensus/tags/__init__.py,sha256=TqjMu0ZDj4wMrhcDfIPuswSw4F2PTpxPwlO4ZGzzjys,966 +opencensus/tags/__pycache__/__init__.cpython-311.pyc,, +opencensus/tags/__pycache__/tag.cpython-311.pyc,, +opencensus/tags/__pycache__/tag_key.cpython-311.pyc,, +opencensus/tags/__pycache__/tag_map.cpython-311.pyc,, +opencensus/tags/__pycache__/tag_value.cpython-311.pyc,, +opencensus/tags/__pycache__/validation.cpython-311.pyc,, +opencensus/tags/propagation/__init__.py,sha256=TcAW4NO62hdY2RC2-lLga_icFnrpYMrR9zcS2xcuz6U,596 +opencensus/tags/propagation/__pycache__/__init__.cpython-311.pyc,, +opencensus/tags/propagation/__pycache__/binary_serializer.cpython-311.pyc,, +opencensus/tags/propagation/binary_serializer.py,sha256=eWygMfCWytI66OVO4lRpEQxAb9sKER5wi0IlRjKbGRY,3891 +opencensus/tags/tag.py,sha256=myzlZNWW2TdF4nvL4BsiqIFnsqsmalHAvUYsqWPQnc4,1145 +opencensus/tags/tag_key.py,sha256=obev7j_EvSTK56MD6OOS8yhlzu0-xIMHiVevdt0_C0o,1227 +opencensus/tags/tag_map.py,sha256=hNuiRO1qZJf3l_gesb3extDUnwCMfl-mW-8TH1tfKTE,3339 
+opencensus/tags/tag_value.py,sha256=5opAVWqI50CovpSh_Z6mqnaC2T4ZDpurLB423OyI5lE,1255 +opencensus/tags/validation.py,sha256=OV8gJ9b3UyLz9tYr6IbAT65vHr6jp3gB9Bo8UmpeA8I,1255 +opencensus/trace/__init__.py,sha256=3vmAI0NGA5urfT7sweDlIpvWmGUf5LG2luZsd_Ra4f4,660 +opencensus/trace/__pycache__/__init__.cpython-311.pyc,, +opencensus/trace/__pycache__/attributes.cpython-311.pyc,, +opencensus/trace/__pycache__/attributes_helper.cpython-311.pyc,, +opencensus/trace/__pycache__/base_exporter.cpython-311.pyc,, +opencensus/trace/__pycache__/base_span.cpython-311.pyc,, +opencensus/trace/__pycache__/blank_span.cpython-311.pyc,, +opencensus/trace/__pycache__/config_integration.cpython-311.pyc,, +opencensus/trace/__pycache__/exceptions_status.cpython-311.pyc,, +opencensus/trace/__pycache__/execution_context.cpython-311.pyc,, +opencensus/trace/__pycache__/file_exporter.cpython-311.pyc,, +opencensus/trace/__pycache__/integrations.cpython-311.pyc,, +opencensus/trace/__pycache__/link.cpython-311.pyc,, +opencensus/trace/__pycache__/logging_exporter.cpython-311.pyc,, +opencensus/trace/__pycache__/print_exporter.cpython-311.pyc,, +opencensus/trace/__pycache__/span.cpython-311.pyc,, +opencensus/trace/__pycache__/span_context.cpython-311.pyc,, +opencensus/trace/__pycache__/span_data.cpython-311.pyc,, +opencensus/trace/__pycache__/stack_trace.cpython-311.pyc,, +opencensus/trace/__pycache__/status.cpython-311.pyc,, +opencensus/trace/__pycache__/time_event.cpython-311.pyc,, +opencensus/trace/__pycache__/trace_options.cpython-311.pyc,, +opencensus/trace/__pycache__/tracer.cpython-311.pyc,, +opencensus/trace/__pycache__/tracestate.cpython-311.pyc,, +opencensus/trace/__pycache__/utils.cpython-311.pyc,, +opencensus/trace/attributes.py,sha256=YAyn_Fp2V969gOwB_QhQN0iwnlskKV3f4E9rt_xMjOA,2457 +opencensus/trace/attributes_helper.py,sha256=o67kxxUOGwbjUY_Zeorr4AkCsqj0z8FYYvNizgx8z_Y,1522 +opencensus/trace/base_exporter.py,sha256=R2B4vWebq5BxEdxTJ6sXxnRfBTzJI7NBSpsvJC9eaRk,1585 
+opencensus/trace/base_span.py,sha256=gMYlnYrsYwBbhmUxT_lC0kcFqAoJTSIU7q35RSAvOlE,3429 +opencensus/trace/blank_span.py,sha256=tcSFY4WtINQy9ygclZncSg5qq1gPW8J2PCIXx9L7Akc,5440 +opencensus/trace/config_integration.py,sha256=DuUsSoIG4Mg0Z3eNjp13bZ0mygabe8GFpnSNj7F2GNs,1330 +opencensus/trace/exceptions_status.py,sha256=YYyGIYnsVNevsg6bdloxzEObxO4WmMmBTyTKPWjCbZQ,940 +opencensus/trace/execution_context.py,sha256=OYQGwRUXSx9Tn9p_wxghrI3MFV-1ME9Wp7pYXyuaaCw,2476 +opencensus/trace/file_exporter.py,sha256=eXYNXEx5dHU5wPFb61ASUDyUZepxJMhKbmcQ9Mt8mJo,2669 +opencensus/trace/integrations.py,sha256=OeyzYjBpA501-96KC1fEccfasTb03njsswbZV0Er230,1448 +opencensus/trace/link.py,sha256=e-Wwrsnk3-zlK9JMaWQ1t80jSmdkPQa-dBM667DnRTQ,2622 +opencensus/trace/logging_exporter.py,sha256=zSby20MJ1o6M4nIB9hCeiA1zhNdZgZcos3_2IZf_83I,3217 +opencensus/trace/print_exporter.py,sha256=T0SvkDcbwIiA5V-UriZLSMDDetuDesLXQ6Ru-xveCYw,1932 +opencensus/trace/propagation/__init__.py,sha256=3cwrGSSn3JMa-Au3B1875PPwj3Ndg-lxgslMZRNsW3s,596 +opencensus/trace/propagation/__pycache__/__init__.cpython-311.pyc,, +opencensus/trace/propagation/__pycache__/b3_format.cpython-311.pyc,, +opencensus/trace/propagation/__pycache__/binary_format.cpython-311.pyc,, +opencensus/trace/propagation/__pycache__/google_cloud_format.cpython-311.pyc,, +opencensus/trace/propagation/__pycache__/text_format.cpython-311.pyc,, +opencensus/trace/propagation/__pycache__/trace_context_http_header_format.cpython-311.pyc,, +opencensus/trace/propagation/__pycache__/tracestate_string_format.cpython-311.pyc,, +opencensus/trace/propagation/b3_format.py,sha256=7C2dZQgsu2NVTdwsnE8qnU-WfdHgpBfb5g4oSItgVuU,4053 +opencensus/trace/propagation/binary_format.py,sha256=aahWtjg1AUJDkMiE3FUrSQrRZyb-H5KeULsEYaM_QjA,6168 +opencensus/trace/propagation/google_cloud_format.py,sha256=GDkpPWW5r3bubkp86CDSQwKDYeiY6X9i-m57-Ysdwgs,4605 +opencensus/trace/propagation/text_format.py,sha256=nBUZq4_jf5WhdSth1sl30rhrmIaaIabJiIQ1fwiY0GY,3174 
+opencensus/trace/propagation/trace_context_http_header_format.py,sha256=y77aO_Hx9WI-pji-7HakeKK_DRnalZg0XAyYiTQ5UGw,3946 +opencensus/trace/propagation/tracestate_string_format.py,sha256=jw_mhR2_NvuAvjrF4B4_bilLiEqw-ocTJBDXGBE1Ryo,1604 +opencensus/trace/samplers/__init__.py,sha256=MQtXRIokFq2UCXR5Xj7K8_Nk_Ga1DCIF7iwAFBpYZ-g,2895 +opencensus/trace/samplers/__pycache__/__init__.cpython-311.pyc,, +opencensus/trace/span.py,sha256=F6JaUIiyOlOp9soyu8VoKHkx3SbrizKRsLYD2p_TEd4,15598 +opencensus/trace/span_context.py,sha256=KN8newixFHvWZ-JXP_UBFyHtVUSVFm5mRFA5upV2gBE,5548 +opencensus/trace/span_data.py,sha256=aCJm2UwEIcJ2pDI3UXHtNsyVadAUOIvC05eqybIN_yA,6514 +opencensus/trace/stack_trace.py,sha256=rm1kCy2U_sXq9ndW0DVcHfZO98X8jIJo8Fdb2nAr0bQ,6998 +opencensus/trace/status.py,sha256=yLcQgs8gvL0Yb4lqJ3ZFsWFBdU5vkeQtOfSWUyVBqlk,2624 +opencensus/trace/time_event.py,sha256=bEZ8_6zVheAn8V-3OI5lL76mZxJ-9tJUOCJBMAZSXS0,4301 +opencensus/trace/trace_options.py,sha256=I-SOm3_iaPyjjZR69D4XmFcQaEqTu19A-Btk6iaR72o,2651 +opencensus/trace/tracer.py,sha256=bORAnkVg1y__ZJt3keuxqms2dB4NpW6zclKNDYaUK0I,5166 +opencensus/trace/tracers/__init__.py,sha256=3cwrGSSn3JMa-Au3B1875PPwj3Ndg-lxgslMZRNsW3s,596 +opencensus/trace/tracers/__pycache__/__init__.cpython-311.pyc,, +opencensus/trace/tracers/__pycache__/base.cpython-311.pyc,, +opencensus/trace/tracers/__pycache__/context_tracer.cpython-311.pyc,, +opencensus/trace/tracers/__pycache__/noop_tracer.cpython-311.pyc,, +opencensus/trace/tracers/base.py,sha256=KRi_n-OAvpFHZnhLl8ZTlWofTmvoZEeaa18OMaG9Kr4,2622 +opencensus/trace/tracers/context_tracer.py,sha256=RGTy0IzYEsEgxj5u6eBG7PrmYfs7Pq-0ZCyDSHBJN-Q,5991 +opencensus/trace/tracers/noop_tracer.py,sha256=CotRxATBNii4YSNi--7_fl7-iIFj1z6rQmCzzk2f07k,2686 +opencensus/trace/tracestate.py,sha256=mKFckhglUTxHdwNnoGdHoaQeJcdOvYPCbCX7mIr0MUI,2516 +opencensus/trace/utils.py,sha256=khhyGNpkjOL8Sognf_zdZn_WSiHHKhrgSf31mDAtsBU,4059 diff --git a/.venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/WHEEL 
b/.venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/WHEEL new file mode 100644 index 0000000000000000000000000000000000000000..01b8fc7d4a10cb8b4f1d21f11d3398d07d6b3478 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/WHEEL @@ -0,0 +1,6 @@ +Wheel-Version: 1.0 +Generator: bdist_wheel (0.36.2) +Root-Is-Purelib: true +Tag: py2-none-any +Tag: py3-none-any + diff --git a/.venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/namespace_packages.txt b/.venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/namespace_packages.txt new file mode 100644 index 0000000000000000000000000000000000000000..8b137891791fe96927ad78e64b0aad7bded08bdc --- /dev/null +++ b/.venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/namespace_packages.txt @@ -0,0 +1 @@ + diff --git a/.venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/top_level.txt b/.venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/top_level.txt new file mode 100644 index 0000000000000000000000000000000000000000..030193411d1ebf3b5f61aa9b67be1c93e5349045 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/top_level.txt @@ -0,0 +1 @@ +opencensus diff --git a/.venv/lib/python3.11/site-packages/pybind11/__init__.py b/.venv/lib/python3.11/site-packages/pybind11/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..b14660caeb05d25ad042d7652acb918e0965bf25 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/pybind11/__init__.py @@ -0,0 +1,19 @@ +from __future__ import annotations + +import sys + +if sys.version_info < (3, 7): # noqa: UP036 + msg = "pybind11 does not support Python < 3.7. v2.12 was the last release supporting Python 3.6." 
+ raise ImportError(msg) + + +from ._version import __version__, version_info +from .commands import get_cmake_dir, get_include, get_pkgconfig_dir + +__all__ = ( + "version_info", + "__version__", + "get_include", + "get_cmake_dir", + "get_pkgconfig_dir", +) diff --git a/.venv/lib/python3.11/site-packages/pybind11/__main__.py b/.venv/lib/python3.11/site-packages/pybind11/__main__.py new file mode 100644 index 0000000000000000000000000000000000000000..0abc7e211722165a48779832680352b082cf9f7f --- /dev/null +++ b/.venv/lib/python3.11/site-packages/pybind11/__main__.py @@ -0,0 +1,86 @@ +# pylint: disable=missing-function-docstring +from __future__ import annotations + +import argparse +import re +import sys +import sysconfig + +from ._version import __version__ +from .commands import get_cmake_dir, get_include, get_pkgconfig_dir + +# This is the conditional used for os.path being posixpath +if "posix" in sys.builtin_module_names: + from shlex import quote +elif "nt" in sys.builtin_module_names: + # See https://github.com/mesonbuild/meson/blob/db22551ed9d2dd7889abea01cc1c7bba02bf1c75/mesonbuild/utils/universal.py#L1092-L1121 + # and the original documents: + # https://docs.microsoft.com/en-us/cpp/c-language/parsing-c-command-line-arguments and + # https://blogs.msdn.microsoft.com/twistylittlepassagesallalike/2011/04/23/everyone-quotes-command-line-arguments-the-wrong-way/ + UNSAFE = re.compile("[ \t\n\r]") + + def quote(s: str) -> str: + if s and not UNSAFE.search(s): + return s + + # Paths cannot contain a '"' on Windows, so we don't need to worry + # about nuanced counting here. 
+ return f'"{s}\\"' if s.endswith("\\") else f'"{s}"' +else: + + def quote(s: str) -> str: + return s + + +def print_includes() -> None: + dirs = [ + sysconfig.get_path("include"), + sysconfig.get_path("platinclude"), + get_include(), + ] + + # Make unique but preserve order + unique_dirs = [] + for d in dirs: + if d and d not in unique_dirs: + unique_dirs.append(d) + + print(" ".join(quote(f"-I{d}") for d in unique_dirs)) + + +def main() -> None: + parser = argparse.ArgumentParser() + parser.add_argument( + "--version", + action="version", + version=__version__, + help="Print the version and exit.", + ) + parser.add_argument( + "--includes", + action="store_true", + help="Include flags for both pybind11 and Python headers.", + ) + parser.add_argument( + "--cmakedir", + action="store_true", + help="Print the CMake module directory, ideal for setting -Dpybind11_ROOT in CMake.", + ) + parser.add_argument( + "--pkgconfigdir", + action="store_true", + help="Print the pkgconfig directory, ideal for setting $PKG_CONFIG_PATH.", + ) + args = parser.parse_args() + if not sys.argv[1:]: + parser.print_help() + if args.includes: + print_includes() + if args.cmakedir: + print(quote(get_cmake_dir())) + if args.pkgconfigdir: + print(quote(get_pkgconfig_dir())) + + +if __name__ == "__main__": + main() diff --git a/.venv/lib/python3.11/site-packages/pybind11/_version.py b/.venv/lib/python3.11/site-packages/pybind11/_version.py new file mode 100644 index 0000000000000000000000000000000000000000..c298836bc33beb10a672a07bd027869c6a93ba1c --- /dev/null +++ b/.venv/lib/python3.11/site-packages/pybind11/_version.py @@ -0,0 +1,12 @@ +from __future__ import annotations + + +def _to_int(s: str) -> int | str: + try: + return int(s) + except ValueError: + return s + + +__version__ = "2.13.6" +version_info = tuple(_to_int(s) for s in __version__.split(".")) diff --git a/.venv/lib/python3.11/site-packages/pybind11/commands.py b/.venv/lib/python3.11/site-packages/pybind11/commands.py new file 
mode 100644 index 0000000000000000000000000000000000000000..d535b6cca4e08ee977c948c8ae8affda0e774a93 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/pybind11/commands.py @@ -0,0 +1,39 @@ +from __future__ import annotations + +import os + +DIR = os.path.abspath(os.path.dirname(__file__)) + + +def get_include(user: bool = False) -> str: # noqa: ARG001 + """ + Return the path to the pybind11 include directory. The historical "user" + argument is unused, and may be removed. + """ + installed_path = os.path.join(DIR, "include") + source_path = os.path.join(os.path.dirname(DIR), "include") + return installed_path if os.path.exists(installed_path) else source_path + + +def get_cmake_dir() -> str: + """ + Return the path to the pybind11 CMake module directory. + """ + cmake_installed_path = os.path.join(DIR, "share", "cmake", "pybind11") + if os.path.exists(cmake_installed_path): + return cmake_installed_path + + msg = "pybind11 not installed, installation required to access the CMake files" + raise ImportError(msg) + + +def get_pkgconfig_dir() -> str: + """ + Return the path to the pybind11 pkgconfig directory. + """ + pkgconfig_installed_path = os.path.join(DIR, "share", "pkgconfig") + if os.path.exists(pkgconfig_installed_path): + return pkgconfig_installed_path + + msg = "pybind11 not installed, installation required to access the pkgconfig files" + raise ImportError(msg) diff --git a/.venv/lib/python3.11/site-packages/pybind11/include/pybind11/attr.h b/.venv/lib/python3.11/site-packages/pybind11/include/pybind11/attr.h new file mode 100644 index 0000000000000000000000000000000000000000..1044db94d906ac5fcf6faab6ac7668187314598f --- /dev/null +++ b/.venv/lib/python3.11/site-packages/pybind11/include/pybind11/attr.h @@ -0,0 +1,690 @@ +/* + pybind11/attr.h: Infrastructure for processing custom + type and function attributes + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. 
Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. +*/ + +#pragma once + +#include "detail/common.h" +#include "cast.h" + +#include + +PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) + +/// \addtogroup annotations +/// @{ + +/// Annotation for methods +struct is_method { + handle class_; + explicit is_method(const handle &c) : class_(c) {} +}; + +/// Annotation for setters +struct is_setter {}; + +/// Annotation for operators +struct is_operator {}; + +/// Annotation for classes that cannot be subclassed +struct is_final {}; + +/// Annotation for parent scope +struct scope { + handle value; + explicit scope(const handle &s) : value(s) {} +}; + +/// Annotation for documentation +struct doc { + const char *value; + explicit doc(const char *value) : value(value) {} +}; + +/// Annotation for function names +struct name { + const char *value; + explicit name(const char *value) : value(value) {} +}; + +/// Annotation indicating that a function is an overload associated with a given "sibling" +struct sibling { + handle value; + explicit sibling(const handle &value) : value(value.ptr()) {} +}; + +/// Annotation indicating that a class derives from another given type +template +struct base { + + PYBIND11_DEPRECATED( + "base() was deprecated in favor of specifying 'T' as a template argument to class_") + base() = default; +}; + +/// Keep patient alive while nurse lives +template +struct keep_alive {}; + +/// Annotation indicating that a class is involved in a multiple inheritance relationship +struct multiple_inheritance {}; + +/// Annotation which enables dynamic attributes, i.e. adds `__dict__` to a class +struct dynamic_attr {}; + +/// Annotation which enables the buffer protocol for a type +struct buffer_protocol {}; + +/// Annotation which requests that a special metaclass is created for a type +struct metaclass { + handle value; + + PYBIND11_DEPRECATED("py::metaclass() is no longer required. 
It's turned on by default now.") + metaclass() = default; + + /// Override pybind11's default metaclass + explicit metaclass(handle value) : value(value) {} +}; + +/// Specifies a custom callback with signature `void (PyHeapTypeObject*)` that +/// may be used to customize the Python type. +/// +/// The callback is invoked immediately before `PyType_Ready`. +/// +/// Note: This is an advanced interface, and uses of it may require changes to +/// work with later versions of pybind11. You may wish to consult the +/// implementation of `make_new_python_type` in `detail/classes.h` to understand +/// the context in which the callback will be run. +struct custom_type_setup { + using callback = std::function; + + explicit custom_type_setup(callback value) : value(std::move(value)) {} + + callback value; +}; + +/// Annotation that marks a class as local to the module: +struct module_local { + const bool value; + constexpr explicit module_local(bool v = true) : value(v) {} +}; + +/// Annotation to mark enums as an arithmetic type +struct arithmetic {}; + +/// Mark a function for addition at the beginning of the existing overload chain instead of the end +struct prepend {}; + +/** \rst + A call policy which places one or more guard variables (``Ts...``) around the function call. + + For example, this definition: + + .. code-block:: cpp + + m.def("foo", foo, py::call_guard()); + + is equivalent to the following pseudocode: + + .. code-block:: cpp + + m.def("foo", [](args...) 
{ + T scope_guard; + return foo(args...); // forwarded arguments + }); + \endrst */ +template +struct call_guard; + +template <> +struct call_guard<> { + using type = detail::void_type; +}; + +template +struct call_guard { + static_assert(std::is_default_constructible::value, + "The guard type must be default constructible"); + + using type = T; +}; + +template +struct call_guard { + struct type { + T guard{}; // Compose multiple guard types with left-to-right default-constructor order + typename call_guard::type next{}; + }; +}; + +/// @} annotations + +PYBIND11_NAMESPACE_BEGIN(detail) +/* Forward declarations */ +enum op_id : int; +enum op_type : int; +struct undefined_t; +template +struct op_; +void keep_alive_impl(size_t Nurse, size_t Patient, function_call &call, handle ret); + +/// Internal data structure which holds metadata about a keyword argument +struct argument_record { + const char *name; ///< Argument name + const char *descr; ///< Human-readable version of the argument value + handle value; ///< Associated Python object + bool convert : 1; ///< True if the argument is allowed to convert when loading + bool none : 1; ///< True if None is allowed when loading + + argument_record(const char *name, const char *descr, handle value, bool convert, bool none) + : name(name), descr(descr), value(value), convert(convert), none(none) {} +}; + +/// Internal data structure which holds metadata about a bound function (signature, overloads, +/// etc.) +struct function_record { + function_record() + : is_constructor(false), is_new_style_constructor(false), is_stateless(false), + is_operator(false), is_method(false), is_setter(false), has_args(false), + has_kwargs(false), prepend(false) {} + + /// Function name + char *name = nullptr; /* why no C++ strings? They generate heavier code.. 
*/ + + // User-specified documentation string + char *doc = nullptr; + + /// Human-readable version of the function signature + char *signature = nullptr; + + /// List of registered keyword arguments + std::vector args; + + /// Pointer to lambda function which converts arguments and performs the actual call + handle (*impl)(function_call &) = nullptr; + + /// Storage for the wrapped function pointer and captured data, if any + void *data[3] = {}; + + /// Pointer to custom destructor for 'data' (if needed) + void (*free_data)(function_record *ptr) = nullptr; + + /// Return value policy associated with this function + return_value_policy policy = return_value_policy::automatic; + + /// True if name == '__init__' + bool is_constructor : 1; + + /// True if this is a new-style `__init__` defined in `detail/init.h` + bool is_new_style_constructor : 1; + + /// True if this is a stateless function pointer + bool is_stateless : 1; + + /// True if this is an operator (__add__), etc. + bool is_operator : 1; + + /// True if this is a method + bool is_method : 1; + + /// True if this is a setter + bool is_setter : 1; + + /// True if the function has a '*args' argument + bool has_args : 1; + + /// True if the function has a '**kwargs' argument + bool has_kwargs : 1; + + /// True if this function is to be inserted at the beginning of the overload resolution chain + bool prepend : 1; + + /// Number of arguments (including py::args and/or py::kwargs, if present) + std::uint16_t nargs; + + /// Number of leading positional arguments, which are terminated by a py::args or py::kwargs + /// argument or by a py::kw_only annotation. 
+ std::uint16_t nargs_pos = 0; + + /// Number of leading arguments (counted in `nargs`) that are positional-only + std::uint16_t nargs_pos_only = 0; + + /// Python method object + PyMethodDef *def = nullptr; + + /// Python handle to the parent scope (a class or a module) + handle scope; + + /// Python handle to the sibling function representing an overload chain + handle sibling; + + /// Pointer to next overload + function_record *next = nullptr; +}; + +/// Special data structure which (temporarily) holds metadata about a bound class +struct type_record { + PYBIND11_NOINLINE type_record() + : multiple_inheritance(false), dynamic_attr(false), buffer_protocol(false), + default_holder(true), module_local(false), is_final(false) {} + + /// Handle to the parent scope + handle scope; + + /// Name of the class + const char *name = nullptr; + + // Pointer to RTTI type_info data structure + const std::type_info *type = nullptr; + + /// How large is the underlying C++ type? + size_t type_size = 0; + + /// What is the alignment of the underlying C++ type? + size_t type_align = 0; + + /// How large is the type's holder? + size_t holder_size = 0; + + /// The global operator new can be overridden with a class-specific variant + void *(*operator_new)(size_t) = nullptr; + + /// Function pointer to class_<..>::init_instance + void (*init_instance)(instance *, const void *) = nullptr; + + /// Function pointer to class_<..>::dealloc + void (*dealloc)(detail::value_and_holder &) = nullptr; + + /// List of base classes of the newly created type + list bases; + + /// Optional docstring + const char *doc = nullptr; + + /// Custom metaclass (optional) + handle metaclass; + + /// Custom type setup. + custom_type_setup::callback custom_type_setup_callback; + + /// Multiple inheritance marker + bool multiple_inheritance : 1; + + /// Does the class manage a __dict__? + bool dynamic_attr : 1; + + /// Does the class implement the buffer protocol? 
+ bool buffer_protocol : 1; + + /// Is the default (unique_ptr) holder type used? + bool default_holder : 1; + + /// Is the class definition local to the module shared object? + bool module_local : 1; + + /// Is the class inheritable from python classes? + bool is_final : 1; + + PYBIND11_NOINLINE void add_base(const std::type_info &base, void *(*caster)(void *) ) { + auto *base_info = detail::get_type_info(base, false); + if (!base_info) { + std::string tname(base.name()); + detail::clean_type_id(tname); + pybind11_fail("generic_type: type \"" + std::string(name) + + "\" referenced unknown base type \"" + tname + "\""); + } + + if (default_holder != base_info->default_holder) { + std::string tname(base.name()); + detail::clean_type_id(tname); + pybind11_fail("generic_type: type \"" + std::string(name) + "\" " + + (default_holder ? "does not have" : "has") + + " a non-default holder type while its base \"" + tname + "\" " + + (base_info->default_holder ? "does not" : "does")); + } + + bases.append((PyObject *) base_info->type); + +#if PY_VERSION_HEX < 0x030B0000 + dynamic_attr |= base_info->type->tp_dictoffset != 0; +#else + dynamic_attr |= (base_info->type->tp_flags & Py_TPFLAGS_MANAGED_DICT) != 0; +#endif + + if (caster) { + base_info->implicit_casts.emplace_back(type, caster); + } + } +}; + +inline function_call::function_call(const function_record &f, handle p) : func(f), parent(p) { + args.reserve(f.nargs); + args_convert.reserve(f.nargs); +} + +/// Tag for a new-style `__init__` defined in `detail/init.h` +struct is_new_style_constructor {}; + +/** + * Partial template specializations to process custom attributes provided to + * cpp_function_ and class_. These are either used to initialize the respective + * fields in the type_record and function_record data structures or executed at + * runtime to deal with custom call policies (e.g. keep_alive). 
+ */ +template +struct process_attribute; + +template +struct process_attribute_default { + /// Default implementation: do nothing + static void init(const T &, function_record *) {} + static void init(const T &, type_record *) {} + static void precall(function_call &) {} + static void postcall(function_call &, handle) {} +}; + +/// Process an attribute specifying the function's name +template <> +struct process_attribute : process_attribute_default { + static void init(const name &n, function_record *r) { r->name = const_cast(n.value); } +}; + +/// Process an attribute specifying the function's docstring +template <> +struct process_attribute : process_attribute_default { + static void init(const doc &n, function_record *r) { r->doc = const_cast(n.value); } +}; + +/// Process an attribute specifying the function's docstring (provided as a C-style string) +template <> +struct process_attribute : process_attribute_default { + static void init(const char *d, function_record *r) { r->doc = const_cast(d); } + static void init(const char *d, type_record *r) { r->doc = d; } +}; +template <> +struct process_attribute : process_attribute {}; + +/// Process an attribute indicating the function's return value policy +template <> +struct process_attribute : process_attribute_default { + static void init(const return_value_policy &p, function_record *r) { r->policy = p; } +}; + +/// Process an attribute which indicates that this is an overloaded function associated with a +/// given sibling +template <> +struct process_attribute : process_attribute_default { + static void init(const sibling &s, function_record *r) { r->sibling = s.value; } +}; + +/// Process an attribute which indicates that this function is a method +template <> +struct process_attribute : process_attribute_default { + static void init(const is_method &s, function_record *r) { + r->is_method = true; + r->scope = s.class_; + } +}; + +/// Process an attribute which indicates that this function is a setter 
+template <> +struct process_attribute : process_attribute_default { + static void init(const is_setter &, function_record *r) { r->is_setter = true; } +}; + +/// Process an attribute which indicates the parent scope of a method +template <> +struct process_attribute : process_attribute_default { + static void init(const scope &s, function_record *r) { r->scope = s.value; } +}; + +/// Process an attribute which indicates that this function is an operator +template <> +struct process_attribute : process_attribute_default { + static void init(const is_operator &, function_record *r) { r->is_operator = true; } +}; + +template <> +struct process_attribute + : process_attribute_default { + static void init(const is_new_style_constructor &, function_record *r) { + r->is_new_style_constructor = true; + } +}; + +inline void check_kw_only_arg(const arg &a, function_record *r) { + if (r->args.size() > r->nargs_pos && (!a.name || a.name[0] == '\0')) { + pybind11_fail("arg(): cannot specify an unnamed argument after a kw_only() annotation or " + "args() argument"); + } +} + +inline void append_self_arg_if_needed(function_record *r) { + if (r->is_method && r->args.empty()) { + r->args.emplace_back("self", nullptr, handle(), /*convert=*/true, /*none=*/false); + } +} + +/// Process a keyword argument attribute (*without* a default value) +template <> +struct process_attribute : process_attribute_default { + static void init(const arg &a, function_record *r) { + append_self_arg_if_needed(r); + r->args.emplace_back(a.name, nullptr, handle(), !a.flag_noconvert, a.flag_none); + + check_kw_only_arg(a, r); + } +}; + +/// Process a keyword argument attribute (*with* a default value) +template <> +struct process_attribute : process_attribute_default { + static void init(const arg_v &a, function_record *r) { + if (r->is_method && r->args.empty()) { + r->args.emplace_back( + "self", /*descr=*/nullptr, /*parent=*/handle(), /*convert=*/true, /*none=*/false); + } + + if (!a.value) { +#if 
defined(PYBIND11_DETAILED_ERROR_MESSAGES) + std::string descr("'"); + if (a.name) { + descr += std::string(a.name) + ": "; + } + descr += a.type + "'"; + if (r->is_method) { + if (r->name) { + descr += " in method '" + (std::string) str(r->scope) + "." + + (std::string) r->name + "'"; + } else { + descr += " in method of '" + (std::string) str(r->scope) + "'"; + } + } else if (r->name) { + descr += " in function '" + (std::string) r->name + "'"; + } + pybind11_fail("arg(): could not convert default argument " + descr + + " into a Python object (type not registered yet?)"); +#else + pybind11_fail("arg(): could not convert default argument " + "into a Python object (type not registered yet?). " + "#define PYBIND11_DETAILED_ERROR_MESSAGES or compile in debug mode for " + "more information."); +#endif + } + r->args.emplace_back(a.name, a.descr, a.value.inc_ref(), !a.flag_noconvert, a.flag_none); + + check_kw_only_arg(a, r); + } +}; + +/// Process a keyword-only-arguments-follow pseudo argument +template <> +struct process_attribute : process_attribute_default { + static void init(const kw_only &, function_record *r) { + append_self_arg_if_needed(r); + if (r->has_args && r->nargs_pos != static_cast(r->args.size())) { + pybind11_fail("Mismatched args() and kw_only(): they must occur at the same relative " + "argument location (or omit kw_only() entirely)"); + } + r->nargs_pos = static_cast(r->args.size()); + } +}; + +/// Process a positional-only-argument maker +template <> +struct process_attribute : process_attribute_default { + static void init(const pos_only &, function_record *r) { + append_self_arg_if_needed(r); + r->nargs_pos_only = static_cast(r->args.size()); + if (r->nargs_pos_only > r->nargs_pos) { + pybind11_fail("pos_only(): cannot follow a py::args() argument"); + } + // It also can't follow a kw_only, but a static_assert in pybind11.h checks that + } +}; + +/// Process a parent class attribute. 
Single inheritance only (class_ itself already guarantees +/// that) +template +struct process_attribute::value>> + : process_attribute_default { + static void init(const handle &h, type_record *r) { r->bases.append(h); } +}; + +/// Process a parent class attribute (deprecated, does not support multiple inheritance) +template +struct process_attribute> : process_attribute_default> { + static void init(const base &, type_record *r) { r->add_base(typeid(T), nullptr); } +}; + +/// Process a multiple inheritance attribute +template <> +struct process_attribute : process_attribute_default { + static void init(const multiple_inheritance &, type_record *r) { + r->multiple_inheritance = true; + } +}; + +template <> +struct process_attribute : process_attribute_default { + static void init(const dynamic_attr &, type_record *r) { r->dynamic_attr = true; } +}; + +template <> +struct process_attribute { + static void init(const custom_type_setup &value, type_record *r) { + r->custom_type_setup_callback = value.value; + } +}; + +template <> +struct process_attribute : process_attribute_default { + static void init(const is_final &, type_record *r) { r->is_final = true; } +}; + +template <> +struct process_attribute : process_attribute_default { + static void init(const buffer_protocol &, type_record *r) { r->buffer_protocol = true; } +}; + +template <> +struct process_attribute : process_attribute_default { + static void init(const metaclass &m, type_record *r) { r->metaclass = m.value; } +}; + +template <> +struct process_attribute : process_attribute_default { + static void init(const module_local &l, type_record *r) { r->module_local = l.value; } +}; + +/// Process a 'prepend' attribute, putting this at the beginning of the overload chain +template <> +struct process_attribute : process_attribute_default { + static void init(const prepend &, function_record *r) { r->prepend = true; } +}; + +/// Process an 'arithmetic' attribute for enums (does nothing here) +template <> 
+struct process_attribute : process_attribute_default {}; + +template +struct process_attribute> : process_attribute_default> {}; + +/** + * Process a keep_alive call policy -- invokes keep_alive_impl during the + * pre-call handler if both Nurse, Patient != 0 and use the post-call handler + * otherwise + */ +template +struct process_attribute> + : public process_attribute_default> { + template = 0> + static void precall(function_call &call) { + keep_alive_impl(Nurse, Patient, call, handle()); + } + template = 0> + static void postcall(function_call &, handle) {} + template = 0> + static void precall(function_call &) {} + template = 0> + static void postcall(function_call &call, handle ret) { + keep_alive_impl(Nurse, Patient, call, ret); + } +}; + +/// Recursively iterate over variadic template arguments +template +struct process_attributes { + static void init(const Args &...args, function_record *r) { + PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(r); + PYBIND11_WORKAROUND_INCORRECT_GCC_UNUSED_BUT_SET_PARAMETER(r); + using expander = int[]; + (void) expander{ + 0, ((void) process_attribute::type>::init(args, r), 0)...}; + } + static void init(const Args &...args, type_record *r) { + PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(r); + PYBIND11_WORKAROUND_INCORRECT_GCC_UNUSED_BUT_SET_PARAMETER(r); + using expander = int[]; + (void) expander{0, + (process_attribute::type>::init(args, r), 0)...}; + } + static void precall(function_call &call) { + PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(call); + using expander = int[]; + (void) expander{0, + (process_attribute::type>::precall(call), 0)...}; + } + static void postcall(function_call &call, handle fn_ret) { + PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(call, fn_ret); + PYBIND11_WORKAROUND_INCORRECT_GCC_UNUSED_BUT_SET_PARAMETER(fn_ret); + using expander = int[]; + (void) expander{ + 0, (process_attribute::type>::postcall(call, fn_ret), 0)...}; + } +}; + +template +using is_call_guard = is_instantiation; + +/// Extract the ``type`` 
from the first `call_guard` in `Extras...` (or `void_type` if none found) +template +using extract_guard_t = typename exactly_one_t, Extra...>::type; + +/// Check the number of named arguments at compile time +template ::value...), + size_t self = constexpr_sum(std::is_same::value...)> +constexpr bool expected_num_args(size_t nargs, bool has_args, bool has_kwargs) { + PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(nargs, has_args, has_kwargs); + return named == 0 || (self + named + size_t(has_args) + size_t(has_kwargs)) == nargs; +} + +PYBIND11_NAMESPACE_END(detail) +PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) diff --git a/.venv/lib/python3.11/site-packages/pybind11/include/pybind11/buffer_info.h b/.venv/lib/python3.11/site-packages/pybind11/include/pybind11/buffer_info.h new file mode 100644 index 0000000000000000000000000000000000000000..75aec0ba3092a401a73f7cdb09b0894aef85cc27 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/pybind11/include/pybind11/buffer_info.h @@ -0,0 +1,208 @@ +/* + pybind11/buffer_info.h: Python buffer object interface + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +#pragma once + +#include "detail/common.h" + +PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) + +PYBIND11_NAMESPACE_BEGIN(detail) + +// Default, C-style strides +inline std::vector c_strides(const std::vector &shape, ssize_t itemsize) { + auto ndim = shape.size(); + std::vector strides(ndim, itemsize); + if (ndim > 0) { + for (size_t i = ndim - 1; i > 0; --i) { + strides[i - 1] = strides[i] * shape[i]; + } + } + return strides; +} + +// F-style strides; default when constructing an array_t with `ExtraFlags & f_style` +inline std::vector f_strides(const std::vector &shape, ssize_t itemsize) { + auto ndim = shape.size(); + std::vector strides(ndim, itemsize); + for (size_t i = 1; i < ndim; ++i) { + strides[i] = strides[i - 1] * shape[i - 1]; + } + return strides; +} + +template +struct compare_buffer_info; + +PYBIND11_NAMESPACE_END(detail) + +/// Information record describing a Python buffer object +struct buffer_info { + void *ptr = nullptr; // Pointer to the underlying storage + ssize_t itemsize = 0; // Size of individual items in bytes + ssize_t size = 0; // Total number of entries + std::string format; // For homogeneous buffers, this should be set to + // format_descriptor::format() + ssize_t ndim = 0; // Number of dimensions + std::vector shape; // Shape of the tensor (1 entry per dimension) + std::vector strides; // Number of bytes between adjacent entries + // (for each per dimension) + bool readonly = false; // flag to indicate if the underlying storage may be written to + + buffer_info() = default; + + buffer_info(void *ptr, + ssize_t itemsize, + const std::string &format, + ssize_t ndim, + detail::any_container shape_in, + detail::any_container strides_in, + bool readonly = false) + : ptr(ptr), itemsize(itemsize), size(1), format(format), ndim(ndim), + shape(std::move(shape_in)), strides(std::move(strides_in)), readonly(readonly) { + if (ndim != (ssize_t) shape.size() || ndim != (ssize_t) strides.size()) { + pybind11_fail("buffer_info: ndim doesn't 
match shape and/or strides length"); + } + for (size_t i = 0; i < (size_t) ndim; ++i) { + size *= shape[i]; + } + } + + template + buffer_info(T *ptr, + detail::any_container shape_in, + detail::any_container strides_in, + bool readonly = false) + : buffer_info(private_ctr_tag(), + ptr, + sizeof(T), + format_descriptor::format(), + static_cast(shape_in->size()), + std::move(shape_in), + std::move(strides_in), + readonly) {} + + buffer_info(void *ptr, + ssize_t itemsize, + const std::string &format, + ssize_t size, + bool readonly = false) + : buffer_info(ptr, itemsize, format, 1, {size}, {itemsize}, readonly) {} + + template + buffer_info(T *ptr, ssize_t size, bool readonly = false) + : buffer_info(ptr, sizeof(T), format_descriptor::format(), size, readonly) {} + + template + buffer_info(const T *ptr, ssize_t size, bool readonly = true) + : buffer_info( + const_cast(ptr), sizeof(T), format_descriptor::format(), size, readonly) {} + + explicit buffer_info(Py_buffer *view, bool ownview = true) + : buffer_info( + view->buf, + view->itemsize, + view->format, + view->ndim, + {view->shape, view->shape + view->ndim}, + /* Though buffer::request() requests PyBUF_STRIDES, ctypes objects + * ignore this flag and return a view with NULL strides. + * When strides are NULL, build them manually. */ + view->strides + ? 
std::vector(view->strides, view->strides + view->ndim) + : detail::c_strides({view->shape, view->shape + view->ndim}, view->itemsize), + (view->readonly != 0)) { + // NOLINTNEXTLINE(cppcoreguidelines-prefer-member-initializer) + this->m_view = view; + // NOLINTNEXTLINE(cppcoreguidelines-prefer-member-initializer) + this->ownview = ownview; + } + + buffer_info(const buffer_info &) = delete; + buffer_info &operator=(const buffer_info &) = delete; + + buffer_info(buffer_info &&other) noexcept { (*this) = std::move(other); } + + buffer_info &operator=(buffer_info &&rhs) noexcept { + ptr = rhs.ptr; + itemsize = rhs.itemsize; + size = rhs.size; + format = std::move(rhs.format); + ndim = rhs.ndim; + shape = std::move(rhs.shape); + strides = std::move(rhs.strides); + std::swap(m_view, rhs.m_view); + std::swap(ownview, rhs.ownview); + readonly = rhs.readonly; + return *this; + } + + ~buffer_info() { + if (m_view && ownview) { + PyBuffer_Release(m_view); + delete m_view; + } + } + + Py_buffer *view() const { return m_view; } + Py_buffer *&view() { return m_view; } + + /* True if the buffer item type is equivalent to `T`. */ + // To define "equivalent" by example: + // `buffer_info::item_type_is_equivalent_to(b)` and + // `buffer_info::item_type_is_equivalent_to(b)` may both be true + // on some platforms, but `int` and `unsigned` will never be equivalent. + // For the ground truth, please inspect `detail::compare_buffer_info<>`. 
+ template + bool item_type_is_equivalent_to() const { + return detail::compare_buffer_info::compare(*this); + } + +private: + struct private_ctr_tag {}; + + buffer_info(private_ctr_tag, + void *ptr, + ssize_t itemsize, + const std::string &format, + ssize_t ndim, + detail::any_container &&shape_in, + detail::any_container &&strides_in, + bool readonly) + : buffer_info( + ptr, itemsize, format, ndim, std::move(shape_in), std::move(strides_in), readonly) {} + + Py_buffer *m_view = nullptr; + bool ownview = false; +}; + +PYBIND11_NAMESPACE_BEGIN(detail) + +template +struct compare_buffer_info { + static bool compare(const buffer_info &b) { + // NOLINTNEXTLINE(bugprone-sizeof-expression) Needed for `PyObject *` + return b.format == format_descriptor::format() && b.itemsize == (ssize_t) sizeof(T); + } +}; + +template +struct compare_buffer_info::value>> { + static bool compare(const buffer_info &b) { + return (size_t) b.itemsize == sizeof(T) + && (b.format == format_descriptor::value + || ((sizeof(T) == sizeof(long)) + && b.format == (std::is_unsigned::value ? "L" : "l")) + || ((sizeof(T) == sizeof(size_t)) + && b.format == (std::is_unsigned::value ? "N" : "n"))); + } +}; + +PYBIND11_NAMESPACE_END(detail) +PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE) diff --git a/.venv/lib/python3.11/site-packages/pybind11/include/pybind11/cast.h b/.venv/lib/python3.11/site-packages/pybind11/include/pybind11/cast.h new file mode 100644 index 0000000000000000000000000000000000000000..0f3091f6869900d14d521791198d97fcccf40629 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/pybind11/include/pybind11/cast.h @@ -0,0 +1,1855 @@ +/* + pybind11/cast.h: Partial template specializations to cast between + C++ and Python types + + Copyright (c) 2016 Wenzel Jakob + + All rights reserved. Use of this source code is governed by a + BSD-style license that can be found in the LICENSE file. 
+*/ + +#pragma once + +#include "detail/common.h" +#include "detail/descr.h" +#include "detail/type_caster_base.h" +#include "detail/typeid.h" +#include "pytypes.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE) + +PYBIND11_WARNING_DISABLE_MSVC(4127) + +PYBIND11_NAMESPACE_BEGIN(detail) + +template +class type_caster : public type_caster_base {}; +template +using make_caster = type_caster>; + +// Shortcut for calling a caster's `cast_op_type` cast operator for casting a type_caster to a T +template +typename make_caster::template cast_op_type cast_op(make_caster &caster) { + using result_t = typename make_caster::template cast_op_type; // See PR #4893 + return caster.operator result_t(); +} +template +typename make_caster::template cast_op_type::type> +cast_op(make_caster &&caster) { + using result_t = typename make_caster::template cast_op_type< + typename std::add_rvalue_reference::type>; // See PR #4893 + return std::move(caster).operator result_t(); +} + +template +class type_caster> { +private: + using caster_t = make_caster; + caster_t subcaster; + using reference_t = type &; + using subcaster_cast_op_type = typename caster_t::template cast_op_type; + + static_assert( + std::is_same::type &, subcaster_cast_op_type>::value + || std::is_same::value, + "std::reference_wrapper caster requires T to have a caster with an " + "`operator T &()` or `operator const T &()`"); + +public: + bool load(handle src, bool convert) { return subcaster.load(src, convert); } + static constexpr auto name = caster_t::name; + static handle + cast(const std::reference_wrapper &src, return_value_policy policy, handle parent) { + // It is definitely wrong to take ownership of this pointer, so mask that rvp + if (policy == return_value_policy::take_ownership + || policy == return_value_policy::automatic) { + policy = return_value_policy::automatic_reference; + } + return 
caster_t::cast(&src.get(), policy, parent); + } + template + using cast_op_type = std::reference_wrapper; + explicit operator std::reference_wrapper() { return cast_op(subcaster); } +}; + +#define PYBIND11_TYPE_CASTER(type, py_name) \ +protected: \ + type value; \ + \ +public: \ + static constexpr auto name = py_name; \ + template >::value, \ + int> \ + = 0> \ + static ::pybind11::handle cast( \ + T_ *src, ::pybind11::return_value_policy policy, ::pybind11::handle parent) { \ + if (!src) \ + return ::pybind11::none().release(); \ + if (policy == ::pybind11::return_value_policy::take_ownership) { \ + auto h = cast(std::move(*src), policy, parent); \ + delete src; \ + return h; \ + } \ + return cast(*src, policy, parent); \ + } \ + operator type *() { return &value; } /* NOLINT(bugprone-macro-parentheses) */ \ + operator type &() { return value; } /* NOLINT(bugprone-macro-parentheses) */ \ + operator type &&() && { return std::move(value); } /* NOLINT(bugprone-macro-parentheses) */ \ + template \ + using cast_op_type = ::pybind11::detail::movable_cast_op_type + +template +using is_std_char_type = any_of, /* std::string */ +#if defined(PYBIND11_HAS_U8STRING) + std::is_same, /* std::u8string */ +#endif + std::is_same, /* std::u16string */ + std::is_same, /* std::u32string */ + std::is_same /* std::wstring */ + >; + +template +struct type_caster::value && !is_std_char_type::value>> { + using _py_type_0 = conditional_t; + using _py_type_1 = conditional_t::value, + _py_type_0, + typename std::make_unsigned<_py_type_0>::type>; + using py_type = conditional_t::value, double, _py_type_1>; + +public: + bool load(handle src, bool convert) { + py_type py_value; + + if (!src) { + return false; + } + +#if !defined(PYPY_VERSION) + auto index_check = [](PyObject *o) { return PyIndex_Check(o); }; +#else + // In PyPy 7.3.3, `PyIndex_Check` is implemented by calling `__index__`, + // while CPython only considers the existence of `nb_index`/`__index__`. 
+ auto index_check = [](PyObject *o) { return hasattr(o, "__index__"); }; +#endif + + if (std::is_floating_point::value) { + if (convert || PyFloat_Check(src.ptr())) { + py_value = (py_type) PyFloat_AsDouble(src.ptr()); + } else { + return false; + } + } else if (PyFloat_Check(src.ptr()) + || (!convert && !PYBIND11_LONG_CHECK(src.ptr()) && !index_check(src.ptr()))) { + return false; + } else { + handle src_or_index = src; + // PyPy: 7.3.7's 3.8 does not implement PyLong_*'s __index__ calls. +#if PY_VERSION_HEX < 0x03080000 || defined(PYPY_VERSION) + object index; + if (!PYBIND11_LONG_CHECK(src.ptr())) { // So: index_check(src.ptr()) + index = reinterpret_steal(PyNumber_Index(src.ptr())); + if (!index) { + PyErr_Clear(); + if (!convert) + return false; + } else { + src_or_index = index; + } + } +#endif + if (std::is_unsigned::value) { + py_value = as_unsigned(src_or_index.ptr()); + } else { // signed integer: + py_value = sizeof(T) <= sizeof(long) + ? (py_type) PyLong_AsLong(src_or_index.ptr()) + : (py_type) PYBIND11_LONG_AS_LONGLONG(src_or_index.ptr()); + } + } + + // Python API reported an error + bool py_err = py_value == (py_type) -1 && PyErr_Occurred(); + + // Check to see if the conversion is valid (integers should match exactly) + // Signed/unsigned checks happen elsewhere + if (py_err + || (std::is_integral::value && sizeof(py_type) != sizeof(T) + && py_value != (py_type) (T) py_value)) { + PyErr_Clear(); + if (py_err && convert && (PyNumber_Check(src.ptr()) != 0)) { + auto tmp = reinterpret_steal(std::is_floating_point::value + ? 
PyNumber_Float(src.ptr()) + : PyNumber_Long(src.ptr())); + PyErr_Clear(); + return load(tmp, false); + } + return false; + } + + value = (T) py_value; + return true; + } + + template + static typename std::enable_if::value, handle>::type + cast(U src, return_value_policy /* policy */, handle /* parent */) { + return PyFloat_FromDouble((double) src); + } + + template + static typename std::enable_if::value && std::is_signed::value + && (sizeof(U) <= sizeof(long)), + handle>::type + cast(U src, return_value_policy /* policy */, handle /* parent */) { + return PYBIND11_LONG_FROM_SIGNED((long) src); + } + + template + static typename std::enable_if::value && std::is_unsigned::value + && (sizeof(U) <= sizeof(unsigned long)), + handle>::type + cast(U src, return_value_policy /* policy */, handle /* parent */) { + return PYBIND11_LONG_FROM_UNSIGNED((unsigned long) src); + } + + template + static typename std::enable_if::value && std::is_signed::value + && (sizeof(U) > sizeof(long)), + handle>::type + cast(U src, return_value_policy /* policy */, handle /* parent */) { + return PyLong_FromLongLong((long long) src); + } + + template + static typename std::enable_if::value && std::is_unsigned::value + && (sizeof(U) > sizeof(unsigned long)), + handle>::type + cast(U src, return_value_policy /* policy */, handle /* parent */) { + return PyLong_FromUnsignedLongLong((unsigned long long) src); + } + + PYBIND11_TYPE_CASTER(T, const_name::value>("int", "float")); +}; + +template +struct void_caster { +public: + bool load(handle src, bool) { + if (src && src.is_none()) { + return true; + } + return false; + } + static handle cast(T, return_value_policy /* policy */, handle /* parent */) { + return none().release(); + } + PYBIND11_TYPE_CASTER(T, const_name("None")); +}; + +template <> +class type_caster : public void_caster {}; + +template <> +class type_caster : public type_caster { +public: + using type_caster::cast; + + bool load(handle h, bool) { + if (!h) { + return false; + } + 
if (h.is_none()) { + value = nullptr; + return true; + } + + /* Check if this is a capsule */ + if (isinstance(h)) { + value = reinterpret_borrow(h); + return true; + } + + /* Check if this is a C++ type */ + const auto &bases = all_type_info((PyTypeObject *) type::handle_of(h).ptr()); + if (bases.size() == 1) { // Only allowing loading from a single-value type + value = values_and_holders(reinterpret_cast(h.ptr())).begin()->value_ptr(); + return true; + } + + /* Fail */ + return false; + } + + static handle cast(const void *ptr, return_value_policy /* policy */, handle /* parent */) { + if (ptr) { + return capsule(ptr).release(); + } + return none().release(); + } + + template + using cast_op_type = void *&; + explicit operator void *&() { return value; } + static constexpr auto name = const_name("capsule"); + +private: + void *value = nullptr; +}; + +template <> +class type_caster : public void_caster {}; + +template <> +class type_caster { +public: + bool load(handle src, bool convert) { + if (!src) { + return false; + } + if (src.ptr() == Py_True) { + value = true; + return true; + } + if (src.ptr() == Py_False) { + value = false; + return true; + } + if (convert || is_numpy_bool(src)) { + // (allow non-implicit conversion for numpy booleans), use strncmp + // since NumPy 1.x had an additional trailing underscore. + + Py_ssize_t res = -1; + if (src.is_none()) { + res = 0; // None is implicitly converted to False + } +#if defined(PYPY_VERSION) + // On PyPy, check that "__bool__" attr exists + else if (hasattr(src, PYBIND11_BOOL_ATTR)) { + res = PyObject_IsTrue(src.ptr()); + } +#else + // Alternate approach for CPython: this does the same as the above, but optimized + // using the CPython API so as to avoid an unneeded attribute lookup. 
+ else if (auto *tp_as_number = src.ptr()->ob_type->tp_as_number) { + if (PYBIND11_NB_BOOL(tp_as_number)) { + res = (*PYBIND11_NB_BOOL(tp_as_number))(src.ptr()); + } + } +#endif + if (res == 0 || res == 1) { + value = (res != 0); + return true; + } + PyErr_Clear(); + } + return false; + } + static handle cast(bool src, return_value_policy /* policy */, handle /* parent */) { + return handle(src ? Py_True : Py_False).inc_ref(); + } + PYBIND11_TYPE_CASTER(bool, const_name("bool")); + +private: + // Test if an object is a NumPy boolean (without fetching the type). + static inline bool is_numpy_bool(handle object) { + const char *type_name = Py_TYPE(object.ptr())->tp_name; + // Name changed to `numpy.bool` in NumPy 2, `numpy.bool_` is needed for 1.x support + return std::strcmp("numpy.bool", type_name) == 0 + || std::strcmp("numpy.bool_", type_name) == 0; + } +}; + +// Helper class for UTF-{8,16,32} C++ stl strings: +template +struct string_caster { + using CharT = typename StringType::value_type; + + // Simplify life by being able to assume standard char sizes (the standard only guarantees + // minimums, but Python requires exact sizes) + static_assert(!std::is_same::value || sizeof(CharT) == 1, + "Unsupported char size != 1"); +#if defined(PYBIND11_HAS_U8STRING) + static_assert(!std::is_same::value || sizeof(CharT) == 1, + "Unsupported char8_t size != 1"); +#endif + static_assert(!std::is_same::value || sizeof(CharT) == 2, + "Unsupported char16_t size != 2"); + static_assert(!std::is_same::value || sizeof(CharT) == 4, + "Unsupported char32_t size != 4"); + // wchar_t can be either 16 bits (Windows) or 32 (everywhere else) + static_assert(!std::is_same::value || sizeof(CharT) == 2 || sizeof(CharT) == 4, + "Unsupported wchar_t size != 2/4"); + static constexpr size_t UTF_N = 8 * sizeof(CharT); + + bool load(handle src, bool) { + handle load_src = src; + if (!src) { + return false; + } + if (!PyUnicode_Check(load_src.ptr())) { + return load_raw(load_src); + } + + // For 
UTF-8 we avoid the need for a temporary `bytes` object by using + // `PyUnicode_AsUTF8AndSize`. + if (UTF_N == 8) { + Py_ssize_t size = -1; + const auto *buffer + = reinterpret_cast(PyUnicode_AsUTF8AndSize(load_src.ptr(), &size)); + if (!buffer) { + PyErr_Clear(); + return false; + } + value = StringType(buffer, static_cast(size)); + return true; + } + + auto utfNbytes + = reinterpret_steal(PyUnicode_AsEncodedString(load_src.ptr(), + UTF_N == 8 ? "utf-8" + : UTF_N == 16 ? "utf-16" + : "utf-32", + nullptr)); + if (!utfNbytes) { + PyErr_Clear(); + return false; + } + + const auto *buffer + = reinterpret_cast(PYBIND11_BYTES_AS_STRING(utfNbytes.ptr())); + size_t length = (size_t) PYBIND11_BYTES_SIZE(utfNbytes.ptr()) / sizeof(CharT); + // Skip BOM for UTF-16/32 + if (UTF_N > 8) { + buffer++; + length--; + } + value = StringType(buffer, length); + + // If we're loading a string_view we need to keep the encoded Python object alive: + if (IsView) { + loader_life_support::add_patient(utfNbytes); + } + + return true; + } + + static handle + cast(const StringType &src, return_value_policy /* policy */, handle /* parent */) { + const char *buffer = reinterpret_cast(src.data()); + auto nbytes = ssize_t(src.size() * sizeof(CharT)); + handle s = decode_utfN(buffer, nbytes); + if (!s) { + throw error_already_set(); + } + return s; + } + + PYBIND11_TYPE_CASTER(StringType, const_name(PYBIND11_STRING_NAME)); + +private: + static handle decode_utfN(const char *buffer, ssize_t nbytes) { +#if !defined(PYPY_VERSION) + return UTF_N == 8 ? PyUnicode_DecodeUTF8(buffer, nbytes, nullptr) + : UTF_N == 16 ? PyUnicode_DecodeUTF16(buffer, nbytes, nullptr, nullptr) + : PyUnicode_DecodeUTF32(buffer, nbytes, nullptr, nullptr); +#else + // PyPy segfaults when on PyUnicode_DecodeUTF16 (and possibly on PyUnicode_DecodeUTF32 as + // well), so bypass the whole thing by just passing the encoding as a string value, which + // works properly: + return PyUnicode_Decode(buffer, + nbytes, + UTF_N == 8 ? 
"utf-8" + : UTF_N == 16 ? "utf-16" + : "utf-32", + nullptr); +#endif + } + + // When loading into a std::string or char*, accept a bytes/bytearray object as-is (i.e. + // without any encoding/decoding attempt). For other C++ char sizes this is a no-op. + // which supports loading a unicode from a str, doesn't take this path. + template + bool load_raw(enable_if_t::value, handle> src) { + if (PYBIND11_BYTES_CHECK(src.ptr())) { + // We were passed raw bytes; accept it into a std::string or char* + // without any encoding attempt. + const char *bytes = PYBIND11_BYTES_AS_STRING(src.ptr()); + if (!bytes) { + pybind11_fail("Unexpected PYBIND11_BYTES_AS_STRING() failure."); + } + value = StringType(bytes, (size_t) PYBIND11_BYTES_SIZE(src.ptr())); + return true; + } + if (PyByteArray_Check(src.ptr())) { + // We were passed a bytearray; accept it into a std::string or char* + // without any encoding attempt. + const char *bytearray = PyByteArray_AsString(src.ptr()); + if (!bytearray) { + pybind11_fail("Unexpected PyByteArray_AsString() failure."); + } + value = StringType(bytearray, (size_t) PyByteArray_Size(src.ptr())); + return true; + } + + return false; + } + + template + bool load_raw(enable_if_t::value, handle>) { + return false; + } +}; + +template +struct type_caster, + enable_if_t::value>> + : string_caster> {}; + +#ifdef PYBIND11_HAS_STRING_VIEW +template +struct type_caster, + enable_if_t::value>> + : string_caster, true> {}; +#endif + +// Type caster for C-style strings. We basically use a std::string type caster, but also add the +// ability to use None as a nullptr char* (which the string caster doesn't allow). 
+template +struct type_caster::value>> { + using StringType = std::basic_string; + using StringCaster = make_caster; + StringCaster str_caster; + bool none = false; + CharT one_char = 0; + +public: + bool load(handle src, bool convert) { + if (!src) { + return false; + } + if (src.is_none()) { + // Defer accepting None to other overloads (if we aren't in convert mode): + if (!convert) { + return false; + } + none = true; + return true; + } + return str_caster.load(src, convert); + } + + static handle cast(const CharT *src, return_value_policy policy, handle parent) { + if (src == nullptr) { + return pybind11::none().release(); + } + return StringCaster::cast(StringType(src), policy, parent); + } + + static handle cast(CharT src, return_value_policy policy, handle parent) { + if (std::is_same::value) { + handle s = PyUnicode_DecodeLatin1((const char *) &src, 1, nullptr); + if (!s) { + throw error_already_set(); + } + return s; + } + return StringCaster::cast(StringType(1, src), policy, parent); + } + + explicit operator CharT *() { + return none ? nullptr : const_cast(static_cast(str_caster).c_str()); + } + explicit operator CharT &() { + if (none) { + throw value_error("Cannot convert None to a character"); + } + + auto &value = static_cast(str_caster); + size_t str_len = value.size(); + if (str_len == 0) { + throw value_error("Cannot convert empty string to a character"); + } + + // If we're in UTF-8 mode, we have two possible failures: one for a unicode character that + // is too high, and one for multiple unicode characters (caught later), so we need to + // figure out how long the first encoded character is in bytes to distinguish between these + // two errors. We also allow want to allow unicode characters U+0080 through U+00FF, as + // those can fit into a single char value. 
+ if (StringCaster::UTF_N == 8 && str_len > 1 && str_len <= 4) { + auto v0 = static_cast(value[0]); + // low bits only: 0-127 + // 0b110xxxxx - start of 2-byte sequence + // 0b1110xxxx - start of 3-byte sequence + // 0b11110xxx - start of 4-byte sequence + size_t char0_bytes = (v0 & 0x80) == 0 ? 1 + : (v0 & 0xE0) == 0xC0 ? 2 + : (v0 & 0xF0) == 0xE0 ? 3 + : 4; + + if (char0_bytes == str_len) { + // If we have a 128-255 value, we can decode it into a single char: + if (char0_bytes == 2 && (v0 & 0xFC) == 0xC0) { // 0x110000xx 0x10xxxxxx + one_char = static_cast(((v0 & 3) << 6) + + (static_cast(value[1]) & 0x3F)); + return one_char; + } + // Otherwise we have a single character, but it's > U+00FF + throw value_error("Character code point not in range(0x100)"); + } + } + + // UTF-16 is much easier: we can only have a surrogate pair for values above U+FFFF, thus a + // surrogate pair with total length 2 instantly indicates a range error (but not a "your + // string was too long" error). + else if (StringCaster::UTF_N == 16 && str_len == 2) { + one_char = static_cast(value[0]); + if (one_char >= 0xD800 && one_char < 0xE000) { + throw value_error("Character code point not in range(0x10000)"); + } + } + + if (str_len != 1) { + throw value_error("Expected a character, but multi-character string found"); + } + + one_char = value[0]; + return one_char; + } + + static constexpr auto name = const_name(PYBIND11_STRING_NAME); + template + using cast_op_type = pybind11::detail::cast_op_type<_T>; +}; + +// Base implementation for std::tuple and std::pair +template