koichi12 commited on
Commit
bcc798f
·
verified ·
1 Parent(s): 093ab29

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. .venv/lib/python3.11/site-packages/mpmath/calculus/__init__.py +6 -0
  2. .venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/__init__.cpython-311.pyc +0 -0
  3. .venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/approximation.cpython-311.pyc +0 -0
  4. .venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/calculus.cpython-311.pyc +0 -0
  5. .venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/differentiation.cpython-311.pyc +0 -0
  6. .venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/extrapolation.cpython-311.pyc +0 -0
  7. .venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/inverselaplace.cpython-311.pyc +0 -0
  8. .venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/odes.cpython-311.pyc +0 -0
  9. .venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/optimization.cpython-311.pyc +0 -0
  10. .venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/polynomials.cpython-311.pyc +0 -0
  11. .venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/quadrature.cpython-311.pyc +0 -0
  12. .venv/lib/python3.11/site-packages/mpmath/calculus/approximation.py +246 -0
  13. .venv/lib/python3.11/site-packages/mpmath/calculus/calculus.py +6 -0
  14. .venv/lib/python3.11/site-packages/mpmath/calculus/extrapolation.py +2115 -0
  15. .venv/lib/python3.11/site-packages/mpmath/calculus/inverselaplace.py +973 -0
  16. .venv/lib/python3.11/site-packages/mpmath/calculus/polynomials.py +213 -0
  17. .venv/lib/python3.11/site-packages/mpmath/matrices/__init__.py +2 -0
  18. .venv/lib/python3.11/site-packages/mpmath/matrices/__pycache__/__init__.cpython-311.pyc +0 -0
  19. .venv/lib/python3.11/site-packages/mpmath/matrices/__pycache__/calculus.cpython-311.pyc +0 -0
  20. .venv/lib/python3.11/site-packages/mpmath/matrices/__pycache__/eigen.cpython-311.pyc +0 -0
  21. .venv/lib/python3.11/site-packages/mpmath/matrices/__pycache__/eigen_symmetric.cpython-311.pyc +0 -0
  22. .venv/lib/python3.11/site-packages/mpmath/matrices/__pycache__/linalg.cpython-311.pyc +0 -0
  23. .venv/lib/python3.11/site-packages/mpmath/matrices/__pycache__/matrices.cpython-311.pyc +0 -0
  24. .venv/lib/python3.11/site-packages/mpmath/matrices/calculus.py +531 -0
  25. .venv/lib/python3.11/site-packages/mpmath/matrices/eigen.py +877 -0
  26. .venv/lib/python3.11/site-packages/mpmath/matrices/eigen_symmetric.py +1807 -0
  27. .venv/lib/python3.11/site-packages/mpmath/matrices/linalg.py +790 -0
  28. .venv/lib/python3.11/site-packages/mpmath/matrices/matrices.py +1005 -0
  29. .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/runtests.cpython-311.pyc +0 -0
  30. .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_bitwise.cpython-311.pyc +0 -0
  31. .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_compatibility.cpython-311.pyc +0 -0
  32. .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_elliptic.cpython-311.pyc +0 -0
  33. .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_hp.cpython-311.pyc +0 -0
  34. .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_linalg.cpython-311.pyc +0 -0
  35. .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_pickle.cpython-311.pyc +0 -0
  36. .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_special.cpython-311.pyc +0 -0
  37. .venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_visualization.cpython-311.pyc +0 -0
  38. .venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/INSTALLER +1 -0
  39. .venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/LICENSE +201 -0
  40. .venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/METADATA +313 -0
  41. .venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/RECORD +193 -0
  42. .venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/WHEEL +6 -0
  43. .venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/namespace_packages.txt +1 -0
  44. .venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/top_level.txt +1 -0
  45. .venv/lib/python3.11/site-packages/pybind11/__init__.py +19 -0
  46. .venv/lib/python3.11/site-packages/pybind11/__main__.py +86 -0
  47. .venv/lib/python3.11/site-packages/pybind11/_version.py +12 -0
  48. .venv/lib/python3.11/site-packages/pybind11/commands.py +39 -0
  49. .venv/lib/python3.11/site-packages/pybind11/include/pybind11/attr.h +690 -0
  50. .venv/lib/python3.11/site-packages/pybind11/include/pybind11/buffer_info.h +208 -0
.venv/lib/python3.11/site-packages/mpmath/calculus/__init__.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ from . import calculus
2
+ # XXX: hack to set methods
3
+ from . import approximation
4
+ from . import differentiation
5
+ from . import extrapolation
6
+ from . import polynomials
.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (443 Bytes). View file
 
.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/approximation.cpython-311.pyc ADDED
Binary file (12.7 kB). View file
 
.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/calculus.cpython-311.pyc ADDED
Binary file (630 Bytes). View file
 
.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/differentiation.cpython-311.pyc ADDED
Binary file (28.4 kB). View file
 
.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/extrapolation.cpython-311.pyc ADDED
Binary file (89.6 kB). View file
 
.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/inverselaplace.cpython-311.pyc ADDED
Binary file (41.7 kB). View file
 
.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/odes.cpython-311.pyc ADDED
Binary file (13.3 kB). View file
 
.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/optimization.cpython-311.pyc ADDED
Binary file (42.7 kB). View file
 
.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/polynomials.cpython-311.pyc ADDED
Binary file (10.9 kB). View file
 
.venv/lib/python3.11/site-packages/mpmath/calculus/__pycache__/quadrature.cpython-311.pyc ADDED
Binary file (50.9 kB). View file
 
.venv/lib/python3.11/site-packages/mpmath/calculus/approximation.py ADDED
@@ -0,0 +1,246 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ..libmp.backend import xrange
2
+ from .calculus import defun
3
+
4
+ #----------------------------------------------------------------------------#
5
+ # Approximation methods #
6
+ #----------------------------------------------------------------------------#
7
+
8
+ # The Chebyshev approximation formula is given at:
9
+ # http://mathworld.wolfram.com/ChebyshevApproximationFormula.html
10
+
11
+ # The only major changes in the following code is that we return the
12
+ # expanded polynomial coefficients instead of Chebyshev coefficients,
13
+ # and that we automatically transform [a,b] -> [-1,1] and back
14
+ # for convenience.
15
+
16
# Coefficient in Chebyshev approximation
def chebcoeff(ctx,f,a,b,j,N):
    """Compute the j-th Chebyshev expansion coefficient of f over [a, b].

    Samples f at the N Chebyshev nodes (mapped from [-1, 1] to [a, b])
    and forms the standard discrete cosine sum; returns 2/N times it.
    """
    half = ctx.mpf(0.5)
    total = ctx.mpf(0)
    for k in range(1, N+1):
        theta = (k - half)/N
        node = ctx.cospi(theta)
        # Map the Chebyshev node from [-1,1] onto [a,b] before sampling f.
        total += f(node*(b-a)*half + (b+a)*half) * ctx.cospi(j*theta)
    return 2*total/N
+
25
+ # Generate Chebyshev polynomials T_n(ax+b) in expanded form
26
+ def chebT(ctx, a=1, b=0):
27
+ Tb = [1]
28
+ yield Tb
29
+ Ta = [b, a]
30
+ while 1:
31
+ yield Ta
32
+ # Recurrence: T[n+1](ax+b) = 2*(ax+b)*T[n](ax+b) - T[n-1](ax+b)
33
+ Tmp = [0] + [2*a*t for t in Ta]
34
+ for i, c in enumerate(Ta): Tmp[i] += 2*b*c
35
+ for i, c in enumerate(Tb): Tmp[i] -= c
36
+ Ta, Tb = Tmp, Ta
37
+
38
@defun
def chebyfit(ctx, f, interval, N, error=False):
    r"""
    Computes a polynomial of degree `N-1` that approximates the
    given function `f` on the interval `[a, b]`. With ``error=True``,
    :func:`~mpmath.chebyfit` also returns an accurate estimate of the
    maximum absolute error; that is, the maximum value of
    `|f(x) - P(x)|` for `x \in [a, b]`.

    :func:`~mpmath.chebyfit` uses the Chebyshev approximation formula,
    which gives a nearly optimal solution: that is, the maximum
    error of the approximating polynomial is very close to
    the smallest possible for any polynomial of the same degree.

    Chebyshev approximation is very useful if one needs repeated
    evaluation of an expensive function, such as a function defined
    implicitly by an integral or a differential equation. (For
    example, it could be used to turn a slow mpmath function
    into a fast machine-precision version of the same.)

    **Examples**

    Here we use :func:`~mpmath.chebyfit` to generate a low-degree approximation
    of `f(x) = \cos(x)`, valid on the interval `[1, 2]`::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> poly, err = chebyfit(cos, [1, 2], 5, error=True)
        >>> nprint(poly)
        [0.00291682, 0.146166, -0.732491, 0.174141, 0.949553]
        >>> nprint(err, 12)
        1.61351758081e-5

    The polynomial can be evaluated using ``polyval``::

        >>> nprint(polyval(poly, 1.6), 12)
        -0.0291858904138
        >>> nprint(cos(1.6), 12)
        -0.0291995223013

    Sampling the true error at 1000 points shows that the error
    estimate generated by ``chebyfit`` is remarkably good::

        >>> error = lambda x: abs(cos(x) - polyval(poly, x))
        >>> nprint(max([error(1+n/1000.) for n in range(1000)]), 12)
        1.61349954245e-5

    **Choice of degree**

    The degree `N` can be set arbitrarily high, to obtain an
    arbitrarily good approximation. As a rule of thumb, an
    `N`-term Chebyshev approximation is good to `N/(b-a)` decimal
    places on a unit interval (although this depends on how
    well-behaved `f` is). The cost grows accordingly: ``chebyfit``
    evaluates the function `(N^2)/2` times to compute the
    coefficients and an additional `N` times to estimate the error.

    **Possible issues**

    One should be careful to use a sufficiently high working
    precision both when calling ``chebyfit`` and when evaluating
    the resulting polynomial, as the polynomial is sometimes
    ill-conditioned. It is for example difficult to reach
    15-digit accuracy when evaluating the polynomial using
    machine precision floats, no matter the theoretical
    accuracy of the polynomial. (The option to return the
    coefficients in Chebyshev form should be made available
    in the future.)

    It is important to note the Chebyshev approximation works
    poorly if `f` is not smooth. A function containing singularities,
    rapid oscillation, etc can be approximated more effectively by
    multiplying it by a weight function that cancels out the
    nonsmooth features, or by dividing the interval into several
    segments.
    """
    a, b = ctx._as_points(interval)
    orig = ctx.prec
    try:
        # Work at elevated precision: the coefficient combination below
        # loses accuracy to cancellation; restore prec in finally.
        ctx.prec = orig + int(N**0.5) + 20
        # c[k] = k-th Chebyshev coefficient of f on [a, b]
        c = [chebcoeff(ctx,f,a,b,k,N) for k in range(N)]
        d = [ctx.zero] * N
        # The constant term uses c[0]/2 by convention; pre-subtract it here
        # because the loop below adds the full c[0]*T_0 contribution.
        d[0] = -c[0]/2
        h = ctx.mpf(0.5)
        # Chebyshev polynomials composed with the affine map [a,b] -> [-1,1],
        # i.e. T_n(2x/(b-a) - (b+a)/(b-a)), in expanded monomial form.
        T = chebT(ctx, ctx.mpf(2)/(b-a), ctx.mpf(-1)*(b+a)/(b-a))
        # Accumulate sum_k c[k]*T_k into d (coefficients in ascending order).
        for (k, Tk) in zip(range(N), T):
            for i in range(len(Tk)):
                d[i] += c[k]*Tk[i]
        # ctx.polyval expects coefficients with the highest degree first.
        d = d[::-1]
        # Estimate maximum error by sampling |f - P| at N Chebyshev points.
        err = ctx.zero
        for k in range(N):
            x = ctx.cos(ctx.pi*k/N) * (b-a)*h + (b+a)*h
            err = max(err, abs(f(x) - ctx.polyval(d, x)))
    finally:
        ctx.prec = orig
    if error:
        # Unary + rounds the error estimate to the caller's precision.
        return d, +err
    else:
        return d
138
+
139
@defun
def fourier(ctx, f, interval, N):
    r"""
    Computes the Fourier series of degree `N` of the given function
    on the interval `[a, b]`. More precisely, :func:`~mpmath.fourier` returns
    two lists `(c, s)` of coefficients (the cosine series and sine
    series, respectively), such that

    .. math ::

        f(x) \sim \sum_{k=0}^N
            c_k \cos(k m x) + s_k \sin(k m x)

    where `m = 2 \pi / (b-a)`.

    Note that many texts define the first coefficient as `2 c_0` instead
    of `c_0`. The easiest way to evaluate the computed series correctly
    is to pass it to :func:`~mpmath.fourierval`.

    **Examples**

    The function `f(x) = x` has a simple Fourier series on the standard
    interval `[-\pi, \pi]`. The cosine coefficients are all zero (because
    the function has odd symmetry), and the sine coefficients are
    rational numbers::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> c, s = fourier(lambda x: x, [-pi, pi], 5)
        >>> nprint(c)
        [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        >>> nprint(s)
        [0.0, 2.0, -1.0, 0.666667, -0.5, 0.4]

    This computes a Fourier series of a nonsymmetric function on
    a nonstandard interval::

        >>> I = [-1, 1.5]
        >>> f = lambda x: x**2 - 4*x + 1
        >>> cs = fourier(f, I, 4)
        >>> nprint(cs[0])
        [0.583333, 1.12479, -1.27552, 0.904708, -0.441296]
        >>> nprint(cs[1])
        [0.0, -2.6255, 0.580905, 0.219974, -0.540057]

    It is instructive to plot a function along with its truncated
    Fourier series::

        >>> plot([f, lambda x: fourierval(cs, I, x)], I) #doctest: +SKIP

    Fourier series generally converge slowly (and may not converge
    pointwise). For example, if `f(x) = \cosh(x)`, a 10-term Fourier
    series gives an `L^2` error corresponding to 2-digit accuracy::

        >>> I = [-1, 1]
        >>> cs = fourier(cosh, I, 9)
        >>> g = lambda x: (cosh(x) - fourierval(cs, I, x))**2
        >>> nprint(sqrt(quad(g, I)))
        0.00467963

    :func:`~mpmath.fourier` uses numerical quadrature. For nonsmooth functions,
    the accuracy (and speed) can be improved by including all singular
    points in the interval specification::

        >>> nprint(fourier(abs, [-1, 1], 0), 10)
        ([0.5000441648], [0.0])
        >>> nprint(fourier(abs, [-1, 0, 1], 0), 10)
        ([0.5], [0.0])

    """
    points = ctx._as_points(interval)
    length = points[-1] - points[0]
    # Coefficients smaller than ~10 ulp are treated as exact zeros so that
    # symmetric functions get clean zero entries.
    cutoff = ctx.eps*10
    cos_series = []
    sin_series = []
    for n in xrange(N+1):
        # Angular frequency of the n-th harmonic on this interval.
        w = 2*n*ctx.pi/length
        cn = 2*ctx.quadgl(lambda t: f(t)*ctx.cos(w*t), points)/length
        sn = 2*ctx.quadgl(lambda t: f(t)*ctx.sin(w*t), points)/length
        if n == 0:
            # Constant term convention: c_0 is half the usual a_0.
            cn /= 2
        if abs(cn) < cutoff:
            cn = ctx.zero
        if abs(sn) < cutoff:
            sn = ctx.zero
        cos_series.append(cn)
        sin_series.append(sn)
    return cos_series, sin_series
227
+
228
@defun
def fourierval(ctx, series, interval, x):
    """
    Evaluates a Fourier series (in the format computed by
    :func:`~mpmath.fourier` for the given interval) at the point `x`.

    The series should be a pair `(c, s)` where `c` is the
    cosine series and `s` is the sine series. The two lists
    need not have the same length.
    """
    cs, ss = series
    ab = ctx._as_points(interval)
    # Fundamental angular frequency for the interval: m = 2*pi/(b - a).
    m = 2*ctx.pi/(ab[-1]-ab[0])
    s = ctx.zero
    # Skip zero coefficients to avoid pointless trig evaluations.
    s += ctx.fsum(cs[n]*ctx.cos(m*n*x) for n in xrange(len(cs)) if cs[n])
    s += ctx.fsum(ss[n]*ctx.sin(m*n*x) for n in xrange(len(ss)) if ss[n])
    return s
.venv/lib/python3.11/site-packages/mpmath/calculus/calculus.py ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
class CalculusMethods(object):
    """Empty container class: calculus functions defined throughout this
    subpackage are attached to it as methods via :func:`defun`."""

def defun(f):
    """Decorator that installs *f* on ``CalculusMethods`` under its own
    name and returns *f* unchanged."""
    setattr(CalculusMethods, f.__name__, f)
    return f
.venv/lib/python3.11/site-packages/mpmath/calculus/extrapolation.py ADDED
@@ -0,0 +1,2115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Python 2/3 compatibility shims for lazy zip and the next() builtin.
try:
    from itertools import izip
except ImportError:
    # Python 3: itertools.izip was removed; the builtin zip is already lazy.
    izip = zip

from ..libmp.backend import xrange
from .calculus import defun

try:
    # Python >= 2.6 provides the next() builtin.
    next = next
except NameError:
    # Very old Pythons: fall back to calling the iterator's .next() method.
    next = lambda _: _.next()
13
+
14
@defun
def richardson(ctx, seq):
    r"""
    Given a list ``seq`` of the first `N` elements of a slowly convergent
    infinite sequence, :func:`~mpmath.richardson` computes the `N`-term
    Richardson extrapolate for the limit.

    :func:`~mpmath.richardson` returns `(v, c)` where `v` is the estimated
    limit and `c` is the magnitude of the largest weight used during the
    computation. The weight provides an estimate of the precision
    lost to cancellation. Due to cancellation effects, the sequence must
    typically be computed at a much higher precision than the target
    accuracy of the extrapolation.

    **Applicability and issues**

    The `N`-step Richardson extrapolation algorithm used by
    :func:`~mpmath.richardson` is described in [1].

    Richardson extrapolation only works for a specific type of sequence,
    namely one converging like partial sums of
    `P(1)/Q(1) + P(2)/Q(2) + \ldots` where `P` and `Q` are polynomials.
    When the sequence does not converge at such a rate
    :func:`~mpmath.richardson` generally produces garbage.

    Richardson extrapolation has the advantage of being fast: the `N`-term
    extrapolate requires only `O(N)` arithmetic operations, and usually
    produces an estimate that is accurate to `O(N)` digits. Contrast with
    the Shanks transformation (see :func:`~mpmath.shanks`), which requires
    `O(N^2)` operations.

    :func:`~mpmath.richardson` is unable to produce an estimate for the
    approximation error. One way to estimate the error is to perform
    two extrapolations with slightly different `N` and comparing the
    results.

    Richardson extrapolation does not work for oscillating sequences.
    As a simple workaround, :func:`~mpmath.richardson` detects if the last
    three elements do not differ monotonically, and in that case
    applies extrapolation only to the even-index elements.

    **Example**

    Applying Richardson extrapolation to the Leibniz series for `\pi`::

        >>> from mpmath import *
        >>> mp.dps = 30; mp.pretty = True
        >>> S = [4*sum(mpf(-1)**n/(2*n+1) for n in range(m))
        ...     for m in range(1,30)]
        >>> v, c = richardson(S[:10])
        >>> v
        3.2126984126984126984126984127
        >>> nprint([v-pi, c])
        [0.0711058, 2.0]

        >>> v, c = richardson(S[:30])
        >>> v
        3.14159265468624052829954206226
        >>> nprint([v-pi, c])
        [1.09645e-9, 20833.3]

    **References**

    1. [BenderOrszag]_ pp. 375-376

    """
    if len(seq) < 3:
        raise ValueError("seq should be of minimum length 3")
    # Oscillation guard: if the last three elements do not change
    # monotonically, extrapolate only the even-index subsequence.
    if ctx.sign(seq[-1]-seq[-2]) != ctx.sign(seq[-2]-seq[-3]):
        seq = seq[::2]
    N = len(seq)//2-1
    s = ctx.zero
    # The general weight is c[k] = (N+k)**N * (-1)**(k+N) / k! / (N-k)!
    # To avoid repeated factorials, we simplify the quotient
    # of successive weights to obtain a recurrence relation
    c = (-1)**N * N**N / ctx.mpf(ctx._ifac(N))
    maxc = 1
    for k in xrange(N+1):
        # Weighted combination of the tail elements seq[N], ..., seq[2N].
        s += c * seq[N+k]
        # Track the largest weight: it bounds the cancellation incurred.
        maxc = max(abs(c), maxc)
        # Advance c[k] -> c[k+1] via the simplified quotient of weights.
        c *= (k-N)*ctx.mpf(k+N+1)**N
        c /= ((1+k)*ctx.mpf(k+N)**N)
    return s, maxc
97
+
98
@defun
def shanks(ctx, seq, table=None, randomized=False):
    r"""
    Given a list ``seq`` of the first `N` elements of a slowly
    convergent infinite sequence `(A_k)`, :func:`~mpmath.shanks` computes the iterated
    Shanks transformation `S(A), S(S(A)), \ldots, S^{N/2}(A)`. The Shanks
    transformation often provides strong convergence acceleration,
    especially if the sequence is oscillating.

    The iterated Shanks transformation is computed using the Wynn
    epsilon algorithm (see [1]). :func:`~mpmath.shanks` returns the full
    epsilon table generated by Wynn's algorithm, which can be read
    off as follows:

    * The table is a list of lists forming a lower triangular matrix,
      where higher row and column indices correspond to more accurate
      values.
    * The columns with even index hold dummy entries (required for the
      computation) and the columns with odd index hold the actual
      extrapolates.
    * The last element in the last row is typically the most
      accurate estimate of the limit.
    * The difference to the third last element in the last row
      provides an estimate of the approximation error.
    * The magnitude of the second last element provides an estimate
      of the numerical accuracy lost to cancellation.

    For convenience, the extrapolation is stopped at an odd index
    so that ``shanks(seq)[-1][-1]`` always gives an estimate of the
    limit.

    Optionally, an existing table can be passed to :func:`~mpmath.shanks`.
    This can be used to efficiently extend a previous computation after
    new elements have been appended to the sequence. The table will
    then be updated in-place.

    **The Shanks transformation**

    The Shanks transformation is defined as follows (see [2]): given
    the input sequence `(A_0, A_1, \ldots)`, the transformed sequence is
    given by

    .. math ::

        S(A_k) = \frac{A_{k+1}A_{k-1}-A_k^2}{A_{k+1}+A_{k-1}-2 A_k}

    The Shanks transformation gives the exact limit `A_{\infty}` in a
    single step if `A_k = A + a q^k`. Note in particular that it
    extrapolates the exact sum of a geometric series in a single step.

    Applying the Shanks transformation once often improves convergence
    substantially for an arbitrary sequence, but the optimal effect is
    obtained by applying it iteratively:
    `S(S(A_k)), S(S(S(A_k))), \ldots`.

    Wynn's epsilon algorithm provides an efficient way to generate
    the table of iterated Shanks transformations. It reduces the
    computation of each element to essentially a single division, at
    the cost of requiring dummy elements in the table. See [1] for
    details.

    **Precision issues**

    Due to cancellation effects, the sequence must typically be
    computed at a much higher precision than the target accuracy
    of the extrapolation.

    If the Shanks transformation converges to the exact limit (such
    as if the sequence is a geometric series), then a division by
    zero occurs. By default, :func:`~mpmath.shanks` handles this case by
    terminating the iteration and returning the table it has
    generated so far. With *randomized=True*, it will instead
    replace the zero by a pseudorandom number close to zero.
    (TODO: find a better solution to this problem.)

    **Examples**

    We illustrate by applying Shanks transformation to the Leibniz
    series for `\pi`::

        >>> from mpmath import *
        >>> mp.dps = 50
        >>> S = [4*sum(mpf(-1)**n/(2*n+1) for n in range(m))
        ...     for m in range(1,30)]
        >>>
        >>> T = shanks(S[:7])
        >>> for row in T:
        ...     nprint(row)
        ...
        [-0.75]
        [1.25, 3.16667]
        [-1.75, 3.13333, -28.75]
        [2.25, 3.14524, 82.25, 3.14234]
        [-2.75, 3.13968, -177.75, 3.14139, -969.937]
        [3.25, 3.14271, 327.25, 3.14166, 3515.06, 3.14161]

    The extrapolated accuracy is about 4 digits, and about 4 digits
    may have been lost due to cancellation::

        >>> L = T[-1]
        >>> nprint([abs(L[-1] - pi), abs(L[-1] - L[-3]), abs(L[-2])])
        [2.22532e-5, 4.78309e-5, 3515.06]

    Now we extend the computation::

        >>> T = shanks(S[:25], T)
        >>> L = T[-1]
        >>> nprint([abs(L[-1] - pi), abs(L[-1] - L[-3]), abs(L[-2])])
        [3.75527e-19, 1.48478e-19, 2.96014e+17]

    The value for pi is now accurate to 18 digits. About 18 digits may
    also have been lost to cancellation.

    Here is an example with a geometric series, where the convergence
    is immediate (the sum is exactly 1)::

        >>> mp.dps = 15
        >>> for row in shanks([0.5, 0.75, 0.875, 0.9375, 0.96875]):
        ...     nprint(row)
        [4.0]
        [8.0, 1.0]

    **References**

    1. [GravesMorris]_

    2. [BenderOrszag]_ pp. 368-375

    """
    if len(seq) < 2:
        raise ValueError("seq should be of minimum length 2")
    # If a previous table was supplied, continue from where it left off;
    # otherwise start a fresh (empty) epsilon table.
    if table:
        START = len(table)
    else:
        START = 0
        table = []
    STOP = len(seq) - 1
    # Stop at an odd row index so the final entry is a true extrapolate.
    if STOP & 1:
        STOP -= 1
    one = ctx.one
    # Unary + rounds eps to the working precision.
    eps = +ctx.eps
    if randomized:
        from random import Random
        rnd = Random()
        # Seed deterministically from the resume point for reproducibility.
        rnd.seed(START)
    for i in xrange(START, STOP):
        row = []
        for j in xrange(i+1):
            if j == 0:
                # First column: dummy zero plus forward difference of seq.
                a, b = 0, seq[i+1]-seq[i]
            else:
                if j == 1:
                    a = seq[i]
                else:
                    # Wynn's epsilon recursion reads the entry two columns
                    # back in the previous row.
                    a = table[i-1][j-2]
                b = row[j-1] - table[i-1][j-1]
            if not b:
                # Division by zero: the transform has converged exactly.
                if randomized:
                    # Perturb with a tiny pseudorandom value and continue.
                    b = (1 + rnd.getrandbits(10))*eps
                elif i & 1:
                    # Drop the partial (even-length) last row.
                    return table[:-1]
                else:
                    return table
            row.append(a + one/b)
        table.append(row)
    return table
264
+
265
+
266
+ class levin_class:
267
+ # levin: Copyright 2013 Timo Hartmann (thartmann15 at gmail.com)
268
+ r"""
269
+ This interface implements Levin's (nonlinear) sequence transformation for
270
+ convergence acceleration and summation of divergent series. It performs
271
+ better than the Shanks/Wynn-epsilon algorithm for logarithmic convergent
272
+ or alternating divergent series.
273
+
274
+ Let *A* be the series we want to sum:
275
+
276
+ .. math ::
277
+
278
+ A = \sum_{k=0}^{\infty} a_k
279
+
280
+ Attention: all `a_k` must be non-zero!
281
+
282
+ Let `s_n` be the partial sums of this series:
283
+
284
+ .. math ::
285
+
286
+ s_n = \sum_{k=0}^n a_k.
287
+
288
+ **Methods**
289
+
290
+ Calling ``levin`` returns an object with the following methods.
291
+
292
+ ``update(...)`` works with the list of individual terms `a_k` of *A*, and
293
+ ``update_step(...)`` works with the list of partial sums `s_k` of *A*:
294
+
295
+ .. code ::
296
+
297
+ v, e = ...update([a_0, a_1,..., a_k])
298
+ v, e = ...update_psum([s_0, s_1,..., s_k])
299
+
300
+ ``step(...)`` works with the individual terms `a_k` and ``step_psum(...)``
301
+ works with the partial sums `s_k`:
302
+
303
+ .. code ::
304
+
305
+ v, e = ...step(a_k)
306
+ v, e = ...step_psum(s_k)
307
+
308
+ *v* is the current estimate for *A*, and *e* is an error estimate which is
309
+ simply the difference between the current estimate and the last estimate.
310
+ One should not mix ``update``, ``update_psum``, ``step`` and ``step_psum``.
311
+
312
+ **A word of caution**
313
+
314
+ One can only hope for good results (i.e. convergence acceleration or
315
+ resummation) if the `s_n` have some well defind asymptotic behavior for
316
+ large `n` and are not erratic or random. Furthermore one usually needs very
317
+ high working precision because of the numerical cancellation. If the working
318
+ precision is insufficient, levin may produce silently numerical garbage.
319
+ Furthermore even if the Levin-transformation converges, in the general case
320
+ there is no proof that the result is mathematically sound. Only for very
321
+ special classes of problems one can prove that the Levin-transformation
322
+ converges to the expected result (for example Stieltjes-type integrals).
323
+ Furthermore the Levin-transform is quite expensive (i.e. slow) in comparison
324
+ to Shanks/Wynn-epsilon, Richardson & co.
325
+ In summary one can say that the Levin-transformation is powerful but
326
+ unreliable and that it may need a copious amount of working precision.
327
+
328
+ The Levin transform has several variants differing in the choice of weights.
329
+ Some variants are better suited for the possible flavours of convergence
330
+ behaviour of *A* than other variants:
331
+
332
+ .. code ::
333
+
334
+ convergence behaviour levin-u levin-t levin-v shanks/wynn-epsilon
335
+
336
+ logarithmic + - + -
337
+ linear + + + +
338
+ alternating divergent + + + +
339
+
340
+ "+" means the variant is suitable,"-" means the variant is not suitable;
341
+ for comparison the Shanks/Wynn-epsilon transform is listed, too.
342
+
343
+ The variant is controlled through the variant keyword (i.e. ``variant="u"``,
344
+ ``variant="t"`` or ``variant="v"``). Overall "u" is probably the best choice.
345
+
346
+ Finally it is possible to use the Sidi-S transform instead of the Levin transform
347
+ by using the keyword ``method='sidi'``. The Sidi-S transform works better than the
348
+ Levin transformation for some divergent series (see the examples).
349
+
350
+ Parameters:
351
+
352
+ .. code ::
353
+
354
+ method "levin" or "sidi" chooses either the Levin or the Sidi-S transformation
355
+ variant "u","t" or "v" chooses the weight variant.
356
+
357
+ The Levin transform is also accessible through the nsum interface.
358
+ ``method="l"`` or ``method="levin"`` select the normal Levin transform while
359
+ ``method="sidi"``
360
+ selects the Sidi-S transform. The variant is in both cases selected through the
361
+ levin_variant keyword. The stepsize in :func:`~mpmath.nsum` must not be chosen too large, otherwise
362
+ it will miss the point where the Levin transform converges resulting in numerical
363
+ overflow/garbage. For highly divergent series a copious amount of working precision
364
+ must be chosen.
365
+
366
+ **Examples**
367
+
368
+ First we sum the zeta function::
369
+
370
+ >>> from mpmath import mp
371
+ >>> mp.prec = 53
372
+ >>> eps = mp.mpf(mp.eps)
373
+ >>> with mp.extraprec(2 * mp.prec): # levin needs a high working precision
374
+ ... L = mp.levin(method = "levin", variant = "u")
375
+ ... S, s, n = [], 0, 1
376
+ ... while 1:
377
+ ... s += mp.one / (n * n)
378
+ ... n += 1
379
+ ... S.append(s)
380
+ ... v, e = L.update_psum(S)
381
+ ... if e < eps:
382
+ ... break
383
+ ... if n > 1000: raise RuntimeError("iteration limit exceeded")
384
+ >>> print(mp.chop(v - mp.pi ** 2 / 6))
385
+ 0.0
386
+ >>> w = mp.nsum(lambda n: 1 / (n*n), [1, mp.inf], method = "levin", levin_variant = "u")
387
+ >>> print(mp.chop(v - w))
388
+ 0.0
389
+
390
+ Now we sum the zeta function outside its range of convergence
391
+ (attention: This does not work at the negative integers!)::
392
+
393
+ >>> eps = mp.mpf(mp.eps)
394
+ >>> with mp.extraprec(2 * mp.prec): # levin needs a high working precision
395
+ ... L = mp.levin(method = "levin", variant = "v")
396
+ ... A, n = [], 1
397
+ ... while 1:
398
+ ... s = mp.mpf(n) ** (2 + 3j)
399
+ ... n += 1
400
+ ... A.append(s)
401
+ ... v, e = L.update(A)
402
+ ... if e < eps:
403
+ ... break
404
+ ... if n > 1000: raise RuntimeError("iteration limit exceeded")
405
+ >>> print(mp.chop(v - mp.zeta(-2-3j)))
406
+ 0.0
407
+ >>> w = mp.nsum(lambda n: n ** (2 + 3j), [1, mp.inf], method = "levin", levin_variant = "v")
408
+ >>> print(mp.chop(v - w))
409
+ 0.0
410
+
411
+ Now we sum the divergent asymptotic expansion of an integral related to the
412
+ exponential integral (see also [2] p.373). The Sidi-S transform works best here::
413
+
414
+ >>> z = mp.mpf(10)
415
+ >>> exact = mp.quad(lambda x: mp.exp(-x)/(1+x/z),[0,mp.inf])
416
+ >>> # exact = z * mp.exp(z) * mp.expint(1,z) # this is the symbolic expression for the integral
417
+ >>> eps = mp.mpf(mp.eps)
418
+ >>> with mp.extraprec(2 * mp.prec): # high working precisions are mandatory for divergent resummation
419
+ ... L = mp.levin(method = "sidi", variant = "t")
420
+ ... n = 0
421
+ ... while 1:
422
+ ... s = (-1)**n * mp.fac(n) * z ** (-n)
423
+ ... v, e = L.step(s)
424
+ ... n += 1
425
+ ... if e < eps:
426
+ ... break
427
+ ... if n > 1000: raise RuntimeError("iteration limit exceeded")
428
+ >>> print(mp.chop(v - exact))
429
+ 0.0
430
+ >>> w = mp.nsum(lambda n: (-1) ** n * mp.fac(n) * z ** (-n), [0, mp.inf], method = "sidi", levin_variant = "t")
431
+ >>> print(mp.chop(v - w))
432
+ 0.0
433
+
434
+ Another highly divergent integral is also summable::
435
+
436
+ >>> z = mp.mpf(2)
437
+ >>> eps = mp.mpf(mp.eps)
438
+ >>> exact = mp.quad(lambda x: mp.exp( -x * x / 2 - z * x ** 4), [0,mp.inf]) * 2 / mp.sqrt(2 * mp.pi)
439
+ >>> # exact = mp.exp(mp.one / (32 * z)) * mp.besselk(mp.one / 4, mp.one / (32 * z)) / (4 * mp.sqrt(z * mp.pi)) # this is the symbolic expression for the integral
440
+ >>> with mp.extraprec(7 * mp.prec): # we need copious amount of precision to sum this highly divergent series
441
+ ... L = mp.levin(method = "levin", variant = "t")
442
+ ... n, s = 0, 0
443
+ ... while 1:
444
+ ... s += (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n))
445
+ ... n += 1
446
+ ... v, e = L.step_psum(s)
447
+ ... if e < eps:
448
+ ... break
449
+ ... if n > 1000: raise RuntimeError("iteration limit exceeded")
450
+ >>> print(mp.chop(v - exact))
451
+ 0.0
452
+ >>> w = mp.nsum(lambda n: (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)),
453
+ ... [0, mp.inf], method = "levin", levin_variant = "t", workprec = 8*mp.prec, steps = [2] + [1 for x in xrange(1000)])
454
+ >>> print(mp.chop(v - w))
455
+ 0.0
456
+
457
+ These examples run with 15-20 decimal digits precision. For higher precision the
458
+ working precision must be raised.
459
+
460
+ **Examples for nsum**
461
+
462
+ Here we calculate Euler's constant as the constant term in the Laurent
463
+ expansion of `\zeta(s)` at `s=1`. This sum converges extremely slowly because of
464
+ the logarithmic convergence behaviour of the Dirichlet series for zeta::
465
+
466
+ >>> mp.dps = 30
467
+ >>> z = mp.mpf(10) ** (-10)
468
+ >>> a = mp.nsum(lambda n: n**(-(1+z)), [1, mp.inf], method = "l") - 1 / z
469
+ >>> print(mp.chop(a - mp.euler, tol = 1e-10))
470
+ 0.0
471
+
472
+ The Sidi-S transform performs excellently for the alternating series of `\log(2)`::
473
+
474
+ >>> a = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "sidi")
475
+ >>> print(mp.chop(a - mp.log(2)))
476
+ 0.0
477
+
478
+ Hypergeometric series can also be summed outside their range of convergence.
479
+ The stepsize in :func:`~mpmath.nsum` must not be chosen too large, otherwise it will miss the
480
+ point where the Levin transform converges resulting in numerical overflow/garbage::
481
+
482
+ >>> z = 2 + 1j
483
+ >>> exact = mp.hyp2f1(2 / mp.mpf(3), 4 / mp.mpf(3), 1 / mp.mpf(3), z)
484
+ >>> f = lambda n: mp.rf(2 / mp.mpf(3), n) * mp.rf(4 / mp.mpf(3), n) * z**n / (mp.rf(1 / mp.mpf(3), n) * mp.fac(n))
485
+ >>> v = mp.nsum(f, [0, mp.inf], method = "levin", steps = [10 for x in xrange(1000)])
486
+ >>> print(mp.chop(exact-v))
487
+ 0.0
488
+
489
+ References:
490
+
491
+ [1] E.J. Weniger - "Nonlinear Sequence Transformations for the Acceleration of
492
+ Convergence and the Summation of Divergent Series" arXiv:math/0306302
493
+
494
+ [2] A. Sidi - "Practical Extrapolation Methods"
495
+
496
+ [3] H.H.H. Homeier - "Scalar Levin-Type Sequence Transformations" arXiv:math/0005209
497
+
498
+ """
499
+
500
def __init__(self, method = "levin", variant = "u"):
    """Set up an empty Levin/Sidi transformation table.

    method  -- "levin" selects the Levin transform, "sidi" the Sidi-S
               transform (this only picks the weight-ratio function).
    variant -- "u", "t" or "v"; chooses the Levin weight variant.
    """
    self.variant = variant
    self.n = 0           # number of terms consumed so far
    self.a0 = 0
    self.theta = 1
    self.A = []          # numerator table of the transformation
    self.B = []          # denominator table of the transformation
    self.last = 0        # previous estimate, used for the error estimate
    self.last_s = False  # bool sentinel: no data seen yet (variant "v")

    # Pick the weight-ratio function for the requested method.
    dispatch = {"levin": self.factor_levin, "sidi": self.factor_sidi}
    if method not in dispatch:
        raise ValueError("levin: unknown method \"%s\"" % method)
    self.factor = dispatch[method]
516
+
517
def factor_levin(self, i):
    """Weight ratio for the classical Levin transformation.

    Implements [1] p.50, eq. 7.5-7 (with n-j replaced by i).
    """
    base = self.theta + self.n
    numerator = (self.theta + i) * (base - 1) ** (self.n - i - 2)
    return numerator / self.ctx.mpf(base) ** (self.n - i - 1)
521
+
522
def factor_sidi(self, i):
    """Weight ratio for the Sidi-S transformation (factorial series).

    Implements [1] p.59, eq. 8.3-16 (with n-j replaced by i).
    """
    t = self.theta
    numerator = (t + self.n - 1) * (t + self.n - 2)
    q = t + 2 * self.n - i - 2
    return numerator / self.ctx.mpf(q * (q - 1))
526
+
527
def run(self, s, a0, a1 = 0):
    # Insert the next term into the transformation table and update the
    # numerator (A) and denominator (B) recurrences in place.
    #   s  -- current partial sum s_n
    #   a0 -- current term a_n, used to build the remainder estimate w
    #   a1 -- next term a_{n+1} (only consumed by variant "v")
    # After the update, the extrapolated value is A[0]/B[0].
    if self.variant=="t":
        # levin t: remainder estimate w = a_n
        w=a0
    elif self.variant=="u":
        # levin u: remainder estimate w = (theta + n) * a_n
        w=a0*(self.theta+self.n)
    elif self.variant=="v":
        # levin v: remainder estimate w = a_n a_{n+1} / (a_n - a_{n+1})
        w=a0*a1/(a0-a1)
    else:
        assert False, "unknown variant"

    if w==0:
        # a zero weight would divide by zero below; the class docs
        # require all terms a_k to be non-zero
        raise ValueError("levin: zero weight")

    self.A.append(s/w)
    self.B.append(1/w)

    # Walk the tables backwards: entry i is overwritten using the freshly
    # updated entry i+1, so the iteration order is essential.
    for i in range(self.n-1,-1,-1):
        if i==self.n-1:
            # the newest column always enters with factor 1
            f=1
        else:
            f=self.factor(i)

        self.A[i]=self.A[i+1]-f*self.A[i]
        self.B[i]=self.B[i+1]-f*self.B[i]

    self.n+=1
556
+
557
+ ###########################################################################
558
+
559
def update_psum(self,S):
    """
    Apply the convergence acceleration to a list of partial sums.

    A = sum(a_k, k = 0..infinity)
    s_n = sum(a_k, k = 0..n)

    v, e = ...update_psum([s_0, s_1,..., s_k])

    Returns (v, e): v is the current estimate of A, e is the absolute
    difference between this estimate and the previous one.
    """
    if self.variant == "v":
        # variant "v" needs a look-ahead term, so it lags one entry behind
        if len(S) == 1:
            self.last = 0
            return S[0], abs(S[0])
        if self.n == 0:
            self.a1 = S[1] - S[0]
            self.run(S[0], S[0], self.a1)
        while self.n < len(S) - 1:
            nxt = S[self.n + 1] - S[self.n]
            self.run(S[self.n], self.a1, nxt)
            self.a1 = nxt
    else:
        if self.n == 0:
            self.run(S[0], S[0])
        while self.n < len(S):
            # recover the individual term a_n from consecutive partial sums
            self.run(S[self.n], S[self.n] - S[self.n - 1])

    value = self.A[0] / self.B[0]
    err = abs(value - self.last)
    self.last = value

    return value, err
598
+
599
def update(self,X):
    """
    Apply the convergence acceleration to a list of individual terms.

    A = sum(a_k, k = 0..infinity)

    v, e = ...update([a_0, a_1,..., a_k])

    Returns (v, e): v is the current estimate of A, e is the absolute
    difference between this estimate and the previous one.
    """
    if self.variant == "v":
        # variant "v" needs a look-ahead term, so it lags one entry behind
        if len(X) == 1:
            self.last = 0
            return X[0], abs(X[0])
        if self.n == 0:
            self.s = X[0]
            self.run(self.s, X[0], X[1])
        while self.n < len(X) - 1:
            self.s += X[self.n]
            self.run(self.s, X[self.n], X[self.n + 1])
    else:
        if self.n == 0:
            self.s = X[0]
            self.run(self.s, X[0])
        while self.n < len(X):
            # accumulate the running partial sum alongside the terms
            self.s += X[self.n]
            self.run(self.s, X[self.n])

    value = self.A[0] / self.B[0]
    err = abs(value - self.last)
    self.last = value

    return value, err
638
+
639
+ ###########################################################################
640
+
641
def step_psum(self,s):
    """
    Apply the convergence acceleration to a single new partial sum.

    A = sum(a_k, k = 0..infinity)
    s_n = sum(a_k, k = 0..n)

    v, e = ...step_psum(s_k)

    Returns (v, e): v is the current estimate of A, e is the absolute
    difference between this estimate and the previous one.
    """
    if self.variant == "v":
        # self.last_s starts out as False; a bool therefore means
        # "no partial sum seen yet"
        if isinstance(self.last_s, bool):
            self.last_s = s
            self.last_w = s
            self.last = 0
            return s, abs(s)

        delta = s - self.last_s
        self.run(self.last_s, self.last_w, delta)
        self.last_w = delta
        self.last_s = s
    else:
        if self.n == 0:
            self.last_s = s
            self.run(s, s)
        else:
            self.run(s, s - self.last_s)
            self.last_s = s

    value = self.A[0] / self.B[0]
    err = abs(value - self.last)
    self.last = value

    return value, err
680
+
681
def step(self,x):
    """
    Apply the convergence acceleration to a single new term.

    A = sum(a_k, k = 0..infinity)

    v, e = ...step(a_k)

    Returns (v, e): v is the current estimate of A, e is the absolute
    difference between this estimate and the previous one.
    """
    if self.variant == "v":
        # self.last_s starts out as False; a bool therefore means
        # "no term seen yet"
        if isinstance(self.last_s, bool):
            self.last_s = x
            self.s = 0
            self.last = 0
            return x, abs(x)

        self.s += self.last_s
        self.run(self.s, self.last_s, x)
        self.last_s = x
    else:
        if self.n == 0:
            self.s = x
            self.run(self.s, x)
        else:
            self.s += x
            self.run(self.s, x)

    value = self.A[0] / self.B[0]
    err = abs(value - self.last)
    self.last = value

    return value, err
718
+
719
def levin(ctx, method = "levin", variant = "u"):
    # Factory: build a levin_class accelerator and bind it to the context.
    accel = levin_class(method = method, variant = variant)
    accel.ctx = ctx
    return accel

# Expose the class documentation on the factory and register it with the
# context machinery.
levin.__doc__ = levin_class.__doc__
defun(levin)
726
+
727
+
728
class cohen_alt_class:
    # cohen_alt: Copyright 2013 Timo Hartmann (thartmann15 at gmail.com)
    r"""
    This interface implements the convergence acceleration of alternating series
    as described in H. Cohen, F.R. Villegas, D. Zagier - "Convergence Acceleration
    of Alternating Series". This series transformation works only well if the
    individual terms of the series have an alternating sign. It belongs to the
    class of linear series transformations (in contrast to the Shanks/Wynn-epsilon
    or Levin transform). This series transformation is also able to sum some types
    of divergent series. See the paper under which conditions this resummation is
    mathematically sound.

    Let *A* be the series we want to sum:

    .. math ::

        A = \sum_{k=0}^{\infty} a_k

    Let `s_n` be the partial sums of this series:

    .. math ::

        s_n = \sum_{k=0}^n a_k.


    **Interface**

    Calling ``cohen_alt`` returns an object with the following methods.

    Then ``update(...)`` works with the list of individual terms `a_k` and
    ``update_psum(...)`` works with the list of partial sums `s_k`:

    .. code ::

        v, e = ...update([a_0, a_1,..., a_k])
        v, e = ...update_psum([s_0, s_1,..., s_k])

    *v* is the current estimate for *A*, and *e* is an error estimate which is
    simply the difference between the current estimate and the last estimate.

    **Examples**

    Here we compute the alternating zeta function using ``update_psum``::

        >>> from mpmath import mp
        >>> AC = mp.cohen_alt()
        >>> S, s, n = [], 0, 1
        >>> while 1:
        ...     s += -((-1) ** n) * mp.one / (n * n)
        ...     n += 1
        ...     S.append(s)
        ...     v, e = AC.update_psum(S)
        ...     if e < mp.eps:
        ...         break
        ...     if n > 1000: raise RuntimeError("iteration limit exceeded")
        >>> print(mp.chop(v - mp.pi ** 2 / 12))
        0.0

    Here we compute the product `\prod_{n=1}^{\infty} \Gamma(1+1/(2n-1)) / \Gamma(1+1/(2n))`::

        >>> A = []
        >>> AC = mp.cohen_alt()
        >>> n = 1
        >>> while 1:
        ...     A.append( mp.loggamma(1 + mp.one / (2 * n - 1)))
        ...     A.append(-mp.loggamma(1 + mp.one / (2 * n)))
        ...     n += 1
        ...     v, e = AC.update(A)
        ...     if e < mp.eps:
        ...         break
        ...     if n > 1000: raise RuntimeError("iteration limit exceeded")
        >>> v = mp.exp(v)
        >>> print(mp.chop(v - 1.06215090557106, tol = 1e-12))
        0.0

    ``cohen_alt`` is also accessible through the :func:`~mpmath.nsum` interface::

        >>> v = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "a")
        >>> print(mp.chop(v - mp.log(2)))
        0.0
        >>> v = mp.nsum(lambda n: (-1)**n / (2 * n + 1), [0, mp.inf], method = "a")
        >>> print(mp.chop(v - mp.pi / 4))
        0.0
        >>> v = mp.nsum(lambda n: (-1)**n * mp.log(n) * n, [1, mp.inf], method = "a")
        >>> print(mp.chop(v - mp.diff(lambda s: mp.altzeta(s), -1)))
        0.0

    """

    def __init__(self):
        # previous estimate, used to compute the returned error estimate
        self.last=0

    def update(self, A):
        """
        This routine applies the convergence acceleration to the list of individual terms.

        A = sum(a_k, k = 0..infinity)

        v, e = ...update([a_0, a_1,..., a_k])

        output:
        v      current estimate of the series A
        e      an error estimate which is simply the difference between the current
               estimate and the last estimate.
        """

        n = len(A)
        # d_n = ((3 + sqrt(8))^n + (3 - sqrt(8))^n) / 2, the normalizing
        # constant of Cohen/Villegas/Zagier (computed via d + 1/d)
        d = (3 + self.ctx.sqrt(8)) ** n
        d = (d + 1 / d) / 2
        b = -self.ctx.one
        c = -d
        s = 0

        for k in xrange(n):
            c = b - c
            # the explicit sign flip folds the (-1)^k of the alternating
            # series into the coefficient sum
            if k % 2 == 0:
                s = s + c * A[k]
            else:
                s = s - c * A[k]
            # NOTE: b is updated *after* being used to form c above;
            # this ordering is essential for the recurrence
            b = 2 * (k + n) * (k - n) * b / ((2 * k + 1) * (k + self.ctx.one))

        value = s / d

        err = abs(value - self.last)
        self.last = value

        return value, err

    def update_psum(self, S):
        """
        This routine applies the convergence acceleration to the list of partial sums.

        A = sum(a_k, k = 0..infinity)
        s_n = sum(a_k ,k = 0..n)

        v, e = ...update_psum([s_0, s_1,..., s_k])

        output:
        v      current estimate of the series A
        e      an error estimate which is simply the difference between the current
               estimate and the last estimate.
        """

        n = len(S)
        d = (3 + self.ctx.sqrt(8)) ** n
        d = (d + 1 / d) / 2
        b = self.ctx.one
        s = 0

        for k in xrange(n):
            # in contrast to update(), the coefficient b is advanced
            # *before* it weights the partial sum
            b = 2 * (n + k) * (n - k) * b / ((2 * k + 1) * (k + self.ctx.one))
            s += b * S[k]

        value = s / d

        err = abs(value - self.last)
        self.last = value

        return value, err
887
+
888
def cohen_alt(ctx):
    # Factory: build a cohen_alt_class accelerator and bind it to the context.
    accel = cohen_alt_class()
    accel.ctx = ctx
    return accel

# Expose the class documentation on the factory and register it with the
# context machinery.
cohen_alt.__doc__ = cohen_alt_class.__doc__
defun(cohen_alt)
895
+
896
+
897
@defun
def sumap(ctx, f, interval, integral=None, error=False):
    r"""
    Evaluates an infinite series of an analytic summand *f* using the
    Abel-Plana formula

    .. math ::

        \sum_{k=0}^{\infty} f(k) = \int_0^{\infty} f(t) dt + \frac{1}{2} f(0) +
            i \int_0^{\infty} \frac{f(it)-f(-it)}{e^{2\pi t}-1} dt.

    Unlike the Euler-Maclaurin formula (see :func:`~mpmath.sumem`),
    the Abel-Plana formula does not require derivatives. However,
    it only works when `|f(it)-f(-it)|` does not
    increase too rapidly with `t`.

    **Examples**

    The Abel-Plana formula is particularly useful when the summand
    decreases like a power of `k`; for example when the sum is a pure
    zeta function::

        >>> from mpmath import *
        >>> mp.dps = 25; mp.pretty = True
        >>> sumap(lambda k: 1/k**2.5, [1,inf])
        1.34148725725091717975677
        >>> zeta(2.5)
        1.34148725725091717975677
        >>> sumap(lambda k: 1/(k+1j)**(2.5+2.5j), [1,inf])
        (-3.385361068546473342286084 - 0.7432082105196321803869551j)
        >>> zeta(2.5+2.5j, 1+1j)
        (-3.385361068546473342286084 - 0.7432082105196321803869551j)

    If the series is alternating, numerical quadrature along the real
    line is likely to give poor results, so it is better to evaluate
    the first term symbolically whenever possible:

        >>> n=3; z=-0.75
        >>> I = expint(n,-log(z))
        >>> chop(sumap(lambda k: z**k / k**n, [1,inf], integral=I))
        -0.6917036036904594510141448
        >>> polylog(n,z)
        -0.6917036036904594510141448

    """
    prec = ctx.prec
    try:
        # extra guard digits for the two quadratures
        ctx.prec += 10
        a, b = interval
        if b != ctx.inf:
            raise ValueError("b should be equal to ctx.inf")
        # shift the summand so that the sum effectively starts at k = 0
        g = lambda x: f(x+a)
        if integral is None:
            i1, err1 = ctx.quad(g, [0,ctx.inf], error=True)
        else:
            # caller supplied the integral symbolically; no quadrature error
            i1, err1 = integral, 0
        j = ctx.j
        p = ctx.pi * 2
        if ctx._is_real_type(i1):
            # for a real summand, f(it)-f(-it) = 2i Im f(it), so the
            # correction integrand can be kept real
            h = lambda t: -2 * ctx.im(g(j*t)) / ctx.expm1(p*t)
        else:
            h = lambda t: j*(g(j*t)-g(-j*t)) / ctx.expm1(p*t)
        i2, err2 = ctx.quad(h, [0,ctx.inf], error=True)
        err = err1+err2
        # Abel-Plana: main integral + correction integral + f(0)/2 term
        v = i1+i2+0.5*g(ctx.mpf(0))
    finally:
        # always restore the caller's working precision
        ctx.prec = prec
    if error:
        return +v, err
    return +v
967
+
968
+
969
@defun
def sumem(ctx, f, interval, tol=None, reject=10, integral=None,
    adiffs=None, bdiffs=None, verbose=False, error=False,
    _fast_abort=False):
    r"""
    Uses the Euler-Maclaurin formula to compute an approximation accurate
    to within ``tol`` (which defaults to the present epsilon) of the sum

    .. math ::

        S = \sum_{k=a}^b f(k)

    where `(a,b)` are given by ``interval`` and `a` or `b` may be
    infinite. The approximation is

    .. math ::

        S \sim \int_a^b f(x) \,dx + \frac{f(a)+f(b)}{2} +
        \sum_{k=1}^{\infty} \frac{B_{2k}}{(2k)!}
        \left(f^{(2k-1)}(b)-f^{(2k-1)}(a)\right).

    The last sum in the Euler-Maclaurin formula is not generally
    convergent (a notable exception is if `f` is a polynomial, in
    which case Euler-Maclaurin actually gives an exact result).

    The summation is stopped as soon as the quotient between two
    consecutive terms falls below *reject*. That is, by default
    (*reject* = 10), the summation is continued as long as each
    term adds at least one decimal.

    Although not convergent, convergence to a given tolerance can
    often be "forced" if `b = \infty` by summing up to `a+N` and then
    applying the Euler-Maclaurin formula to the sum over the range
    `(a+N+1, \ldots, \infty)`. This procedure is implemented by
    :func:`~mpmath.nsum`.

    By default numerical quadrature and differentiation is used.
    If the symbolic values of the integral and endpoint derivatives
    are known, it is more efficient to pass the value of the
    integral explicitly as ``integral`` and the derivatives
    explicitly as ``adiffs`` and ``bdiffs``. The derivatives
    should be given as iterables that yield
    `f(a), f'(a), f''(a), \ldots` (and the equivalent for `b`).

    **Examples**

    Summation of an infinite series, with automatic and symbolic
    integral and derivative values (the second should be much faster)::

        >>> from mpmath import *
        >>> mp.dps = 50; mp.pretty = True
        >>> sumem(lambda n: 1/n**2, [32, inf])
        0.03174336652030209012658168043874142714132886413417
        >>> I = mpf(1)/32
        >>> D = adiffs=((-1)**n*fac(n+1)*32**(-2-n) for n in range(999))
        >>> sumem(lambda n: 1/n**2, [32, inf], integral=I, adiffs=D)
        0.03174336652030209012658168043874142714132886413417

    An exact evaluation of a finite polynomial sum::

        >>> sumem(lambda n: n**5-12*n**2+3*n, [-100000, 200000])
        10500155000624963999742499550000.0
        >>> print(sum(n**5-12*n**2+3*n for n in range(-100000, 200001)))
        10500155000624963999742499550000

    """
    tol = tol or +ctx.eps
    interval = ctx._as_points(interval)
    a = ctx.convert(interval[0])
    b = ctx.convert(interval[-1])
    err = ctx.zero
    prev = 0
    # cap on the number of derivative terms consumed for infinite endpoints
    M = 10000
    # at an infinite endpoint all derivative contributions vanish
    if a == ctx.ninf: adiffs = (0 for n in xrange(M))
    else: adiffs = adiffs or ctx.diffs(f, a)
    if b == ctx.inf: bdiffs = (0 for n in xrange(M))
    else: bdiffs = bdiffs or ctx.diffs(f, b)
    orig = ctx.prec
    #verbose = 1
    try:
        ctx.prec += 10
        s = ctx.zero
        for k, (da, db) in enumerate(izip(adiffs, bdiffs)):
            # only odd k contribute: B_{k+1} = 0 for odd k+1 > 1
            if k & 1:
                term = (db-da) * ctx.bernoulli(k+1) / ctx.factorial(k+1)
                mag = abs(term)
                if verbose:
                    print("term", k, "magnitude =", ctx.nstr(mag))
                # converged: the term dropped below the tolerance
                if k > 4 and mag < tol:
                    s += term
                    break
                # diverging: consecutive terms no longer shrink by a
                # factor of at least `reject`
                elif k > 4 and abs(prev) / mag < reject:
                    err += mag
                    if _fast_abort:
                        return [s, (s, err)][error]
                    if verbose:
                        print("Failed to converge")
                    break
                else:
                    s += term
                prev = term
        # Endpoint correction
        if a != ctx.ninf: s += f(a)/2
        if b != ctx.inf: s += f(b)/2
        # Tail integral
        if verbose:
            print("Integrating f(x) from x = %s to %s" % (ctx.nstr(a), ctx.nstr(b)))
        if integral:
            s += integral
        else:
            integral, ierr = ctx.quad(f, interval, error=True)
            if verbose:
                print("Integration error:", ierr)
            s += integral
            err += ierr
    finally:
        # always restore the caller's working precision
        ctx.prec = orig
    if error:
        return s, err
    else:
        return s
1090
+
1091
@defun
def adaptive_extrapolation(ctx, update, emfun, kwargs):
    # Driver behind nsum: repeatedly fetches batches of partial sums via
    # update(partial, range) and tries every enabled extrapolation scheme
    # until one reaches the target tolerance.
    #   update -- callable appending new partial sums to a list
    #   emfun  -- callable (index, tol) -> (tail estimate, error) used by
    #             the Euler-Maclaurin fallback
    #   kwargs -- option dict (tol, method, maxterms, steps, ...)
    option = kwargs.get
    if ctx._fixed_precision:
        tol = option('tol', ctx.eps*2**10)
    else:
        tol = option('tol', ctx.eps/2**10)
    verbose = option('verbose', False)
    maxterms = option('maxterms', ctx.dps*10)
    # methods are given as '+'-separated tokens, e.g. "r+s+e"
    method = set(option('method', 'r+s').split('+'))
    skip = option('skip', 0)
    steps = iter(option('steps', xrange(10, 10**9, 10)))
    strict = option('strict')
    #steps = (10 for i in xrange(1000))
    summer=[]
    if 'd' in method or 'direct' in method:
        TRY_RICHARDSON = TRY_SHANKS = TRY_EULER_MACLAURIN = False
    else:
        TRY_RICHARDSON = ('r' in method) or ('richardson' in method)
        TRY_SHANKS = ('s' in method) or ('shanks' in method)
        TRY_EULER_MACLAURIN = ('e' in method) or \
            ('euler-maclaurin' in method)

    # Instantiate one Levin/Sidi summer per requested weight variant;
    # each exposes the update_psum interface used in the main loop.
    def init_levin(m):
        variant = kwargs.get("levin_variant", "u")
        if isinstance(variant, str):
            if variant == "all":
                variant = ["u", "v", "t"]
            else:
                variant = [variant]
        for s in variant:
            L = levin_class(method = m, variant = s)
            L.ctx = ctx
            L.name = m + "(" + s + ")"
            summer.append(L)

    if ('l' in method) or ('levin' in method):
        init_levin("levin")

    if ('sidi' in method):
        init_levin("sidi")

    if ('a' in method) or ('alternating' in method):
        L = cohen_alt_class()
        L.ctx = ctx
        L.name = "alternating"
        summer.append(L)

    last_richardson_value = 0
    shanks_table = []
    index = 0
    step = 10
    partial = []
    best = ctx.zero
    orig = ctx.prec
    try:
        if 'workprec' in kwargs:
            ctx.prec = kwargs['workprec']
        elif TRY_RICHARDSON or TRY_SHANKS or len(summer)!=0:
            # acceleration schemes suffer from heavy cancellation, so
            # the working precision is roughly quadrupled
            ctx.prec = (ctx.prec+10) * 4
        else:
            ctx.prec += 30
        while 1:
            if index >= maxterms:
                break

            # Get new batch of terms
            try:
                step = next(steps)
            except StopIteration:
                # steps exhausted: keep reusing the last step size
                pass
            if verbose:
                print("-"*70)
                print("Adding terms #%i-#%i" % (index, index+step))
            update(partial, xrange(index, index+step))
            index += step

            # Check direct error
            best = partial[-1]
            error = abs(best - partial[-2])
            if verbose:
                print("Direct error: %s" % ctx.nstr(error))
            if error <= tol:
                return best

            # Check each extrapolation method
            if TRY_RICHARDSON:
                value, maxc = ctx.richardson(partial)
                # Convergence
                richardson_error = abs(value - last_richardson_value)
                if verbose:
                    print("Richardson error: %s" % ctx.nstr(richardson_error))
                # Convergence
                if richardson_error <= tol:
                    return value
                last_richardson_value = value
                # Unreliable due to cancellation
                if ctx.eps*maxc > tol:
                    if verbose:
                        print("Ran out of precision for Richardson")
                    TRY_RICHARDSON = False
                if richardson_error < error:
                    error = richardson_error
                    best = value
            if TRY_SHANKS:
                shanks_table = ctx.shanks(partial, shanks_table, randomized=True)
                row = shanks_table[-1]
                if len(row) == 2:
                    # too little data for an error estimate yet
                    est1 = row[-1]
                    shanks_error = 0
                else:
                    est1, maxc, est2 = row[-1], abs(row[-2]), row[-3]
                    shanks_error = abs(est1-est2)
                if verbose:
                    print("Shanks error: %s" % ctx.nstr(shanks_error))
                if shanks_error <= tol:
                    return est1
                if ctx.eps*maxc > tol:
                    if verbose:
                        print("Ran out of precision for Shanks")
                    TRY_SHANKS = False
                if shanks_error < error:
                    error = shanks_error
                    best = est1
            for L in summer:
                est, lerror = L.update_psum(partial)
                if verbose:
                    print("%s error: %s" % (L.name, ctx.nstr(lerror)))
                if lerror <= tol:
                    return est
                if lerror < error:
                    error = lerror
                    best = est
            if TRY_EULER_MACLAURIN:
                # an alternating sign pattern makes the quadrature inside
                # Euler-Maclaurin unreliable, so bail out in that case
                if ctx.almosteq(ctx.mpc(ctx.sign(partial[-1]) / ctx.sign(partial[-2])), -1):
                    if verbose:
                        print ("NOT using Euler-Maclaurin: the series appears"
                            " to be alternating, so numerical\n quadrature"
                            " will most likely fail")
                    TRY_EULER_MACLAURIN = False
                else:
                    value, em_error = emfun(index, tol)
                    value += partial[-1]
                    if verbose:
                        print("Euler-Maclaurin error: %s" % ctx.nstr(em_error))
                    if em_error <= tol:
                        return value
                    if em_error < error:
                        # NOTE(review): unlike the Richardson/Shanks/summer
                        # branches above, `error` is not updated here --
                        # presumably intentional, but worth confirming
                        best = value
    finally:
        # always restore the caller's working precision
        ctx.prec = orig
    if strict:
        raise ctx.NoConvergence
    if verbose:
        print("Warning: failed to converge to target accuracy")
    return best
1247
+
1248
+ @defun
1249
+ def nsum(ctx, f, *intervals, **options):
1250
+ r"""
1251
+ Computes the sum
1252
+
1253
+ .. math :: S = \sum_{k=a}^b f(k)
1254
+
1255
+ where `(a, b)` = *interval*, and where `a = -\infty` and/or
1256
+ `b = \infty` are allowed, or more generally
1257
+
1258
+ .. math :: S = \sum_{k_1=a_1}^{b_1} \cdots
1259
+ \sum_{k_n=a_n}^{b_n} f(k_1,\ldots,k_n)
1260
+
1261
+ if multiple intervals are given.
1262
+
1263
+ Two examples of infinite series that can be summed by :func:`~mpmath.nsum`,
1264
+ where the first converges rapidly and the second converges slowly,
1265
+ are::
1266
+
1267
+ >>> from mpmath import *
1268
+ >>> mp.dps = 15; mp.pretty = True
1269
+ >>> nsum(lambda n: 1/fac(n), [0, inf])
1270
+ 2.71828182845905
1271
+ >>> nsum(lambda n: 1/n**2, [1, inf])
1272
+ 1.64493406684823
1273
+
1274
+ When appropriate, :func:`~mpmath.nsum` applies convergence acceleration to
1275
+ accurately estimate the sums of slowly convergent series. If the series is
1276
+ finite, :func:`~mpmath.nsum` currently does not attempt to perform any
1277
+ extrapolation, and simply calls :func:`~mpmath.fsum`.
1278
+
1279
+ Multidimensional infinite series are reduced to a single-dimensional
1280
+ series over expanding hypercubes; if both infinite and finite dimensions
1281
+ are present, the finite ranges are moved innermost. For more advanced
1282
+ control over the summation order, use nested calls to :func:`~mpmath.nsum`,
1283
+ or manually rewrite the sum as a single-dimensional series.
1284
+
1285
+ **Options**
1286
+
1287
+ *tol*
1288
+ Desired maximum final error. Defaults roughly to the
1289
+ epsilon of the working precision.
1290
+
1291
+ *method*
1292
+ Which summation algorithm to use (described below).
1293
+ Default: ``'richardson+shanks'``.
1294
+
1295
+ *maxterms*
1296
+ Cancel after at most this many terms. Default: 10*dps.
1297
+
1298
+ *steps*
1299
+ An iterable giving the number of terms to add between
1300
+ each extrapolation attempt. The default sequence is
1301
+ [10, 20, 30, 40, ...]. For example, if you know that
1302
+ approximately 100 terms will be required, efficiency might be
1303
+ improved by setting this to [100, 10]. Then the first
1304
+ extrapolation will be performed after 100 terms, the second
1305
+ after 110, etc.
1306
+
1307
+ *verbose*
1308
+ Print details about progress.
1309
+
1310
+ *ignore*
1311
+ If enabled, any term that raises ``ArithmeticError``
1312
+ or ``ValueError`` (e.g. through division by zero) is replaced
1313
+ by a zero. This is convenient for lattice sums with
1314
+ a singular term near the origin.
1315
+
1316
+ **Methods**
1317
+
1318
+ Unfortunately, an algorithm that can efficiently sum any infinite
1319
+ series does not exist. :func:`~mpmath.nsum` implements several different
1320
+ algorithms that each work well in different cases. The *method*
1321
+ keyword argument selects a method.
1322
+
1323
+ The default method is ``'r+s'``, i.e. both Richardson extrapolation
1324
+ and Shanks transformation is attempted. A slower method that
1325
+ handles more cases is ``'r+s+e'``. For very high precision
1326
+ summation, or if the summation needs to be fast (for example if
1327
+ multiple sums need to be evaluated), it is a good idea to
1328
+ investigate which one method works best and only use that.
1329
+
1330
+ ``'richardson'`` / ``'r'``:
1331
+ Uses Richardson extrapolation. Provides useful extrapolation
1332
+ when `f(k) \sim P(k)/Q(k)` or when `f(k) \sim (-1)^k P(k)/Q(k)`
1333
+ for polynomials `P` and `Q`. See :func:`~mpmath.richardson` for
1334
+ additional information.
1335
+
1336
+ ``'shanks'`` / ``'s'``:
1337
+ Uses Shanks transformation. Typically provides useful
1338
+ extrapolation when `f(k) \sim c^k` or when successive terms
1339
+ alternate signs. Is able to sum some divergent series.
1340
+ See :func:`~mpmath.shanks` for additional information.
1341
+
1342
+ ``'levin'`` / ``'l'``:
1343
+ Uses the Levin transformation. It performs better than the Shanks
1344
+ transformation for logarithmic convergent or alternating divergent
1345
+ series. The ``'levin_variant'``-keyword selects the variant. Valid
1346
+ choices are "u", "t", "v" and "all" whereby "all" uses all three
1347
+ u,t and v simultanously (This is good for performance comparison in
1348
+ conjunction with "verbose=True"). Instead of the Levin transform one can
1349
+ also use the Sidi-S transform by selecting the method ``'sidi'``.
1350
+ See :func:`~mpmath.levin` for additional details.
1351
+
1352
+ ``'alternating'`` / ``'a'``:
1353
+ This is the convergence acceleration of alternating series developped
1354
+ by Cohen, Villegras and Zagier.
1355
+ See :func:`~mpmath.cohen_alt` for additional details.
1356
+
1357
+ ``'euler-maclaurin'`` / ``'e'``:
1358
+ Uses the Euler-Maclaurin summation formula to approximate
1359
+ the remainder sum by an integral. This requires high-order
1360
+ numerical derivatives and numerical integration. The advantage
1361
+ of this algorithm is that it works regardless of the
1362
+ decay rate of `f`, as long as `f` is sufficiently smooth.
1363
+ See :func:`~mpmath.sumem` for additional information.
1364
+
1365
+ ``'direct'`` / ``'d'``:
1366
+ Does not perform any extrapolation. This can be used
1367
+ (and should only be used for) rapidly convergent series.
1368
+ The summation automatically stops when the terms
1369
+ decrease below the target tolerance.
1370
+
1371
+ **Basic examples**
1372
+
1373
+ A finite sum::
1374
+
1375
+ >>> nsum(lambda k: 1/k, [1, 6])
1376
+ 2.45
1377
+
1378
+ Summation of a series going to negative infinity and a doubly
1379
+ infinite series::
1380
+
1381
+ >>> nsum(lambda k: 1/k**2, [-inf, -1])
1382
+ 1.64493406684823
1383
+ >>> nsum(lambda k: 1/(1+k**2), [-inf, inf])
1384
+ 3.15334809493716
1385
+
1386
+ :func:`~mpmath.nsum` handles sums of complex numbers::
1387
+
1388
+ >>> nsum(lambda k: (0.5+0.25j)**k, [0, inf])
1389
+ (1.6 + 0.8j)
1390
+
1391
+ The following sum converges very rapidly, so it is most
1392
+ efficient to sum it by disabling convergence acceleration::
1393
+
1394
+ >>> mp.dps = 1000
1395
+ >>> a = nsum(lambda k: -(-1)**k * k**2 / fac(2*k), [1, inf],
1396
+ ... method='direct')
1397
+ >>> b = (cos(1)+sin(1))/4
1398
+ >>> abs(a-b) < mpf('1e-998')
1399
+ True
1400
+
1401
+ **Examples with Richardson extrapolation**
1402
+
1403
+ Richardson extrapolation works well for sums over rational
1404
+ functions, as well as their alternating counterparts::
1405
+
1406
+ >>> mp.dps = 50
1407
+ >>> nsum(lambda k: 1 / k**3, [1, inf],
1408
+ ... method='richardson')
1409
+ 1.2020569031595942853997381615114499907649862923405
1410
+ >>> zeta(3)
1411
+ 1.2020569031595942853997381615114499907649862923405
1412
+
1413
+ >>> nsum(lambda n: (n + 3)/(n**3 + n**2), [1, inf],
1414
+ ... method='richardson')
1415
+ 2.9348022005446793094172454999380755676568497036204
1416
+ >>> pi**2/2-2
1417
+ 2.9348022005446793094172454999380755676568497036204
1418
+
1419
+ >>> nsum(lambda k: (-1)**k / k**3, [1, inf],
1420
+ ... method='richardson')
1421
+ -0.90154267736969571404980362113358749307373971925537
1422
+ >>> -3*zeta(3)/4
1423
+ -0.90154267736969571404980362113358749307373971925538
1424
+
1425
+ **Examples with Shanks transformation**
1426
+
1427
+ The Shanks transformation works well for geometric series
1428
+ and typically provides excellent acceleration for Taylor
1429
+ series near the border of their disk of convergence.
1430
+ Here we apply it to a series for `\log(2)`, which can be
1431
+ seen as the Taylor series for `\log(1+x)` with `x = 1`::
1432
+
1433
+ >>> nsum(lambda k: -(-1)**k/k, [1, inf],
1434
+ ... method='shanks')
1435
+ 0.69314718055994530941723212145817656807550013436025
1436
+ >>> log(2)
1437
+ 0.69314718055994530941723212145817656807550013436025
1438
+
1439
+ Here we apply it to a slowly convergent geometric series::
1440
+
1441
+ >>> nsum(lambda k: mpf('0.995')**k, [0, inf],
1442
+ ... method='shanks')
1443
+ 200.0
1444
+
1445
+ Finally, Shanks' method works very well for alternating series
1446
+ where `f(k) = (-1)^k g(k)`, and often does so regardless of
1447
+ the exact decay rate of `g(k)`::
1448
+
1449
+ >>> mp.dps = 15
1450
+ >>> nsum(lambda k: (-1)**(k+1) / k**1.5, [1, inf],
1451
+ ... method='shanks')
1452
+ 0.765147024625408
1453
+ >>> (2-sqrt(2))*zeta(1.5)/2
1454
+ 0.765147024625408
1455
+
1456
+ The following slowly convergent alternating series has no known
1457
+ closed-form value. Evaluating the sum a second time at higher
1458
+ precision indicates that the value is probably correct::
1459
+
1460
+ >>> nsum(lambda k: (-1)**k / log(k), [2, inf],
1461
+ ... method='shanks')
1462
+ 0.924299897222939
1463
+ >>> mp.dps = 30
1464
+ >>> nsum(lambda k: (-1)**k / log(k), [2, inf],
1465
+ ... method='shanks')
1466
+ 0.92429989722293885595957018136
1467
+
1468
+ **Examples with Levin transformation**
1469
+
1470
+ The following example calculates Euler's constant as the constant term in
1471
+ the Laurent expansion of zeta(s) at s=1. This sum converges extremly slow
1472
+ because of the logarithmic convergence behaviour of the Dirichlet series
1473
+ for zeta.
1474
+
1475
+ >>> mp.dps = 30
1476
+ >>> z = mp.mpf(10) ** (-10)
1477
+ >>> a = mp.nsum(lambda n: n**(-(1+z)), [1, mp.inf], method = "levin") - 1 / z
1478
+ >>> print(mp.chop(a - mp.euler, tol = 1e-10))
1479
+ 0.0
1480
+
1481
+ Now we sum the zeta function outside its range of convergence
1482
+ (attention: This does not work at the negative integers!):
1483
+
1484
+ >>> mp.dps = 15
1485
+ >>> w = mp.nsum(lambda n: n ** (2 + 3j), [1, mp.inf], method = "levin", levin_variant = "v")
1486
+ >>> print(mp.chop(w - mp.zeta(-2-3j)))
1487
+ 0.0
1488
+
1489
+ The next example resummates an asymptotic series expansion of an integral
1490
+ related to the exponential integral.
1491
+
1492
+ >>> mp.dps = 15
1493
+ >>> z = mp.mpf(10)
1494
+ >>> # exact = mp.quad(lambda x: mp.exp(-x)/(1+x/z),[0,mp.inf])
1495
+ >>> exact = z * mp.exp(z) * mp.expint(1,z) # this is the symbolic expression for the integral
1496
+ >>> w = mp.nsum(lambda n: (-1) ** n * mp.fac(n) * z ** (-n), [0, mp.inf], method = "sidi", levin_variant = "t")
1497
+ >>> print(mp.chop(w - exact))
1498
+ 0.0
1499
+
1500
+ Following highly divergent asymptotic expansion needs some care. Firstly we
1501
+ need copious amount of working precision. Secondly the stepsize must not be
1502
+ chosen to large, otherwise nsum may miss the point where the Levin transform
1503
+ converges and reach the point where only numerical garbage is produced due to
1504
+ numerical cancellation.
1505
+
1506
+ >>> mp.dps = 15
1507
+ >>> z = mp.mpf(2)
1508
+ >>> # exact = mp.quad(lambda x: mp.exp( -x * x / 2 - z * x ** 4), [0,mp.inf]) * 2 / mp.sqrt(2 * mp.pi)
1509
+ >>> exact = mp.exp(mp.one / (32 * z)) * mp.besselk(mp.one / 4, mp.one / (32 * z)) / (4 * mp.sqrt(z * mp.pi)) # this is the symbolic expression for the integral
1510
+ >>> w = mp.nsum(lambda n: (-z)**n * mp.fac(4 * n) / (mp.fac(n) * mp.fac(2 * n) * (4 ** n)),
1511
+ ... [0, mp.inf], method = "levin", levin_variant = "t", workprec = 8*mp.prec, steps = [2] + [1 for x in xrange(1000)])
1512
+ >>> print(mp.chop(w - exact))
1513
+ 0.0
1514
+
1515
+ The hypergeoemtric function can also be summed outside its range of convergence:
1516
+
1517
+ >>> mp.dps = 15
1518
+ >>> z = 2 + 1j
1519
+ >>> exact = mp.hyp2f1(2 / mp.mpf(3), 4 / mp.mpf(3), 1 / mp.mpf(3), z)
1520
+ >>> f = lambda n: mp.rf(2 / mp.mpf(3), n) * mp.rf(4 / mp.mpf(3), n) * z**n / (mp.rf(1 / mp.mpf(3), n) * mp.fac(n))
1521
+ >>> v = mp.nsum(f, [0, mp.inf], method = "levin", steps = [10 for x in xrange(1000)])
1522
+ >>> print(mp.chop(exact-v))
1523
+ 0.0
1524
+
1525
+ **Examples with Cohen's alternating series resummation**
1526
+
1527
+ The next example sums the alternating zeta function:
1528
+
1529
+ >>> v = mp.nsum(lambda n: (-1)**(n-1) / n, [1, mp.inf], method = "a")
1530
+ >>> print(mp.chop(v - mp.log(2)))
1531
+ 0.0
1532
+
1533
+ The derivate of the alternating zeta function outside its range of
1534
+ convergence:
1535
+
1536
+ >>> v = mp.nsum(lambda n: (-1)**n * mp.log(n) * n, [1, mp.inf], method = "a")
1537
+ >>> print(mp.chop(v - mp.diff(lambda s: mp.altzeta(s), -1)))
1538
+ 0.0
1539
+
1540
+ **Examples with Euler-Maclaurin summation**
1541
+
1542
+ The sum in the following example has the wrong rate of convergence
1543
+ for either Richardson or Shanks to be effective.
1544
+
1545
+ >>> f = lambda k: log(k)/k**2.5
1546
+ >>> mp.dps = 15
1547
+ >>> nsum(f, [1, inf], method='euler-maclaurin')
1548
+ 0.38734195032621
1549
+ >>> -diff(zeta, 2.5)
1550
+ 0.38734195032621
1551
+
1552
+ Increasing ``steps`` improves speed at higher precision::
1553
+
1554
+ >>> mp.dps = 50
1555
+ >>> nsum(f, [1, inf], method='euler-maclaurin', steps=[250])
1556
+ 0.38734195032620997271199237593105101319948228874688
1557
+ >>> -diff(zeta, 2.5)
1558
+ 0.38734195032620997271199237593105101319948228874688
1559
+
1560
+ **Divergent series**
1561
+
1562
+ The Shanks transformation is able to sum some *divergent*
1563
+ series. In particular, it is often able to sum Taylor series
1564
+ beyond their radius of convergence (this is due to a relation
1565
+ between the Shanks transformation and Pade approximations;
1566
+ see :func:`~mpmath.pade` for an alternative way to evaluate divergent
1567
+ Taylor series). Furthermore the Levin-transform examples above
1568
+ contain some divergent series resummation.
1569
+
1570
+ Here we apply it to `\log(1+x)` far outside the region of
1571
+ convergence::
1572
+
1573
+ >>> mp.dps = 50
1574
+ >>> nsum(lambda k: -(-9)**k/k, [1, inf],
1575
+ ... method='shanks')
1576
+ 2.3025850929940456840179914546843642076011014886288
1577
+ >>> log(10)
1578
+ 2.3025850929940456840179914546843642076011014886288
1579
+
1580
+ A particular type of divergent series that can be summed
1581
+ using the Shanks transformation is geometric series.
1582
+ The result is the same as using the closed-form formula
1583
+ for an infinite geometric series::
1584
+
1585
+ >>> mp.dps = 15
1586
+ >>> for n in range(-8, 8):
1587
+ ... if n == 1:
1588
+ ... continue
1589
+ ... print("%s %s %s" % (mpf(n), mpf(1)/(1-n),
1590
+ ... nsum(lambda k: n**k, [0, inf], method='shanks')))
1591
+ ...
1592
+ -8.0 0.111111111111111 0.111111111111111
1593
+ -7.0 0.125 0.125
1594
+ -6.0 0.142857142857143 0.142857142857143
1595
+ -5.0 0.166666666666667 0.166666666666667
1596
+ -4.0 0.2 0.2
1597
+ -3.0 0.25 0.25
1598
+ -2.0 0.333333333333333 0.333333333333333
1599
+ -1.0 0.5 0.5
1600
+ 0.0 1.0 1.0
1601
+ 2.0 -1.0 -1.0
1602
+ 3.0 -0.5 -0.5
1603
+ 4.0 -0.333333333333333 -0.333333333333333
1604
+ 5.0 -0.25 -0.25
1605
+ 6.0 -0.2 -0.2
1606
+ 7.0 -0.166666666666667 -0.166666666666667
1607
+
1608
+ **Multidimensional sums**
1609
+
1610
+ Any combination of finite and infinite ranges is allowed for the
1611
+ summation indices::
1612
+
1613
+ >>> mp.dps = 15
1614
+ >>> nsum(lambda x,y: x+y, [2,3], [4,5])
1615
+ 28.0
1616
+ >>> nsum(lambda x,y: x/2**y, [1,3], [1,inf])
1617
+ 6.0
1618
+ >>> nsum(lambda x,y: y/2**x, [1,inf], [1,3])
1619
+ 6.0
1620
+ >>> nsum(lambda x,y,z: z/(2**x*2**y), [1,inf], [1,inf], [3,4])
1621
+ 7.0
1622
+ >>> nsum(lambda x,y,z: y/(2**x*2**z), [1,inf], [3,4], [1,inf])
1623
+ 7.0
1624
+ >>> nsum(lambda x,y,z: x/(2**z*2**y), [3,4], [1,inf], [1,inf])
1625
+ 7.0
1626
+
1627
+ Some nice examples of double series with analytic solutions or
1628
+ reductions to single-dimensional series (see [1])::
1629
+
1630
+ >>> nsum(lambda m, n: 1/2**(m*n), [1,inf], [1,inf])
1631
+ 1.60669515241529
1632
+ >>> nsum(lambda n: 1/(2**n-1), [1,inf])
1633
+ 1.60669515241529
1634
+
1635
+ >>> nsum(lambda i,j: (-1)**(i+j)/(i**2+j**2), [1,inf], [1,inf])
1636
+ 0.278070510848213
1637
+ >>> pi*(pi-3*ln2)/12
1638
+ 0.278070510848213
1639
+
1640
+ >>> nsum(lambda i,j: (-1)**(i+j)/(i+j)**2, [1,inf], [1,inf])
1641
+ 0.129319852864168
1642
+ >>> altzeta(2) - altzeta(1)
1643
+ 0.129319852864168
1644
+
1645
+ >>> nsum(lambda i,j: (-1)**(i+j)/(i+j)**3, [1,inf], [1,inf])
1646
+ 0.0790756439455825
1647
+ >>> altzeta(3) - altzeta(2)
1648
+ 0.0790756439455825
1649
+
1650
+ >>> nsum(lambda m,n: m**2*n/(3**m*(n*3**m+m*3**n)),
1651
+ ... [1,inf], [1,inf])
1652
+ 0.28125
1653
+ >>> mpf(9)/32
1654
+ 0.28125
1655
+
1656
+ >>> nsum(lambda i,j: fac(i-1)*fac(j-1)/fac(i+j),
1657
+ ... [1,inf], [1,inf], workprec=400)
1658
+ 1.64493406684823
1659
+ >>> zeta(2)
1660
+ 1.64493406684823
1661
+
1662
+ A hard example of a multidimensional sum is the Madelung constant
1663
+ in three dimensions (see [2]). The defining sum converges very
1664
+ slowly and only conditionally, so :func:`~mpmath.nsum` is lucky to
1665
+ obtain an accurate value through convergence acceleration. The
1666
+ second evaluation below uses a much more efficient, rapidly
1667
+ convergent 2D sum::
1668
+
1669
+ >>> nsum(lambda x,y,z: (-1)**(x+y+z)/(x*x+y*y+z*z)**0.5,
1670
+ ... [-inf,inf], [-inf,inf], [-inf,inf], ignore=True)
1671
+ -1.74756459463318
1672
+ >>> nsum(lambda x,y: -12*pi*sech(0.5*pi * \
1673
+ ... sqrt((2*x+1)**2+(2*y+1)**2))**2, [0,inf], [0,inf])
1674
+ -1.74756459463318
1675
+
1676
+ Another example of a lattice sum in 2D::
1677
+
1678
+ >>> nsum(lambda x,y: (-1)**(x+y) / (x**2+y**2), [-inf,inf],
1679
+ ... [-inf,inf], ignore=True)
1680
+ -2.1775860903036
1681
+ >>> -pi*ln2
1682
+ -2.1775860903036
1683
+
1684
+ An example of an Eisenstein series::
1685
+
1686
+ >>> nsum(lambda m,n: (m+n*1j)**(-4), [-inf,inf], [-inf,inf],
1687
+ ... ignore=True)
1688
+ (3.1512120021539 + 0.0j)
1689
+
1690
+ **References**
1691
+
1692
+ 1. [Weisstein]_ http://mathworld.wolfram.com/DoubleSeries.html,
1693
+ 2. [Weisstein]_ http://mathworld.wolfram.com/MadelungConstants.html
1694
+
1695
+ """
1696
+ infinite, g = standardize(ctx, f, intervals, options)
1697
+ if not infinite:
1698
+ return +g()
1699
+
1700
+ def update(partial_sums, indices):
1701
+ if partial_sums:
1702
+ psum = partial_sums[-1]
1703
+ else:
1704
+ psum = ctx.zero
1705
+ for k in indices:
1706
+ psum = psum + g(ctx.mpf(k))
1707
+ partial_sums.append(psum)
1708
+
1709
+ prec = ctx.prec
1710
+
1711
+ def emfun(point, tol):
1712
+ workprec = ctx.prec
1713
+ ctx.prec = prec + 10
1714
+ v = ctx.sumem(g, [point, ctx.inf], tol, error=1)
1715
+ ctx.prec = workprec
1716
+ return v
1717
+
1718
+ return +ctx.adaptive_extrapolation(update, emfun, options)
1719
+
1720
+
1721
+ def wrapsafe(f):
1722
+ def g(*args):
1723
+ try:
1724
+ return f(*args)
1725
+ except (ArithmeticError, ValueError):
1726
+ return 0
1727
+ return g
1728
+
1729
+ def standardize(ctx, f, intervals, options):
1730
+ if options.get("ignore"):
1731
+ f = wrapsafe(f)
1732
+ finite = []
1733
+ infinite = []
1734
+ for k, points in enumerate(intervals):
1735
+ a, b = ctx._as_points(points)
1736
+ if b < a:
1737
+ return False, (lambda: ctx.zero)
1738
+ if a == ctx.ninf or b == ctx.inf:
1739
+ infinite.append((k, (a,b)))
1740
+ else:
1741
+ finite.append((k, (int(a), int(b))))
1742
+ if finite:
1743
+ f = fold_finite(ctx, f, finite)
1744
+ if not infinite:
1745
+ return False, lambda: f(*([0]*len(intervals)))
1746
+ if infinite:
1747
+ f = standardize_infinite(ctx, f, infinite)
1748
+ f = fold_infinite(ctx, f, infinite)
1749
+ args = [0] * len(intervals)
1750
+ d = infinite[0][0]
1751
+ def g(k):
1752
+ args[d] = k
1753
+ return f(*args)
1754
+ return True, g
1755
+
1756
+ # backwards compatible itertools.product
1757
+ def cartesian_product(args):
1758
+ pools = map(tuple, args)
1759
+ result = [[]]
1760
+ for pool in pools:
1761
+ result = [x+[y] for x in result for y in pool]
1762
+ for prod in result:
1763
+ yield tuple(prod)
1764
+
1765
+ def fold_finite(ctx, f, intervals):
1766
+ if not intervals:
1767
+ return f
1768
+ indices = [v[0] for v in intervals]
1769
+ points = [v[1] for v in intervals]
1770
+ ranges = [xrange(a, b+1) for (a,b) in points]
1771
+ def g(*args):
1772
+ args = list(args)
1773
+ s = ctx.zero
1774
+ for xs in cartesian_product(ranges):
1775
+ for dim, x in zip(indices, xs):
1776
+ args[dim] = ctx.mpf(x)
1777
+ s += f(*args)
1778
+ return s
1779
+ #print "Folded finite", indices
1780
+ return g
1781
+
1782
+ # Standardize each interval to [0,inf]
1783
+ def standardize_infinite(ctx, f, intervals):
1784
+ if not intervals:
1785
+ return f
1786
+ dim, [a,b] = intervals[-1]
1787
+ if a == ctx.ninf:
1788
+ if b == ctx.inf:
1789
+ def g(*args):
1790
+ args = list(args)
1791
+ k = args[dim]
1792
+ if k:
1793
+ s = f(*args)
1794
+ args[dim] = -k
1795
+ s += f(*args)
1796
+ return s
1797
+ else:
1798
+ return f(*args)
1799
+ else:
1800
+ def g(*args):
1801
+ args = list(args)
1802
+ args[dim] = b - args[dim]
1803
+ return f(*args)
1804
+ else:
1805
+ def g(*args):
1806
+ args = list(args)
1807
+ args[dim] += a
1808
+ return f(*args)
1809
+ #print "Standardized infinity along dimension", dim, a, b
1810
+ return standardize_infinite(ctx, g, intervals[:-1])
1811
+
1812
+ def fold_infinite(ctx, f, intervals):
1813
+ if len(intervals) < 2:
1814
+ return f
1815
+ dim1 = intervals[-2][0]
1816
+ dim2 = intervals[-1][0]
1817
+ # Assume intervals are [0,inf] x [0,inf] x ...
1818
+ def g(*args):
1819
+ args = list(args)
1820
+ #args.insert(dim2, None)
1821
+ n = int(args[dim1])
1822
+ s = ctx.zero
1823
+ #y = ctx.mpf(n)
1824
+ args[dim2] = ctx.mpf(n) #y
1825
+ for x in xrange(n+1):
1826
+ args[dim1] = ctx.mpf(x)
1827
+ s += f(*args)
1828
+ args[dim1] = ctx.mpf(n) #ctx.mpf(n)
1829
+ for y in xrange(n):
1830
+ args[dim2] = ctx.mpf(y)
1831
+ s += f(*args)
1832
+ return s
1833
+ #print "Folded infinite from", len(intervals), "to", (len(intervals)-1)
1834
+ return fold_infinite(ctx, g, intervals[:-1])
1835
+
1836
+ @defun
1837
+ def nprod(ctx, f, interval, nsum=False, **kwargs):
1838
+ r"""
1839
+ Computes the product
1840
+
1841
+ .. math ::
1842
+
1843
+ P = \prod_{k=a}^b f(k)
1844
+
1845
+ where `(a, b)` = *interval*, and where `a = -\infty` and/or
1846
+ `b = \infty` are allowed.
1847
+
1848
+ By default, :func:`~mpmath.nprod` uses the same extrapolation methods as
1849
+ :func:`~mpmath.nsum`, except applied to the partial products rather than
1850
+ partial sums, and the same keyword options as for :func:`~mpmath.nsum` are
1851
+ supported. If ``nsum=True``, the product is instead computed via
1852
+ :func:`~mpmath.nsum` as
1853
+
1854
+ .. math ::
1855
+
1856
+ P = \exp\left( \sum_{k=a}^b \log(f(k)) \right).
1857
+
1858
+ This is slower, but can sometimes yield better results. It is
1859
+ also required (and used automatically) when Euler-Maclaurin
1860
+ summation is requested.
1861
+
1862
+ **Examples**
1863
+
1864
+ A simple finite product::
1865
+
1866
+ >>> from mpmath import *
1867
+ >>> mp.dps = 25; mp.pretty = True
1868
+ >>> nprod(lambda k: k, [1, 4])
1869
+ 24.0
1870
+
1871
+ A large number of infinite products have known exact values,
1872
+ and can therefore be used as a reference. Most of the following
1873
+ examples are taken from MathWorld [1].
1874
+
1875
+ A few infinite products with simple values are::
1876
+
1877
+ >>> 2*nprod(lambda k: (4*k**2)/(4*k**2-1), [1, inf])
1878
+ 3.141592653589793238462643
1879
+ >>> nprod(lambda k: (1+1/k)**2/(1+2/k), [1, inf])
1880
+ 2.0
1881
+ >>> nprod(lambda k: (k**3-1)/(k**3+1), [2, inf])
1882
+ 0.6666666666666666666666667
1883
+ >>> nprod(lambda k: (1-1/k**2), [2, inf])
1884
+ 0.5
1885
+
1886
+ Next, several more infinite products with more complicated
1887
+ values::
1888
+
1889
+ >>> nprod(lambda k: exp(1/k**2), [1, inf]); exp(pi**2/6)
1890
+ 5.180668317897115748416626
1891
+ 5.180668317897115748416626
1892
+
1893
+ >>> nprod(lambda k: (k**2-1)/(k**2+1), [2, inf]); pi*csch(pi)
1894
+ 0.2720290549821331629502366
1895
+ 0.2720290549821331629502366
1896
+
1897
+ >>> nprod(lambda k: (k**4-1)/(k**4+1), [2, inf])
1898
+ 0.8480540493529003921296502
1899
+ >>> pi*sinh(pi)/(cosh(sqrt(2)*pi)-cos(sqrt(2)*pi))
1900
+ 0.8480540493529003921296502
1901
+
1902
+ >>> nprod(lambda k: (1+1/k+1/k**2)**2/(1+2/k+3/k**2), [1, inf])
1903
+ 1.848936182858244485224927
1904
+ >>> 3*sqrt(2)*cosh(pi*sqrt(3)/2)**2*csch(pi*sqrt(2))/pi
1905
+ 1.848936182858244485224927
1906
+
1907
+ >>> nprod(lambda k: (1-1/k**4), [2, inf]); sinh(pi)/(4*pi)
1908
+ 0.9190194775937444301739244
1909
+ 0.9190194775937444301739244
1910
+
1911
+ >>> nprod(lambda k: (1-1/k**6), [2, inf])
1912
+ 0.9826842777421925183244759
1913
+ >>> (1+cosh(pi*sqrt(3)))/(12*pi**2)
1914
+ 0.9826842777421925183244759
1915
+
1916
+ >>> nprod(lambda k: (1+1/k**2), [2, inf]); sinh(pi)/(2*pi)
1917
+ 1.838038955187488860347849
1918
+ 1.838038955187488860347849
1919
+
1920
+ >>> nprod(lambda n: (1+1/n)**n * exp(1/(2*n)-1), [1, inf])
1921
+ 1.447255926890365298959138
1922
+ >>> exp(1+euler/2)/sqrt(2*pi)
1923
+ 1.447255926890365298959138
1924
+
1925
+ The following two products are equivalent and can be evaluated in
1926
+ terms of a Jacobi theta function. Pi can be replaced by any value
1927
+ (as long as convergence is preserved)::
1928
+
1929
+ >>> nprod(lambda k: (1-pi**-k)/(1+pi**-k), [1, inf])
1930
+ 0.3838451207481672404778686
1931
+ >>> nprod(lambda k: tanh(k*log(pi)/2), [1, inf])
1932
+ 0.3838451207481672404778686
1933
+ >>> jtheta(4,0,1/pi)
1934
+ 0.3838451207481672404778686
1935
+
1936
+ This product does not have a known closed form value::
1937
+
1938
+ >>> nprod(lambda k: (1-1/2**k), [1, inf])
1939
+ 0.2887880950866024212788997
1940
+
1941
+ A product taken from `-\infty`::
1942
+
1943
+ >>> nprod(lambda k: 1-k**(-3), [-inf,-2])
1944
+ 0.8093965973662901095786805
1945
+ >>> cosh(pi*sqrt(3)/2)/(3*pi)
1946
+ 0.8093965973662901095786805
1947
+
1948
+ A doubly infinite product::
1949
+
1950
+ >>> nprod(lambda k: exp(1/(1+k**2)), [-inf, inf])
1951
+ 23.41432688231864337420035
1952
+ >>> exp(pi/tanh(pi))
1953
+ 23.41432688231864337420035
1954
+
1955
+ A product requiring the use of Euler-Maclaurin summation to compute
1956
+ an accurate value::
1957
+
1958
+ >>> nprod(lambda k: (1-1/k**2.5), [2, inf], method='e')
1959
+ 0.696155111336231052898125
1960
+
1961
+ **References**
1962
+
1963
+ 1. [Weisstein]_ http://mathworld.wolfram.com/InfiniteProduct.html
1964
+
1965
+ """
1966
+ if nsum or ('e' in kwargs.get('method', '')):
1967
+ orig = ctx.prec
1968
+ try:
1969
+ # TODO: we are evaluating log(1+eps) -> eps, which is
1970
+ # inaccurate. This currently works because nsum greatly
1971
+ # increases the working precision. But we should be
1972
+ # more intelligent and handle the precision here.
1973
+ ctx.prec += 10
1974
+ v = ctx.nsum(lambda n: ctx.ln(f(n)), interval, **kwargs)
1975
+ finally:
1976
+ ctx.prec = orig
1977
+ return +ctx.exp(v)
1978
+
1979
+ a, b = ctx._as_points(interval)
1980
+ if a == ctx.ninf:
1981
+ if b == ctx.inf:
1982
+ return f(0) * ctx.nprod(lambda k: f(-k) * f(k), [1, ctx.inf], **kwargs)
1983
+ return ctx.nprod(f, [-b, ctx.inf], **kwargs)
1984
+ elif b != ctx.inf:
1985
+ return ctx.fprod(f(ctx.mpf(k)) for k in xrange(int(a), int(b)+1))
1986
+
1987
+ a = int(a)
1988
+
1989
+ def update(partial_products, indices):
1990
+ if partial_products:
1991
+ pprod = partial_products[-1]
1992
+ else:
1993
+ pprod = ctx.one
1994
+ for k in indices:
1995
+ pprod = pprod * f(a + ctx.mpf(k))
1996
+ partial_products.append(pprod)
1997
+
1998
+ return +ctx.adaptive_extrapolation(update, None, kwargs)
1999
+
2000
+
2001
+ @defun
2002
+ def limit(ctx, f, x, direction=1, exp=False, **kwargs):
2003
+ r"""
2004
+ Computes an estimate of the limit
2005
+
2006
+ .. math ::
2007
+
2008
+ \lim_{t \to x} f(t)
2009
+
2010
+ where `x` may be finite or infinite.
2011
+
2012
+ For finite `x`, :func:`~mpmath.limit` evaluates `f(x + d/n)` for
2013
+ consecutive integer values of `n`, where the approach direction
2014
+ `d` may be specified using the *direction* keyword argument.
2015
+ For infinite `x`, :func:`~mpmath.limit` evaluates values of
2016
+ `f(\mathrm{sign}(x) \cdot n)`.
2017
+
2018
+ If the approach to the limit is not sufficiently fast to give
2019
+ an accurate estimate directly, :func:`~mpmath.limit` attempts to find
2020
+ the limit using Richardson extrapolation or the Shanks
2021
+ transformation. You can select between these methods using
2022
+ the *method* keyword (see documentation of :func:`~mpmath.nsum` for
2023
+ more information).
2024
+
2025
+ **Options**
2026
+
2027
+ The following options are available with essentially the
2028
+ same meaning as for :func:`~mpmath.nsum`: *tol*, *method*, *maxterms*,
2029
+ *steps*, *verbose*.
2030
+
2031
+ If the option *exp=True* is set, `f` will be
2032
+ sampled at exponentially spaced points `n = 2^1, 2^2, 2^3, \ldots`
2033
+ instead of the linearly spaced points `n = 1, 2, 3, \ldots`.
2034
+ This can sometimes improve the rate of convergence so that
2035
+ :func:`~mpmath.limit` may return a more accurate answer (and faster).
2036
+ However, do note that this can only be used if `f`
2037
+ supports fast and accurate evaluation for arguments that
2038
+ are extremely close to the limit point (or if infinite,
2039
+ very large arguments).
2040
+
2041
+ **Examples**
2042
+
2043
+ A basic evaluation of a removable singularity::
2044
+
2045
+ >>> from mpmath import *
2046
+ >>> mp.dps = 30; mp.pretty = True
2047
+ >>> limit(lambda x: (x-sin(x))/x**3, 0)
2048
+ 0.166666666666666666666666666667
2049
+
2050
+ Computing the exponential function using its limit definition::
2051
+
2052
+ >>> limit(lambda n: (1+3/n)**n, inf)
2053
+ 20.0855369231876677409285296546
2054
+ >>> exp(3)
2055
+ 20.0855369231876677409285296546
2056
+
2057
+ A limit for `\pi`::
2058
+
2059
+ >>> f = lambda n: 2**(4*n+1)*fac(n)**4/(2*n+1)/fac(2*n)**2
2060
+ >>> limit(f, inf)
2061
+ 3.14159265358979323846264338328
2062
+
2063
+ Calculating the coefficient in Stirling's formula::
2064
+
2065
+ >>> limit(lambda n: fac(n) / (sqrt(n)*(n/e)**n), inf)
2066
+ 2.50662827463100050241576528481
2067
+ >>> sqrt(2*pi)
2068
+ 2.50662827463100050241576528481
2069
+
2070
+ Evaluating Euler's constant `\gamma` using the limit representation
2071
+
2072
+ .. math ::
2073
+
2074
+ \gamma = \lim_{n \rightarrow \infty } \left[ \left(
2075
+ \sum_{k=1}^n \frac{1}{k} \right) - \log(n) \right]
2076
+
2077
+ (which converges notoriously slowly)::
2078
+
2079
+ >>> f = lambda n: sum([mpf(1)/k for k in range(1,int(n)+1)]) - log(n)
2080
+ >>> limit(f, inf)
2081
+ 0.577215664901532860606512090082
2082
+ >>> +euler
2083
+ 0.577215664901532860606512090082
2084
+
2085
+ With default settings, the following limit converges too slowly
2086
+ to be evaluated accurately. Changing to exponential sampling
2087
+ however gives a perfect result::
2088
+
2089
+ >>> f = lambda x: sqrt(x**3+x**2)/(sqrt(x**3)+x)
2090
+ >>> limit(f, inf)
2091
+ 0.992831158558330281129249686491
2092
+ >>> limit(f, inf, exp=True)
2093
+ 1.0
2094
+
2095
+ """
2096
+
2097
+ if ctx.isinf(x):
2098
+ direction = ctx.sign(x)
2099
+ g = lambda k: f(ctx.mpf(k+1)*direction)
2100
+ else:
2101
+ direction *= ctx.one
2102
+ g = lambda k: f(x + direction/(k+1))
2103
+ if exp:
2104
+ h = g
2105
+ g = lambda k: h(2**k)
2106
+
2107
+ def update(values, indices):
2108
+ for k in indices:
2109
+ values.append(g(k+1))
2110
+
2111
+ # XXX: steps used by nsum don't work well
2112
+ if not 'steps' in kwargs:
2113
+ kwargs['steps'] = [10]
2114
+
2115
+ return +ctx.adaptive_extrapolation(update, None, kwargs)
.venv/lib/python3.11/site-packages/mpmath/calculus/inverselaplace.py ADDED
@@ -0,0 +1,973 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # contributed to mpmath by Kristopher L. Kuhlman, February 2017
2
+ # contributed to mpmath by Guillermo Navas-Palencia, February 2022
3
+
4
class InverseLaplaceTransform(object):
    r"""
    Common infrastructure shared by the numerical inverse Laplace
    transform algorithms.

    To implement a custom inversion algorithm, derive from
    :class:`InverseLaplaceTransform`, provide the two methods below,
    and hand the subclass to :func:`~mpmath.invertlaplace` via its
    *method* keyword argument.
    """

    def __init__(self, ctx):
        # The mpmath context supplies arithmetic, special functions,
        # and working-precision control for the algorithm.
        self.ctx = ctx

    def calc_laplace_parameter(self, t, **kwargs):
        r"""
        Build the vector of Laplace-parameter (abscissa) values at
        which the Laplace-space function must be evaluated. The
        abscissa depend on the chosen algorithm (de Hoog is the
        historical default), any algorithm-specific keyword
        parameters (or their defaults), and the requested time.
        """
        raise NotImplementedError

    def calc_time_domain_solution(self, fp):
        r"""
        Combine the Laplace-space function samples *fp* (evaluated at
        the abscissa produced by :meth:`calc_laplace_parameter`) into
        the time-domain value. Abscissa computed for one algorithm
        are typically not useful for another algorithm.
        """
        raise NotImplementedError
37
+
38
+
39
class FixedTalbot(InverseLaplaceTransform):

    def calc_laplace_parameter(self, t, **kwargs):
        r"""The "fixed" Talbot method deforms the Bromwich contour towards
        `-\infty` in the shape of a parabola. Traditionally the Talbot
        algorithm has adjustable parameters, but the "fixed" version
        does not. The `r` parameter could be passed in as a parameter,
        if you want to override the default given by (Abate & Valko,
        2004).

        The Laplace parameter is sampled along a parabola opening
        along the negative imaginary axis, with the base of the
        parabola along the real axis at
        `p=\frac{r}{t_\mathrm{max}}`. As the number of terms used in
        the approximation (degree) grows, the abscissa required for
        function evaluation tend towards `-\infty`, requiring high
        precision to prevent overflow. If any poles, branch cuts or
        other singularities exist such that the deformed Bromwich
        contour lies to the left of the singularity, the method will
        fail.

        **Optional arguments**

        :class:`~mpmath.calculus.inverselaplace.FixedTalbot.calc_laplace_parameter`
        recognizes the following keywords

        *tmax*
            maximum time associated with vector of times
            (typically just the time requested)
        *degree*
            integer order of approximation (M = number of terms)
        *r*
            abscissa for `p_0` (otherwise computed using rule
            of thumb `2M/5`)

        The working precision will be increased according to a rule of
        thumb. If 'degree' is not specified, the working precision and
        degree are chosen to hopefully achieve the dps of the calling
        context. If 'degree' is specified, the working precision is
        chosen to achieve maximum resulting precision for the
        specified degree.

        .. math ::

            p_0=\frac{r}{t}

        .. math ::

            p_i=\frac{i r \pi}{Mt_\mathrm{max}}\left[\cot\left(
            \frac{i\pi}{M}\right) + j \right] \qquad 1\le i <M

        where `j=\sqrt{-1}`, `r=2M/5`, and `t_\mathrm{max}` is the
        maximum specified time.

        """

        # required
        # ------------------------------
        # time of desired approximation
        self.t = self.ctx.convert(t)

        # optional
        # ------------------------------
        # maximum time desired (used for scaling) default is requested
        # time.
        self.tmax = self.ctx.convert(kwargs.get('tmax', self.t))

        # empirical relationships used here based on a linear fit of
        # requested and delivered dps for exponentially decaying time
        # functions for requested dps up to 512.

        if 'degree' in kwargs:
            self.degree = kwargs['degree']
            self.dps_goal = self.degree
        else:
            self.dps_goal = int(1.72*self.ctx.dps)
            self.degree = max(12, int(1.38*self.dps_goal))

        M = self.degree

        # this is adjusting the dps of the calling context hopefully
        # the caller doesn't monkey around with it between calling
        # this routine and calc_time_domain_solution()
        self.dps_orig = self.ctx.dps
        self.ctx.dps = self.dps_goal

        # Abate & Valko rule of thumb for r parameter
        self.r = kwargs.get('r', self.ctx.fraction(2, 5)*M)

        self.theta = self.ctx.linspace(0.0, self.ctx.pi, M+1)

        self.cot_theta = self.ctx.matrix(M, 1)
        self.cot_theta[0] = 0  # not used

        # all but time-dependent part of p
        self.delta = self.ctx.matrix(M, 1)
        self.delta[0] = self.r

        for i in range(1, M):
            self.cot_theta[i] = self.ctx.cot(self.theta[i])
            self.delta[i] = self.r*self.theta[i]*(self.cot_theta[i] + 1j)

        # elementwise division returns a fresh (M, 1) matrix, so no
        # separate pre-allocation of self.p is needed.
        self.p = self.delta/self.tmax

        # NB: p is complex (mpc)

    def calc_time_domain_solution(self, fp, t, manual_prec=False):
        r"""The fixed Talbot time-domain solution is computed from the
        Laplace-space function evaluations using

        .. math ::

            f(t,M)=\frac{2}{5t}\sum_{k=0}^{M-1}\Re \left[
            \gamma_k \bar{f}(p_k)\right]

        where

        .. math ::

            \gamma_0 = \frac{1}{2}e^{r}\bar{f}(p_0)

        .. math ::

            \gamma_k = e^{tp_k}\left\lbrace 1 + \frac{jk\pi}{M}\left[1 +
            \cot \left( \frac{k \pi}{M} \right)^2 \right] - j\cot\left(
            \frac{k \pi}{M}\right)\right \rbrace \qquad 1\le k<M.

        Again, `j=\sqrt{-1}`.

        Before calling this function, call
        :class:`~mpmath.calculus.inverselaplace.FixedTalbot.calc_laplace_parameter`
        to set the parameters and compute the required coefficients.

        **References**

        1. Abate, J., P. Valko (2004). Multi-precision Laplace
        transform inversion. *International Journal for Numerical
        Methods in Engineering* 60:979-993,
        http://dx.doi.org/10.1002/nme.995
        2. Talbot, A. (1979). The accurate numerical inversion of
        Laplace transforms. *IMA Journal of Applied Mathematics*
        23(1):97, http://dx.doi.org/10.1093/imamat/23.1.97
        """

        # required
        # ------------------------------
        self.t = self.ctx.convert(t)

        # assume fp was computed from p matrix returned from
        # calc_laplace_parameter(), so is already a list or matrix of
        # mpmath 'mpc' types

        # these were computed in previous call to
        # calc_laplace_parameter() (unused aliases of self.p and
        # self.r removed; delta[0] == r, so exp(delta[0]) is e**r)
        theta = self.theta
        delta = self.delta
        M = self.degree

        ans = self.ctx.matrix(M, 1)
        ans[0] = self.ctx.exp(delta[0])*fp[0]/2

        for i in range(1, M):
            ans[i] = self.ctx.exp(delta[i])*fp[i]*(
                1 + 1j*theta[i]*(1 + self.cot_theta[i]**2) -
                1j*self.cot_theta[i])

        result = self.ctx.fraction(2, 5)*self.ctx.fsum(ans)/self.t

        # setting dps back to value when calc_laplace_parameter was
        # called, unless flag is set.
        if not manual_prec:
            self.ctx.dps = self.dps_orig

        return result.real
216
+
217
+
218
+ # ****************************************
219
+
220
class Stehfest(InverseLaplaceTransform):

    def calc_laplace_parameter(self, t, **kwargs):
        r"""
        The Gaver-Stehfest method is a discrete approximation of the
        Widder-Post inversion algorithm, rather than a direct
        approximation of the Bromwich contour integral.

        The method uses abscissa along the real axis, and therefore has
        issues inverting oscillatory functions (which have poles in
        pairs away from the real axis).

        The working precision will be increased according to a rule of
        thumb. If 'degree' is not specified, the working precision and
        degree are chosen to hopefully achieve the dps of the calling
        context. If 'degree' is specified, the working precision is
        chosen to achieve maximum resulting precision for the
        specified degree.

        .. math ::

            p_k = \frac{k \log 2}{t} \qquad 1 \le k \le M
        """

        # required
        # ------------------------------
        # time of desired approximation
        self.t = self.ctx.convert(t)

        # optional
        # ------------------------------

        # empirical relationships used here based on a linear fit of
        # requested and delivered dps for exponentially decaying time
        # functions for requested dps up to 512.

        if 'degree' in kwargs:
            self.degree = kwargs['degree']
            self.dps_goal = int(1.38*self.degree)
        else:
            self.dps_goal = int(2.93*self.ctx.dps)
            self.degree = max(16, self.dps_goal)

        # _coeff routine requires even degree
        if self.degree % 2 > 0:
            self.degree += 1

        M = self.degree

        # this is adjusting the dps of the calling context
        # hopefully the caller doesn't monkey around with it
        # between calling this routine and calc_time_domain_solution()
        self.dps_orig = self.ctx.dps
        self.ctx.dps = self.dps_goal

        self.V = self._coeff()
        self.p = self.ctx.matrix(self.ctx.arange(1, M+1))*self.ctx.ln2/self.t

        # NB: p is real (mpf)

    def _coeff(self):
        r"""Salzer summation weights (aka, "Stehfest coefficients")
        only depend on the approximation order (M) and the precision"""

        M = self.degree
        M2 = M // 2  # checked earlier that M is even

        V = self.ctx.matrix(M, 1)

        # Salzer summation weights
        # get very large in magnitude and oscillate in sign,
        # if the precision is not high enough, there will be
        # catastrophic cancellation
        for k in range(1, M+1):
            z = self.ctx.matrix(min(k, M2)+1, 1)
            for j in range((k+1)//2, min(k, M2)+1):
                z[j] = (self.ctx.power(j, M2)*self.ctx.fac(2*j)/
                        (self.ctx.fac(M2-j)*self.ctx.fac(j)*
                         self.ctx.fac(j-1)*self.ctx.fac(k-j)*
                         self.ctx.fac(2*j-k)))
            V[k-1] = self.ctx.power(-1, k+M2)*self.ctx.fsum(z)

        return V

    def calc_time_domain_solution(self, fp, t, manual_prec=False):
        r"""Compute time-domain Stehfest algorithm solution.

        .. math ::

            f(t,M) = \frac{\log 2}{t} \sum_{k=1}^{M} V_k \bar{f}\left(
            p_k \right)

        where

        .. math ::

            V_k = (-1)^{k + N/2} \sum^{\min(k,N/2)}_{i=\lfloor(k+1)/2 \rfloor}
            \frac{i^{\frac{N}{2}}(2i)!}{\left(\frac{N}{2}-i \right)! \, i! \,
            \left(i-1 \right)! \, \left(k-i\right)! \, \left(2i-k \right)!}

        As the degree increases, the abscissa (`p_k`) only increase
        linearly towards `\infty`, but the Stehfest coefficients
        (`V_k`) alternate in sign and increase rapidly in magnitude,
        requiring high precision to prevent overflow or loss of
        significance when evaluating the sum.

        **References**

        1. Widder, D. (1941). *The Laplace Transform*. Princeton.
        2. Stehfest, H. (1970). Algorithm 368: numerical inversion of
        Laplace transforms. *Communications of the ACM* 13(1):47-49,
        http://dx.doi.org/10.1145/361953.361969

        """

        # required
        self.t = self.ctx.convert(t)

        # assume fp was computed from p matrix returned from
        # calc_laplace_parameter(), so is already
        # a list or matrix of mpmath 'mpf' types

        result = self.ctx.fdot(self.V, fp)*self.ctx.ln2/self.t

        # setting dps back to value when calc_laplace_parameter was called
        if not manual_prec:
            self.ctx.dps = self.dps_orig

        # ignore any small imaginary part
        return result.real
350
+
351
+
352
+ # ****************************************
353
+
354
class deHoog(InverseLaplaceTransform):

    def calc_laplace_parameter(self, t, **kwargs):
        r"""the de Hoog, Knight & Stokes algorithm is an
        accelerated form of the Fourier series numerical
        inverse Laplace transform algorithms.

        .. math ::

            p_k = \gamma + \frac{jk}{T} \qquad 0 \le k < 2M+1

        where

        .. math ::

            \gamma = \alpha - \frac{\log \mathrm{tol}}{2T},

        `j=\sqrt{-1}`, `T = 2t_\mathrm{max}` is a scaled time,
        `\alpha=10^{-\mathrm{dps\_goal}}` is the real part of the
        rightmost pole or singularity, which is chosen based on the
        desired accuracy (assuming the rightmost singularity is 0),
        and `\mathrm{tol}=10\alpha` is the desired tolerance, which is
        chosen in relation to `\alpha`.

        When increasing the degree, the abscissa increase towards
        `j\infty`, but more slowly than the fixed Talbot
        algorithm. The de Hoog et al. algorithm typically does better
        with oscillatory functions of time, and less well-behaved
        functions. The method tends to be slower than the Talbot and
        Stehfest algorithms, especially so at very high precision
        (e.g., `>500` digits precision).

        """

        # required
        # ------------------------------
        self.t = self.ctx.convert(t)

        # optional
        # ------------------------------
        self.tmax = kwargs.get('tmax', self.t)

        # empirical relationships used here based on a linear fit of
        # requested and delivered dps for exponentially decaying time
        # functions for requested dps up to 512.

        if 'degree' in kwargs:
            self.degree = kwargs['degree']
            self.dps_goal = int(1.38*self.degree)
        else:
            self.dps_goal = int(self.ctx.dps*1.36)
            self.degree = max(10, self.dps_goal)

        # 2*M+1 terms in approximation
        M = self.degree

        # adjust alpha component of abscissa of convergence for higher
        # precision
        tmp = self.ctx.power(10.0, -self.dps_goal)
        self.alpha = self.ctx.convert(kwargs.get('alpha', tmp))

        # desired tolerance (here simply related to alpha)
        self.tol = self.ctx.convert(kwargs.get('tol', self.alpha*10.0))
        self.np = 2*self.degree+1  # number of terms in approximation

        # this is adjusting the dps of the calling context
        # hopefully the caller doesn't monkey around with it
        # between calling this routine and calc_time_domain_solution()
        self.dps_orig = self.ctx.dps
        self.ctx.dps = self.dps_goal

        # scaling factor (likely tun-able, but 2 is typical)
        self.scale = kwargs.get('scale', 2)
        self.T = self.ctx.convert(kwargs.get('T', self.scale*self.tmax))

        self.gamma = self.alpha - self.ctx.log(self.tol)/(self.scale*self.T)
        # the expression below already yields a fresh (2M+1, 1) complex
        # matrix, so no separate pre-allocation of self.p is needed
        self.p = (self.gamma + self.ctx.pi*
                  self.ctx.matrix(self.ctx.arange(self.np))/self.T*1j)

        # NB: p is complex (mpc)

    def calc_time_domain_solution(self, fp, t, manual_prec=False):
        r"""Calculate time-domain solution for
        de Hoog, Knight & Stokes algorithm.

        The un-accelerated Fourier series approach is:

        .. math ::

            f(t,2M+1) = \frac{e^{\gamma t}}{T} \sum_{k=0}^{2M}{}^{'}
            \Re\left[\bar{f}\left( p_k \right)
            e^{i\pi t/T} \right],

        where the prime on the summation indicates the first term is halved.

        This simplistic approach requires so many function evaluations
        that it is not practical. Non-linear acceleration is
        accomplished via Pade-approximation and an analytic expression
        for the remainder of the continued fraction. See the original
        paper (reference 2 below) for a detailed description of the
        numerical approach.

        **References**

        1. Davies, B. (2005). *Integral Transforms and their
        Applications*, Third Edition. Springer.
        2. de Hoog, F., J. Knight, A. Stokes (1982). An improved
        method for numerical inversion of Laplace transforms. *SIAM
        Journal of Scientific and Statistical Computing* 3:357-366,
        http://dx.doi.org/10.1137/0903022

        """

        M = self.degree
        np = self.np
        T = self.T

        self.t = self.ctx.convert(t)

        # would it be useful to try re-using
        # space between e&q and A&B?
        e = self.ctx.zeros(np, M+1)
        q = self.ctx.matrix(2*M, M)
        d = self.ctx.matrix(np, 1)
        A = self.ctx.zeros(np+1, 1)
        B = self.ctx.ones(np+1, 1)

        # initialize Q-D table
        e[:, 0] = 0.0 + 0j
        q[0, 0] = fp[1]/(fp[0]/2)
        for i in range(1, 2*M):
            q[i, 0] = fp[i+1]/fp[i]

        # rhombus rule for filling triangular Q-D table (e & q)
        for r in range(1, M+1):
            # start with e, column 1, 0:2*M-2
            mr = 2*(M-r) + 1
            e[0:mr, r] = q[1:mr+1, r-1] - q[0:mr, r-1] + e[1:mr+1, r-1]
            if not r == M:
                rq = r+1
                mr = 2*(M-rq)+1 + 2
                for i in range(mr):
                    q[i, rq-1] = q[i+1, rq-2]*e[i+1, rq-1]/e[i, rq-1]

        # build up continued fraction coefficients (d)
        d[0] = fp[0]/2
        for r in range(1, M+1):
            d[2*r-1] = -q[0, r-1]  # even terms
            d[2*r] = -e[0, r]  # odd terms

        # seed A and B for recurrence
        A[0] = 0.0 + 0.0j
        A[1] = d[0]
        B[0:2] = 1.0 + 0.0j

        # base of the power series
        z = self.ctx.expjpi(self.t/T)  # i*pi is already in fcn

        # coefficients of Pade approximation (A & B)
        # using recurrence for all but last term
        for i in range(1, 2*M):
            A[i+1] = A[i] + d[i]*A[i-1]*z
            B[i+1] = B[i] + d[i]*B[i-1]*z

        # "improved remainder" to continued fraction
        brem = (1 + (d[2*M-1] - d[2*M])*z)/2
        # powm1(x,y) computes x^y - 1 more accurately near zero
        rem = brem*self.ctx.powm1(1 + d[2*M]*z/brem,
                                  self.ctx.fraction(1, 2))

        # last term of recurrence using new remainder
        A[np] = A[2*M] + rem*A[2*M-1]
        B[np] = B[2*M] + rem*B[2*M-1]

        # diagonal Pade approximation
        # F=A/B represents accelerated trapezoid rule
        result = self.ctx.exp(self.gamma*self.t)/T*(A[np]/B[np]).real

        # setting dps back to value when calc_laplace_parameter was called
        if not manual_prec:
            self.ctx.dps = self.dps_orig

        return result
538
+
539
+
540
+ # ****************************************
541
+
542
class Cohen(InverseLaplaceTransform):

    def calc_laplace_parameter(self, t, **kwargs):
        r"""The Cohen algorithm accelerates the convergence of the nearly
        alternating series resulting from the application of the trapezoidal
        rule to the Bromwich contour inversion integral.

        .. math ::

            p_k = \frac{\gamma}{2 t} + \frac{\pi i k}{t} \qquad 0 \le k < M

        where

        .. math ::

            \gamma = \frac{2}{3} \left(d \log 10 + \log(2 t)\right),

        `d = \mathrm{dps\_goal}`, which is chosen based on the desired
        accuracy using the method developed in [1] to improve numerical
        stability. The Cohen algorithm shows robustness similar to the de Hoog
        et al. algorithm, but it is faster than the fixed Talbot algorithm.

        **Optional arguments**

        *degree*
            integer order of the approximation (M = number of terms)
        *alpha*
            abscissa for `p_0` (controls the discretization error)

        The working precision will be increased according to a rule of
        thumb. If 'degree' is not specified, the working precision and
        degree are chosen to hopefully achieve the dps of the calling
        context. If 'degree' is specified, the working precision is
        chosen to achieve maximum resulting precision for the
        specified degree.

        **References**

        1. P. Glasserman, J. Ruiz-Mata (2006). Computing the credit loss
        distribution in the Gaussian copula model: a comparison of methods.
        *Journal of Credit Risk* 2(4):33-66, 10.21314/JCR.2006.057

        """
        self.t = self.ctx.convert(t)

        # empirical rules of thumb relating requested dps, working
        # precision (dps_goal) and number of terms (degree)
        if 'degree' in kwargs:
            self.degree = kwargs['degree']
            self.dps_goal = int(1.5 * self.degree)
        else:
            self.dps_goal = int(self.ctx.dps * 1.74)
            self.degree = max(22, int(1.31 * self.dps_goal))

        M = self.degree + 1

        # this is adjusting the dps of the calling context hopefully
        # the caller doesn't monkey around with it between calling
        # this routine and calc_time_domain_solution()
        self.dps_orig = self.ctx.dps
        self.ctx.dps = self.dps_goal

        # gamma = (2/3)*(d*log(10) + log(2*t)); note ctx.dps here is
        # already dps_goal, set just above
        ttwo = 2 * self.t
        tmp = self.ctx.dps * self.ctx.log(10) + self.ctx.log(ttwo)
        tmp = self.ctx.fraction(2, 3) * tmp
        self.alpha = self.ctx.convert(kwargs.get('alpha', tmp))

        # all but time-dependent part of p
        a_t = self.alpha / ttwo
        p_t = self.ctx.pi * 1j / self.t

        self.p = self.ctx.matrix(M, 1)
        self.p[0] = a_t

        for i in range(1, M):
            self.p[i] = a_t + i * p_t

        # NB: p[0] is real; p[1:] are complex (mpc)

    def calc_time_domain_solution(self, fp, t, manual_prec=False):
        r"""Calculate time-domain solution for Cohen algorithm.

        The accelerated nearly alternating series is:

        .. math ::

            f(t, M) = \frac{e^{\gamma / 2}}{t} \left[\frac{1}{2}
            \Re\left(\bar{f}\left(\frac{\gamma}{2t}\right) \right) -
            \sum_{k=0}^{M-1}\frac{c_{M,k}}{d_M}\Re\left(\bar{f}
            \left(\frac{\gamma + 2(k+1) \pi i}{2t}\right)\right)\right],

        where coefficients `\frac{c_{M, k}}{d_M}` are described in [1].

        1. H. Cohen, F. Rodriguez Villegas, D. Zagier (2000). Convergence
        acceleration of alternating series. *Experiment. Math* 9(1):3-12

        """
        self.t = self.ctx.convert(t)

        n = self.degree
        M = n + 1

        # only the real parts of the Laplace-space samples enter the sum
        A = self.ctx.matrix(M, 1)
        for i in range(M):
            A[i] = fp[i].real

        # normalization constant d_n of the acceleration scheme (ref. 1
        # in the docstring): d = ((3+sqrt(8))^n + (3+sqrt(8))^-n)/2
        d = (3 + self.ctx.sqrt(8)) ** n
        d = (d + 1 / d) / 2
        # b and c implement the recurrence for the coefficients c_{n,k}
        # of ref. 1; s accumulates the weighted sum of A[1:]
        b = -self.ctx.one
        c = -d
        s = 0

        for k in range(n):
            c = b - c
            s = s + c * A[k + 1]
            b = 2 * (k + n) * (k - n) * b / ((2 * k + 1) * (k + self.ctx.one))

        result = self.ctx.exp(self.alpha / 2) / self.t * (A[0] / 2 - s / d)

        # setting dps back to value when calc_laplace_parameter was
        # called, unless flag is set.
        if not manual_prec:
            self.ctx.dps = self.dps_orig

        return result
663
+
664
+
665
+ # ****************************************
666
+
667
class LaplaceTransformInversionMethods(object):
    def __init__(ctx, *args, **kwargs):
        # one pre-built instance of each algorithm, bound to this context
        ctx._fixed_talbot = FixedTalbot(ctx)
        ctx._stehfest = Stehfest(ctx)
        ctx._de_hoog = deHoog(ctx)
        ctx._cohen = Cohen(ctx)

    def invertlaplace(ctx, f, t, **kwargs):
        r"""Computes the numerical inverse Laplace transform for a
        Laplace-space function at a given time.  The function being
        evaluated is assumed to be a real-valued function of time.

        The user must supply a Laplace-space function `\bar{f}(p)`,
        and a desired time at which to estimate the time-domain
        solution `f(t)`.

        A few basic examples of Laplace-space functions with known
        inverses (see references [1,2]) :

        .. math ::

            \mathcal{L}\left\lbrace f(t) \right\rbrace=\bar{f}(p)

        .. math ::

            \mathcal{L}^{-1}\left\lbrace \bar{f}(p) \right\rbrace = f(t)

        .. math ::

            \bar{f}(p) = \frac{1}{(p+1)^2}

        .. math ::

            f(t) = t e^{-t}

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> tt = [0.001, 0.01, 0.1, 1, 10]
        >>> fp = lambda p: 1/(p+1)**2
        >>> ft = lambda t: t*exp(-t)
        >>> ft(tt[0]),ft(tt[0])-invertlaplace(fp,tt[0],method='talbot')
        (0.000999000499833375, 8.57923043561212e-20)
        >>> ft(tt[1]),ft(tt[1])-invertlaplace(fp,tt[1],method='talbot')
        (0.00990049833749168, 3.27007646698047e-19)
        >>> ft(tt[2]),ft(tt[2])-invertlaplace(fp,tt[2],method='talbot')
        (0.090483741803596, -1.75215800052168e-18)
        >>> ft(tt[3]),ft(tt[3])-invertlaplace(fp,tt[3],method='talbot')
        (0.367879441171442, 1.2428864009344e-17)
        >>> ft(tt[4]),ft(tt[4])-invertlaplace(fp,tt[4],method='talbot')
        (0.000453999297624849, 4.04513489306658e-20)

        The methods also work for higher precision:

        >>> mp.dps = 100; mp.pretty = True
        >>> nstr(ft(tt[0]),15),nstr(ft(tt[0])-invertlaplace(fp,tt[0],method='talbot'),15)
        ('0.000999000499833375', '-4.96868310693356e-105')
        >>> nstr(ft(tt[1]),15),nstr(ft(tt[1])-invertlaplace(fp,tt[1],method='talbot'),15)
        ('0.00990049833749168', '1.23032291513122e-104')

        .. math ::

            \bar{f}(p) = \frac{1}{p^2+1}

        .. math ::

            f(t) = \mathrm{J}_0(t)

        >>> mp.dps = 15; mp.pretty = True
        >>> fp = lambda p: 1/sqrt(p*p + 1)
        >>> ft = lambda t: besselj(0,t)
        >>> ft(tt[0]),ft(tt[0])-invertlaplace(fp,tt[0],method='dehoog')
        (0.999999750000016, -6.09717765032273e-18)
        >>> ft(tt[1]),ft(tt[1])-invertlaplace(fp,tt[1],method='dehoog')
        (0.99997500015625, -5.61756281076169e-17)

        .. math ::

            \bar{f}(p) = \frac{\log p}{p}

        .. math ::

            f(t) = -\gamma -\log t

        >>> mp.dps = 15; mp.pretty = True
        >>> fp = lambda p: log(p)/p
        >>> ft = lambda t: -euler-log(t)
        >>> ft(tt[0]),ft(tt[0])-invertlaplace(fp,tt[0],method='stehfest')
        (6.3305396140806, -1.92126634837863e-16)
        >>> ft(tt[1]),ft(tt[1])-invertlaplace(fp,tt[1],method='stehfest')
        (4.02795452108656, -4.81486093200704e-16)

        **Options**

        :func:`~mpmath.invertlaplace` recognizes the following optional
        keywords valid for all methods:

        *method*
            Chooses numerical inverse Laplace transform algorithm
            (described below).
        *degree*
            Number of terms used in the approximation

        **Algorithms**

        Mpmath implements four numerical inverse Laplace transform
        algorithms, attributed to: Talbot, Stehfest, de Hoog,
        Knight and Stokes, and Cohen. These can be selected by using
        *method='talbot'*, *method='stehfest'*, *method='dehoog'* or
        *method='cohen'* or by passing the classes *method=FixedTalbot*,
        *method=Stehfest*, *method=deHoog*, or *method=Cohen*. The functions
        :func:`~mpmath.invlaptalbot`, :func:`~mpmath.invlapstehfest`,
        :func:`~mpmath.invlapdehoog`, and :func:`~mpmath.invlapcohen`
        are also available as shortcuts.

        All four algorithms implement a heuristic balance between the
        requested precision and the precision used internally for the
        calculations. This has been tuned for a typical exponentially
        decaying function and precision up to few hundred decimal
        digits.

        The Laplace transform converts the variable time (i.e., along
        a line) into a parameter given by the right half of the
        complex `p`-plane. Singularities, poles, and branch cuts in
        the complex `p`-plane contain all the information regarding
        the time behavior of the corresponding function. Any numerical
        method must therefore sample `p`-plane "close enough" to the
        singularities to accurately characterize them, while not
        getting too close to have catastrophic cancellation, overflow,
        or underflow issues. Most significantly, if one or more of the
        singularities in the `p`-plane is not on the left side of the
        Bromwich contour, its effects will be left out of the computed
        solution, and the answer will be completely wrong.

        *Talbot*

        The fixed Talbot method is high accuracy and fast, but the
        method can catastrophically fail for certain classes of time-domain
        behavior, including a Heaviside step function for positive
        time (e.g., `H(t-2)`), or some oscillatory behaviors. The
        Talbot method usually has adjustable parameters, but the
        "fixed" variety implemented here does not. This method
        deforms the Bromwich integral contour in the shape of a
        parabola towards `-\infty`, which leads to problems
        when the solution has a decaying exponential in it (e.g., a
        Heaviside step function is equivalent to multiplying by a
        decaying exponential in Laplace space).

        *Stehfest*

        The Stehfest algorithm only uses abscissa along the real axis
        of the complex `p`-plane to estimate the time-domain
        function. Oscillatory time-domain functions have poles away
        from the real axis, so this method does not work well with
        oscillatory functions, especially high-frequency ones. This
        method also depends on summation of terms in a series that
        grows very large, and will have catastrophic cancellation
        during summation if the working precision is too low.

        *de Hoog et al.*

        The de Hoog, Knight, and Stokes method is essentially a
        Fourier-series quadrature-type approximation to the Bromwich
        contour integral, with non-linear series acceleration and an
        analytical expression for the remainder term. This method is
        typically one of the most robust. This method also involves the
        greatest amount of overhead, so it is typically the slowest of the
        four methods at high precision.

        *Cohen*

        The Cohen method is a trapezoidal rule approximation to the Bromwich
        contour integral, with linear acceleration for alternating
        series. This method is as robust as the de Hoog et al method and the
        fastest of the four methods at high precision, and is therefore the
        default method.

        **Singularities**

        All numerical inverse Laplace transform methods have problems
        at large time when the Laplace-space function has poles,
        singularities, or branch cuts to the right of the origin in
        the complex plane. For simple poles in `\bar{f}(p)` at the
        `p`-plane origin, the time function is constant in time (e.g.,
        `\mathcal{L}\left\lbrace 1 \right\rbrace=1/p` has a pole at
        `p=0`). A pole in `\bar{f}(p)` to the left of the origin is a
        decreasing function of time (e.g., `\mathcal{L}\left\lbrace
        e^{-t/2} \right\rbrace=1/(p+1/2)` has a pole at `p=-1/2`), and
        a pole to the right of the origin leads to an increasing
        function in time (e.g., `\mathcal{L}\left\lbrace t e^{t/4}
        \right\rbrace = 1/(p-1/4)^2` has a pole at `p=1/4`). When
        singularities occur off the real `p` axis, the time-domain
        function is oscillatory. For example `\mathcal{L}\left\lbrace
        \mathrm{J}_0(t) \right\rbrace=1/\sqrt{p^2+1}` has a branch cut
        starting at `p=j=\sqrt{-1}` and is a decaying oscillatory
        function. This range of behaviors is illustrated in Duffy [3]
        Figure 4.10.4, p. 228.

        In general as `p \rightarrow \infty` `t \rightarrow 0` and
        vice-versa. All numerical inverse Laplace transform methods
        require their abscissa to shift closer to the origin for
        larger times. If the abscissa shift left of the rightmost
        singularity in the Laplace domain, the answer will be
        completely wrong (the effect of singularities to the right of
        the Bromwich contour are not included in the results).

        For example, the following exponentially growing function has
        a pole at `p=3`:

        .. math ::

            \bar{f}(p)=\frac{1}{p^2-9}

        .. math ::

            f(t)=\frac{1}{3}\sinh 3t

        >>> mp.dps = 15; mp.pretty = True
        >>> fp = lambda p: 1/(p*p-9)
        >>> ft = lambda t: sinh(3*t)/3
        >>> tt = [0.01,0.1,1.0,10.0]
        >>> ft(tt[0]),invertlaplace(fp,tt[0],method='talbot')
        (0.0100015000675014, 0.0100015000675014)
        >>> ft(tt[1]),invertlaplace(fp,tt[1],method='talbot')
        (0.101506764482381, 0.101506764482381)
        >>> ft(tt[2]),invertlaplace(fp,tt[2],method='talbot')
        (3.33929164246997, 3.33929164246997)
        >>> ft(tt[3]),invertlaplace(fp,tt[3],method='talbot')
        (1781079096920.74, -1.61331069624091e-14)

        **References**

        1. [DLMF]_ section 1.14 (http://dlmf.nist.gov/1.14T4)
        2. Cohen, A.M. (2007). Numerical Methods for Laplace Transform
        Inversion, Springer.
        3. Duffy, D.G. (1998). Advanced Engineering Mathematics, CRC Press.

        **Numerical Inverse Laplace Transform Reviews**

        1. Bellman, R., R.E. Kalaba, J.A. Lockett (1966). *Numerical
        inversion of the Laplace transform: Applications to Biology,
        Economics, Engineering, and Physics*. Elsevier.
        2. Davies, B., B. Martin (1979). Numerical inversion of the
        Laplace transform: a survey and comparison of methods. *Journal
        of Computational Physics* 33:1-32,
        http://dx.doi.org/10.1016/0021-9991(79)90025-1
        3. Duffy, D.G. (1993). On the numerical inversion of Laplace
        transforms: Comparison of three new methods on characteristic
        problems from applications. *ACM Transactions on Mathematical
        Software* 19(3):333-359, http://dx.doi.org/10.1145/155743.155788
        4. Kuhlman, K.L., (2013). Review of Inverse Laplace Transform
        Algorithms for Laplace-Space Numerical Approaches, *Numerical
        Algorithms*, 63(2):339-355.
        http://dx.doi.org/10.1007/s11075-012-9625-3

        """

        rule = kwargs.get('method', 'cohen')
        if isinstance(rule, str):
            # case-insensitive method lookup; note the previous code
            # compared the un-lowercased string for 'cohen', so e.g.
            # method='Cohen' incorrectly raised ValueError
            lrule = rule.lower()
            if lrule == 'talbot':
                rule = ctx._fixed_talbot
            elif lrule == 'stehfest':
                rule = ctx._stehfest
            elif lrule == 'dehoog':
                rule = ctx._de_hoog
            elif lrule == 'cohen':
                rule = ctx._cohen
            else:
                raise ValueError("unknown invlap algorithm: %s" % rule)
        else:
            # assume a InverseLaplaceTransform subclass was passed in
            rule = rule(ctx)

        # determine the vector of Laplace-space parameter
        # needed for the requested method and desired time
        rule.calc_laplace_parameter(t, **kwargs)

        # compute the Laplace-space function evaluations
        # at the required abscissa.
        fp = [f(p) for p in rule.p]

        # compute the time-domain solution from the
        # Laplace-space function evaluations
        return rule.calc_time_domain_solution(fp, t)

    # shortcuts for the above function for specific methods
    def invlaptalbot(ctx, *args, **kwargs):
        kwargs['method'] = 'talbot'
        return ctx.invertlaplace(*args, **kwargs)

    def invlapstehfest(ctx, *args, **kwargs):
        kwargs['method'] = 'stehfest'
        return ctx.invertlaplace(*args, **kwargs)

    def invlapdehoog(ctx, *args, **kwargs):
        kwargs['method'] = 'dehoog'
        return ctx.invertlaplace(*args, **kwargs)

    def invlapcohen(ctx, *args, **kwargs):
        kwargs['method'] = 'cohen'
        return ctx.invertlaplace(*args, **kwargs)
967
+
968
+
969
+ # ****************************************
970
+
971
# Run this module's doctests when it is executed directly as a script.
if __name__ == '__main__':
    import doctest
    doctest.testmod()
.venv/lib/python3.11/site-packages/mpmath/calculus/polynomials.py ADDED
@@ -0,0 +1,213 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ..libmp.backend import xrange
2
+ from .calculus import defun
3
+
4
+ #----------------------------------------------------------------------------#
5
+ # Polynomials #
6
+ #----------------------------------------------------------------------------#
7
+
8
+ # XXX: extra precision
9
@defun
def polyval(ctx, coeffs, x, derivative=False):
    r"""
    Given coefficients `[c_n, \ldots, c_2, c_1, c_0]` and a number `x`,
    :func:`~mpmath.polyval` evaluates the polynomial

    .. math ::

        P(x) = c_n x^n + \ldots + c_2 x^2 + c_1 x + c_0.

    If *derivative=True* is set, :func:`~mpmath.polyval` simultaneously
    evaluates `P(x)` with the derivative, `P'(x)`, and returns the
    tuple `(P(x), P'(x))`.

        >>> from mpmath import *
        >>> mp.pretty = True
        >>> polyval([3, 0, 2], 0.5)
        2.75
        >>> polyval([3, 0, 2], 0.5, derivative=True)
        (2.75, 3.0)

    The coefficients and the evaluation point may be any combination
    of real or complex numbers.
    """
    # Empty coefficient list is taken to denote the zero polynomial.
    if not coeffs:
        return ctx.zero
    # Horner's scheme; the derivative accumulator follows the same
    # recurrence, shifted by one step.
    value = ctx.convert(coeffs[0])
    if derivative:
        deriv = ctx.zero
        for c in coeffs[1:]:
            deriv = value + x*deriv
            value = c + x*value
        return value, deriv
    for c in coeffs[1:]:
        value = c + x*value
    return value
45
+
46
@defun
def polyroots(ctx, coeffs, maxsteps=50, cleanup=True, extraprec=10,
        error=False, roots_init=None):
    """
    Computes all roots (real or complex) of a given polynomial.

    The roots are returned as a sorted list, where real roots appear first
    followed by complex conjugate roots as adjacent elements. The polynomial
    should be given as a list of coefficients, in the format used by
    :func:`~mpmath.polyval`. The leading coefficient must be nonzero.

    With *error=True*, :func:`~mpmath.polyroots` returns a tuple *(roots, err)*
    where *err* is an estimate of the maximum error among the computed roots.

    **Examples**

    Finding the three real roots of `x^3 - x^2 - 14x + 24`::

        >>> from mpmath import *
        >>> mp.dps = 15; mp.pretty = True
        >>> nprint(polyroots([1,-1,-14,24]), 4)
        [-4.0, 2.0, 3.0]

    Finding the two complex conjugate roots of `4x^2 + 3x + 2`, with an
    error estimate::

        >>> roots, err = polyroots([4,3,2], error=True)
        >>> for r in roots:
        ...     print(r)
        ...
        (-0.375 + 0.59947894041409j)
        (-0.375 - 0.59947894041409j)
        >>>
        >>> err
        2.22044604925031e-16
        >>>
        >>> polyval([4,3,2], roots[0])
        (2.22044604925031e-16 + 0.0j)
        >>> polyval([4,3,2], roots[1])
        (2.22044604925031e-16 + 0.0j)

    The following example computes all the 5th roots of unity; that is,
    the roots of `x^5 - 1`::

        >>> mp.dps = 20
        >>> for r in polyroots([1, 0, 0, 0, 0, -1]):
        ...     print(r)
        ...
        1.0
        (-0.8090169943749474241 + 0.58778525229247312917j)
        (-0.8090169943749474241 - 0.58778525229247312917j)
        (0.3090169943749474241 + 0.95105651629515357212j)
        (0.3090169943749474241 - 0.95105651629515357212j)

    **Precision and conditioning**

    The roots are computed to the current working precision accuracy. If this
    accuracy cannot be achieved in ``maxsteps`` steps, then a
    ``NoConvergence`` exception is raised. The algorithm internally is using
    the current working precision extended by ``extraprec``. If
    ``NoConvergence`` was raised, that is caused either by not having enough
    extra precision to achieve convergence (in which case increasing
    ``extraprec`` should fix the problem) or too low ``maxsteps`` (in which
    case increasing ``maxsteps`` should fix the problem), or a combination of
    both.

    The user should always do a convergence study with regards to
    ``extraprec`` to ensure accurate results. It is possible to get
    convergence to a wrong answer with too low ``extraprec``.

    Provided there are no repeated roots, :func:`~mpmath.polyroots` can
    typically compute all roots of an arbitrary polynomial to high precision::

        >>> mp.dps = 60
        >>> for r in polyroots([1, 0, -10, 0, 1]):
        ...     print(r)
        ...
        -3.14626436994197234232913506571557044551247712918732870123249
        -0.317837245195782244725757617296174288373133378433432554879127
        0.317837245195782244725757617296174288373133378433432554879127
        3.14626436994197234232913506571557044551247712918732870123249
        >>>
        >>> sqrt(3) + sqrt(2)
        3.14626436994197234232913506571557044551247712918732870123249
        >>> sqrt(3) - sqrt(2)
        0.317837245195782244725757617296174288373133378433432554879127

    **Algorithm**

    :func:`~mpmath.polyroots` implements the Durand-Kerner method [1], which
    uses complex arithmetic to locate all roots simultaneously.
    The Durand-Kerner method can be viewed as approximately performing
    simultaneous Newton iteration for all the roots. In particular,
    the convergence to simple roots is quadratic, just like Newton's
    method.

    Although all roots are internally calculated using complex arithmetic, any
    root found to have an imaginary part smaller than the estimated numerical
    error is truncated to a real number (small real parts are also chopped).
    Real roots are placed first in the returned list, sorted by value. The
    remaining complex roots are sorted by their real parts so that conjugate
    roots end up next to each other.

    **References**

    1. http://en.wikipedia.org/wiki/Durand-Kerner_method

    """
    if len(coeffs) <= 1:
        if not coeffs or not coeffs[0]:
            raise ValueError("Input to polyroots must not be the zero polynomial")
        # Constant polynomial with no roots
        return []

    orig = ctx.prec
    tol = +ctx.eps
    with ctx.extraprec(extraprec):
        deg = len(coeffs) - 1
        # Must be monic: divide through by the leading coefficient
        lead = ctx.convert(coeffs[0])
        if lead == 1:
            coeffs = [ctx.convert(c) for c in coeffs]
        else:
            coeffs = [c/lead for c in coeffs]
        f = lambda x: ctx.polyval(coeffs, x)
        if roots_init is None:
            # Standard non-real starting points (powers of 0.4+0.9i), which
            # are unlikely to lie on a symmetry axis of the root set
            roots = [ctx.mpc((0.4+0.9j)**n) for n in xrange(deg)]
        else:
            # Use caller-supplied initial guesses, padding with the
            # default starting points if too few were given
            roots = [None]*deg
            deg_init = min(deg, len(roots_init))
            roots[:deg_init] = list(roots_init[:deg_init])
            roots[deg_init:] = [ctx.mpc((0.4+0.9j)**n) for n
                                in xrange(deg_init,deg)]
        err = [ctx.one for n in xrange(deg)]
        # Durand-Kerner iteration until convergence
        for step in xrange(maxsteps):
            if abs(max(err)) < tol:
                break
            for i in xrange(deg):
                p = roots[i]
                x = f(p)
                # Divide by the product of distances to the other iterates;
                # a coincident iterate is simply skipped for this factor
                for j in xrange(deg):
                    if i != j:
                        try:
                            x /= (p-roots[j])
                        except ZeroDivisionError:
                            continue
                roots[i] = p - x
                err[i] = abs(x)
        if abs(max(err)) >= tol:
            raise ctx.NoConvergence("Didn't converge in maxsteps=%d steps." \
                    % maxsteps)
        # Remove small real or imaginary parts
        if cleanup:
            for i in xrange(deg):
                if abs(roots[i]) < tol:
                    roots[i] = ctx.zero
                elif abs(ctx._im(roots[i])) < tol:
                    roots[i] = roots[i].real
                elif abs(ctx._re(roots[i])) < tol:
                    roots[i] = roots[i].imag * 1j
        # Real roots first (sorted by value), then complex roots sorted by
        # real part so that conjugate pairs become adjacent
        roots.sort(key=lambda x: (abs(ctx._im(x)), ctx._re(x)))
    if error:
        err = max(err)
        err = max(err, ctx.ldexp(1, -orig+1))
        return [+r for r in roots], +err
    else:
        return [+r for r in roots]
.venv/lib/python3.11/site-packages/mpmath/matrices/__init__.py ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ from . import eigen # to set methods
2
+ from . import eigen_symmetric # to set methods
.venv/lib/python3.11/site-packages/mpmath/matrices/__pycache__/__init__.cpython-311.pyc ADDED
Binary file (286 Bytes). View file
 
.venv/lib/python3.11/site-packages/mpmath/matrices/__pycache__/calculus.cpython-311.pyc ADDED
Binary file (22.9 kB). View file
 
.venv/lib/python3.11/site-packages/mpmath/matrices/__pycache__/eigen.cpython-311.pyc ADDED
Binary file (31 kB). View file
 
.venv/lib/python3.11/site-packages/mpmath/matrices/__pycache__/eigen_symmetric.cpython-311.pyc ADDED
Binary file (70 kB). View file
 
.venv/lib/python3.11/site-packages/mpmath/matrices/__pycache__/linalg.cpython-311.pyc ADDED
Binary file (40.1 kB). View file
 
.venv/lib/python3.11/site-packages/mpmath/matrices/__pycache__/matrices.cpython-311.pyc ADDED
Binary file (44.7 kB). View file
 
.venv/lib/python3.11/site-packages/mpmath/matrices/calculus.py ADDED
@@ -0,0 +1,531 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ..libmp.backend import xrange
2
+
3
+ # TODO: should use diagonalization-based algorithms
4
+
5
class MatrixCalculusMethods(object):
    # Mixin providing matrix transcendental functions (expm, cosm, sinm,
    # sqrtm, logm, powm) for an mpmath context ``ctx``. Methods take the
    # context as their first argument, following the context-method pattern.

    def _exp_pade(ctx, a):
        """
        Exponential of a matrix using Pade approximants.

        See G. H. Golub, C. F. van Loan 'Matrix Computations',
        third Ed., page 572

        TODO:
         - find a good estimate for q
         - reduce the number of matrix multiplications to improve
           performance
        """
        # Error bound for the degree-(p,p) Pade approximant of exp.
        def eps_pade(p):
            return ctx.mpf(2)**(3-2*p) * \
                ctx.factorial(p)**2/(ctx.factorial(2*p)**2 * (2*p + 1))
        q = 4
        extraq = 8
        # Pick the smallest degree meeting the current epsilon, plus margin.
        while 1:
            if eps_pade(q) < ctx.eps:
                break
            q += 1
        q += extraq
        # Scaling-and-squaring: scale a by 2**-j so its norm is moderate.
        j = int(max(1, ctx.mag(ctx.mnorm(a,'inf'))))
        extra = q
        prec = ctx.prec
        ctx.dps += extra + 3
        try:
            a = a/2**j
            na = a.rows
            den = ctx.eye(na)
            num = ctx.eye(na)
            x = ctx.eye(na)
            c = ctx.mpf(1)
            # Build numerator and denominator of the Pade approximant.
            for k in range(1, q+1):
                c *= ctx.mpf(q - k + 1)/((2*q - k + 1) * k)
                x = a*x
                cx = c*x
                num += cx
                den += (-1)**k * cx
            f = ctx.lu_solve_mat(den, num)
            # Undo the scaling by squaring j times.
            for k in range(j):
                f = f*f
        finally:
            ctx.prec = prec
        # Multiplying by 1 rounds the entries at the restored precision.
        return f*1

    def expm(ctx, A, method='taylor'):
        r"""
        Computes the matrix exponential of a square matrix `A`, which is defined
        by the power series

        .. math ::

            \exp(A) = I + A + \frac{A^2}{2!} + \frac{A^3}{3!} + \ldots

        With method='taylor', the matrix exponential is computed
        using the Taylor series. With method='pade', Pade approximants
        are used instead.

        **Examples**

        Basic examples::

            >>> from mpmath import *
            >>> mp.dps = 15; mp.pretty = True
            >>> expm(zeros(3))
            [1.0  0.0  0.0]
            [0.0  1.0  0.0]
            [0.0  0.0  1.0]
            >>> expm(eye(3))
            [2.71828182845905               0.0               0.0]
            [             0.0  2.71828182845905               0.0]
            [             0.0               0.0  2.71828182845905]
            >>> expm([[1,1,0],[1,0,1],[0,1,0]])
            [ 3.86814500615414  2.26812870852145  0.841130841230196]
            [ 2.26812870852145  2.44114713886289   1.42699786729125]
            [0.841130841230196  1.42699786729125    1.6000162976327]
            >>> expm([[1,1,0],[1,0,1],[0,1,0]], method='pade')
            [ 3.86814500615414  2.26812870852145  0.841130841230196]
            [ 2.26812870852145  2.44114713886289   1.42699786729125]
            [0.841130841230196  1.42699786729125    1.6000162976327]
            >>> expm([[1+j, 0], [1+j,1]])
            [(1.46869393991589 + 2.28735528717884j)                        0.0]
            [  (1.03776739863568 + 3.536943175722j)  (2.71828182845905 + 0.0j)]

        Matrices with large entries are allowed::

            >>> expm(matrix([[1,2],[2,3]])**25)
            [5.65024064048415e+2050488462815550  9.14228140091932e+2050488462815550]
            [9.14228140091932e+2050488462815550  1.47925220414035e+2050488462815551]

        The identity `\exp(A+B) = \exp(A) \exp(B)` does not hold for
        noncommuting matrices::

            >>> A = hilbert(3)
            >>> B = A + eye(3)
            >>> chop(mnorm(A*B - B*A))
            0.0
            >>> chop(mnorm(expm(A+B) - expm(A)*expm(B)))
            0.0
            >>> B = A + ones(3)
            >>> mnorm(A*B - B*A)
            1.8
            >>> mnorm(expm(A+B) - expm(A)*expm(B))
            42.0927851137247

        """
        if method == 'pade':
            prec = ctx.prec
            try:
                A = ctx.matrix(A)
                ctx.prec += 2*A.rows
                res = ctx._exp_pade(A)
            finally:
                ctx.prec = prec
            return res
        A = ctx.matrix(A)
        prec = ctx.prec
        # Scaling-and-squaring: choose j from the matrix norm, with extra
        # scaling proportional to sqrt(precision) to speed up the series.
        j = int(max(1, ctx.mag(ctx.mnorm(A,'inf'))))
        j += int(0.5*prec**0.5)
        try:
            ctx.prec += 10 + 2*j
            tol = +ctx.eps
            A = A/2**j
            T = A
            Y = A**0 + A
            k = 2
            # Sum the Taylor series until the term norm drops below tol.
            while 1:
                T *= A * (1/ctx.mpf(k))
                if ctx.mnorm(T, 'inf') < tol:
                    break
                Y += T
                k += 1
            # Undo the scaling by squaring j times.
            for k in xrange(j):
                Y = Y*Y
        finally:
            ctx.prec = prec
        # Round the entries at the restored precision.
        Y *= 1
        return Y

    def cosm(ctx, A):
        r"""
        Gives the cosine of a square matrix `A`, defined in analogy
        with the matrix exponential.

        Examples::

            >>> from mpmath import *
            >>> mp.dps = 15; mp.pretty = True
            >>> X = eye(3)
            >>> cosm(X)
            [0.54030230586814               0.0               0.0]
            [             0.0  0.54030230586814               0.0]
            [             0.0               0.0  0.54030230586814]
            >>> X = hilbert(3)
            >>> cosm(X)
            [ 0.424403834569555  -0.316643413047167  -0.221474945949293]
            [-0.316643413047167   0.820646708837824  -0.127183694770039]
            [-0.221474945949293  -0.127183694770039   0.909236687217541]
            >>> X = matrix([[1+j,-2],[0,-j]])
            >>> cosm(X)
            [(0.833730025131149 - 0.988897705762865j)  (1.07485840848393 - 0.17192140544213j)]
            [                                     0.0               (1.54308063481524 + 0.0j)]
        """
        # cos(A) = (exp(iA) + exp(-iA)) / 2
        B = 0.5 * (ctx.expm(A*ctx.j) + ctx.expm(A*(-ctx.j)))
        # For a real input matrix the result is real; drop imaginary parts.
        if not sum(A.apply(ctx.im).apply(abs)):
            B = B.apply(ctx.re)
        return B

    def sinm(ctx, A):
        r"""
        Gives the sine of a square matrix `A`, defined in analogy
        with the matrix exponential.

        Examples::

            >>> from mpmath import *
            >>> mp.dps = 15; mp.pretty = True
            >>> X = eye(3)
            >>> sinm(X)
            [0.841470984807897                0.0                0.0]
            [              0.0  0.841470984807897                0.0]
            [              0.0                0.0  0.841470984807897]
            >>> X = hilbert(3)
            >>> sinm(X)
            [0.711608512150994  0.339783913247439  0.220742837314741]
            [0.339783913247439  0.244113865695532  0.187231271174372]
            [0.220742837314741  0.187231271174372  0.155816730769635]
            >>> X = matrix([[1+j,-2],[0,-j]])
            >>> sinm(X)
            [(1.29845758141598 + 0.634963914784736j)  (-1.96751511930922 + 0.314700021761367j)]
            [                                    0.0                   (0.0 - 1.1752011936438j)]
        """
        # sin(A) = (exp(iA) - exp(-iA)) / (2i)
        B = (-0.5j) * (ctx.expm(A*ctx.j) - ctx.expm(A*(-ctx.j)))
        # For a real input matrix the result is real; drop imaginary parts.
        if not sum(A.apply(ctx.im).apply(abs)):
            B = B.apply(ctx.re)
        return B

    def _sqrtm_rot(ctx, A, _may_rotate):
        # If the iteration fails to converge, cheat by performing
        # a rotation by a complex number
        u = ctx.j**0.3
        return ctx.sqrtm(u*A, _may_rotate) / ctx.sqrt(u)

    def sqrtm(ctx, A, _may_rotate=2):
        r"""
        Computes a square root of the square matrix `A`, i.e. returns
        a matrix `B = A^{1/2}` such that `B^2 = A`. The square root
        of a matrix, if it exists, is not unique.

        **Examples**

        Square roots of some simple matrices::

            >>> from mpmath import *
            >>> mp.dps = 15; mp.pretty = True
            >>> sqrtm([[1,0], [0,1]])
            [1.0  0.0]
            [0.0  1.0]
            >>> sqrtm([[0,0], [0,0]])
            [0.0  0.0]
            [0.0  0.0]
            >>> sqrtm([[2,0],[0,1]])
            [1.4142135623731  0.0]
            [            0.0  1.0]
            >>> sqrtm([[1,1],[1,0]])
            [ (0.920442065259926 - 0.21728689675164j)  (0.568864481005783 + 0.351577584254143j)]
            [(0.568864481005783 + 0.351577584254143j)  (0.351577584254143 - 0.568864481005783j)]
            >>> sqrtm([[1,0],[0,1]])
            [1.0  0.0]
            [0.0  1.0]
            >>> sqrtm([[-1,0],[0,1]])
            [(0.0 - 1.0j)           0.0]
            [         0.0  (1.0 + 0.0j)]
            >>> sqrtm([[j,0],[0,j]])
            [(0.707106781186547 + 0.707106781186547j)                                       0.0]
            [                                     0.0  (0.707106781186547 + 0.707106781186547j)]

        A square root of a rotation matrix, giving the corresponding
        half-angle rotation matrix::

            >>> t1 = 0.75
            >>> t2 = t1 * 0.5
            >>> A1 = matrix([[cos(t1), -sin(t1)], [sin(t1), cos(t1)]])
            >>> A2 = matrix([[cos(t2), -sin(t2)], [sin(t2), cos(t2)]])
            >>> sqrtm(A1)
            [0.930507621912314  -0.366272529086048]
            [0.366272529086048   0.930507621912314]
            >>> A2
            [0.930507621912314  -0.366272529086048]
            [0.366272529086048   0.930507621912314]

        The identity `(A^2)^{1/2} = A` does not necessarily hold::

            >>> A = matrix([[4,1,4],[7,8,9],[10,2,11]])
            >>> sqrtm(A**2)
            [ 4.0  1.0   4.0]
            [ 7.0  8.0   9.0]
            [10.0  2.0  11.0]
            >>> sqrtm(A)**2
            [ 4.0  1.0   4.0]
            [ 7.0  8.0   9.0]
            [10.0  2.0  11.0]
            >>> A = matrix([[-4,1,4],[7,-8,9],[10,2,11]])
            >>> sqrtm(A**2)
            [  7.43715112194995  -0.324127569985474    1.8481718827526]
            [-0.251549715716942    9.32699765900402   2.48221180985147]
            [  4.11609388833616   0.775751877098258    13.017955697342]
            >>> chop(sqrtm(A)**2)
            [-4.0   1.0   4.0]
            [ 7.0  -8.0   9.0]
            [10.0   2.0  11.0]

        For some matrices, a square root does not exist::

            >>> sqrtm([[0,1], [0,0]])
            Traceback (most recent call last):
              ...
            ZeroDivisionError: matrix is numerically singular

        Two examples from the documentation for Matlab's ``sqrtm``::

            >>> mp.dps = 15; mp.pretty = True
            >>> sqrtm([[7,10],[15,22]])
            [1.56669890360128  1.74077655955698]
            [2.61116483933547  4.17786374293675]
            >>>
            >>> X = matrix(\
            ...     [[5,-4,1,0,0],
            ...     [-4,6,-4,1,0],
            ...     [1,-4,6,-4,1],
            ...     [0,1,-4,6,-4],
            ...     [0,0,1,-4,5]])
            >>> Y = matrix(\
            ...     [[2,-1,-0,-0,-0],
            ...     [-1,2,-1,0,-0],
            ...     [0,-1,2,-1,0],
            ...     [-0,0,-1,2,-1],
            ...     [-0,-0,-0,-1,2]])
            >>> mnorm(sqrtm(X) - Y)
            4.53155328326114e-19

        """
        A = ctx.matrix(A)
        # Trivial
        if A*0 == A:
            return A
        prec = ctx.prec
        if _may_rotate:
            # Negative (nearly) real determinant: rotate first, since the
            # iteration converges poorly in that case.
            d = ctx.det(A)
            if abs(ctx.im(d)) < 16*ctx.eps and ctx.re(d) < 0:
                return ctx._sqrtm_rot(A, _may_rotate-1)
        try:
            ctx.prec += 10
            tol = ctx.eps * 128
            Y = A
            Z = I = A**0
            k = 0
            # Denman-Beavers iteration
            while 1:
                Yprev = Y
                try:
                    Y, Z = 0.5*(Y+ctx.inverse(Z)), 0.5*(Z+ctx.inverse(Y))
                except ZeroDivisionError:
                    if _may_rotate:
                        Y = ctx._sqrtm_rot(A, _may_rotate-1)
                        break
                    else:
                        raise
                mag1 = ctx.mnorm(Y-Yprev, 'inf')
                mag2 = ctx.mnorm(Y, 'inf')
                if mag1 <= mag2*tol:
                    break
                # Too slow convergence after a few steps: retry rotated.
                if _may_rotate and k > 6 and not mag1 < mag2 * 0.001:
                    return ctx._sqrtm_rot(A, _may_rotate-1)
                k += 1
                if k > ctx.prec:
                    raise ctx.NoConvergence
        finally:
            ctx.prec = prec
        # Round the entries at the restored precision.
        Y *= 1
        return Y

    def logm(ctx, A):
        r"""
        Computes a logarithm of the square matrix `A`, i.e. returns
        a matrix `B = \log(A)` such that `\exp(B) = A`. The logarithm
        of a matrix, if it exists, is not unique.

        **Examples**

        Logarithms of some simple matrices::

            >>> from mpmath import *
            >>> mp.dps = 15; mp.pretty = True
            >>> X = eye(3)
            >>> logm(X)
            [0.0  0.0  0.0]
            [0.0  0.0  0.0]
            [0.0  0.0  0.0]
            >>> logm(2*X)
            [0.693147180559945                0.0                0.0]
            [              0.0  0.693147180559945                0.0]
            [              0.0                0.0  0.693147180559945]
            >>> logm(expm(X))
            [1.0  0.0  0.0]
            [0.0  1.0  0.0]
            [0.0  0.0  1.0]

        A logarithm of a complex matrix::

            >>> X = matrix([[2+j, 1, 3], [1-j, 1-2*j, 1], [-4, -5, j]])
            >>> B = logm(X)
            >>> nprint(B)
            [ (0.808757 + 0.107759j)    (2.20752 + 0.202762j)   (1.07376 - 0.773874j)]
            [ (0.905709 - 0.107795j)  (0.0287395 - 0.824993j)  (0.111619 + 0.514272j)]
            [(-0.930151 + 0.399512j)   (-2.06266 - 0.674397j)  (0.791552 + 0.519839j)]
            >>> chop(expm(B))
            [(2.0 + 1.0j)          1.0           3.0]
            [(1.0 - 1.0j)  (1.0 - 2.0j)          1.0]
            [        -4.0          -5.0  (0.0 + 1.0j)]

        A matrix `X` close to the identity matrix, for which
        `\log(\exp(X)) = \exp(\log(X)) = X` holds::

            >>> X = eye(3) + hilbert(3)/4
            >>> X
            [              1.25               0.125  0.0833333333333333]
            [             0.125    1.08333333333333              0.0625]
            [0.0833333333333333              0.0625                1.05]
            >>> logm(expm(X))
            [              1.25               0.125  0.0833333333333333]
            [             0.125    1.08333333333333              0.0625]
            [0.0833333333333333              0.0625                1.05]
            >>> expm(logm(X))
            [              1.25               0.125  0.0833333333333333]
            [             0.125    1.08333333333333              0.0625]
            [0.0833333333333333              0.0625                1.05]

        A logarithm of a rotation matrix, giving back the angle of
        the rotation::

            >>> t = 3.7
            >>> A = matrix([[cos(t),sin(t)],[-sin(t),cos(t)]])
            >>> chop(logm(A))
            [             0.0  -2.58318530717959]
            [2.58318530717959                0.0]
            >>> (2*pi-t)
            2.58318530717959

        For some matrices, a logarithm does not exist::

            >>> logm([[1,0], [0,0]])
            Traceback (most recent call last):
              ...
            ZeroDivisionError: matrix is numerically singular

        Logarithm of a matrix with large entries::

            >>> logm(hilbert(3) * 10**20).apply(re)
            [ 45.5597513593433  1.27721006042799  0.317662687717978]
            [ 1.27721006042799  42.5222778973542   2.24003708791604]
            [0.317662687717978  2.24003708791604    42.395212822267]

        """
        A = ctx.matrix(A)
        prec = ctx.prec
        try:
            ctx.prec += 10
            tol = ctx.eps * 128
            I = A**0
            B = A
            n = 0
            # Inverse scaling-and-squaring: take repeated square roots
            # until B is close to the identity.
            while 1:
                B = ctx.sqrtm(B)
                n += 1
                if ctx.mnorm(B-I, 'inf') < 0.125:
                    break
            # Mercator series log(I+X) = X - X^2/2 + X^3/3 - ...
            T = X = B-I
            L = X*0
            k = 1
            while 1:
                if k & 1:
                    L += T / k
                else:
                    L -= T / k
                T *= X
                if ctx.mnorm(T, 'inf') < tol:
                    break
                k += 1
                if k > ctx.prec:
                    raise ctx.NoConvergence
        finally:
            ctx.prec = prec
        # Undo the n square roots: log(A) = 2**n * log(A**(2**-n))
        L *= 2**n
        return L

    def powm(ctx, A, r):
        r"""
        Computes `A^r = \exp(A \log r)` for a matrix `A` and complex
        number `r`.

        **Examples**

        Powers and inverse powers of a matrix::

            >>> from mpmath import *
            >>> mp.dps = 15; mp.pretty = True
            >>> A = matrix([[4,1,4],[7,8,9],[10,2,11]])
            >>> powm(A, 2)
            [ 63.0  20.0   69.0]
            [174.0  89.0  199.0]
            [164.0  48.0  179.0]
            >>> chop(powm(powm(A, 4), 1/4.))
            [ 4.0  1.0   4.0]
            [ 7.0  8.0   9.0]
            [10.0  2.0  11.0]
            >>> powm(extraprec(20)(powm)(A, -4), -1/4.)
            [ 4.0  1.0   4.0]
            [ 7.0  8.0   9.0]
            [10.0  2.0  11.0]
            >>> chop(powm(powm(A, 1+0.5j), 1/(1+0.5j)))
            [ 4.0  1.0   4.0]
            [ 7.0  8.0   9.0]
            [10.0  2.0  11.0]
            >>> powm(extraprec(5)(powm)(A, -1.5), -1/(1.5))
            [ 4.0  1.0   4.0]
            [ 7.0  8.0   9.0]
            [10.0  2.0  11.0]

        A Fibonacci-generating matrix::

            >>> powm([[1,1],[1,0]], 10)
            [89.0  55.0]
            [55.0  34.0]
            >>> fib(10)
            55.0
            >>> powm([[1,1],[1,0]], 6.5)
            [ (16.5166626964253 - 0.0121089837381789j)  (10.2078589271083 + 0.0195927472575932j)]
            [(10.2078589271083 + 0.0195927472575932j)   (6.30880376931698 - 0.0317017309957721j)]
            >>> (phi**6.5 - (1-phi)**6.5)/sqrt(5)
            (10.2078589271083 - 0.0195927472575932j)
            >>> powm([[1,1],[1,0]], 6.2)
            [ (14.3076953002666 - 0.008222855781077j)  (8.81733464837593 + 0.0133048601383712j)]
            [(8.81733464837593 + 0.0133048601383712j)  (5.49036065189071 - 0.0215277159194482j)]
            >>> (phi**6.2 - (1-phi)**6.2)/sqrt(5)
            (8.81733464837593 - 0.0133048601383712j)

        """
        A = ctx.matrix(A)
        r = ctx.convert(r)
        prec = ctx.prec
        try:
            ctx.prec += 10
            # Special-case integer and half-integer exponents, which can
            # be computed without the general exp/log machinery.
            if ctx.isint(r):
                v = A ** int(r)
            elif ctx.isint(r*2):
                y = int(r*2)
                v = ctx.sqrtm(A) ** y
            else:
                v = ctx.expm(r*ctx.logm(A))
        finally:
            ctx.prec = prec
        # Round the entries at the restored precision.
        v *= 1
        return v
.venv/lib/python3.11/site-packages/mpmath/matrices/eigen.py ADDED
@@ -0,0 +1,877 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/python
2
+ # -*- coding: utf-8 -*-
3
+
4
+ ##################################################################################################
5
+ # module for the eigenvalue problem
6
+ # Copyright 2013 Timo Hartmann (thartmann15 at gmail.com)
7
+ #
8
+ # todo:
9
+ # - implement balancing
10
+ # - agressive early deflation
11
+ #
12
+ ##################################################################################################
13
+
14
+ """
15
+ The eigenvalue problem
16
+ ----------------------
17
+
18
+ This file contains routines for the eigenvalue problem.
19
+
20
+ high level routines:
21
+
22
+ hessenberg : reduction of a real or complex square matrix to upper Hessenberg form
23
+ schur : reduction of a real or complex square matrix to upper Schur form
24
+ eig : eigenvalues and eigenvectors of a real or complex square matrix
25
+
26
+ low level routines:
27
+
28
+ hessenberg_reduce_0 : reduction of a real or complex square matrix to upper Hessenberg form
29
+ hessenberg_reduce_1 : auxiliary routine to hessenberg_reduce_0
30
+ qr_step : a single implicitly shifted QR step for an upper Hessenberg matrix
31
+ hessenberg_qr : Schur decomposition of an upper Hessenberg matrix
32
+ eig_tr_r : right eigenvectors of an upper triangular matrix
33
+ eig_tr_l : left eigenvectors of an upper triangular matrix
34
+ """
35
+
36
+ from ..libmp.backend import xrange
37
+
38
class Eigen(object):
    # Empty namespace class; the eigenvalue routines defined in this module
    # are attached to it as methods by the ``defun`` decorator below.
    pass
40
+
41
def defun(f):
    """Attach *f* to the Eigen class under its own name and return it."""
    name = f.__name__
    setattr(Eigen, name, f)
    return f
44
+
45
def hessenberg_reduce_0(ctx, A, T):
    """
    This routine computes the (upper) Hessenberg decomposition of a square matrix A.
    Given A, an unitary matrix Q is calculated such that

              Q' A Q = H              and             Q' Q = Q Q' = 1

    where H is an upper Hessenberg matrix, meaning that it only contains zeros
    below the first subdiagonal. Here ' denotes the hermitian transpose (i.e.
    transposition and conjugation).

    parameters:
      A         (input/output)  On input, A contains the square matrix A of
                dimension (n,n). On output, A contains a compressed representation
                of Q and H.
      T         (output)  An array of length n containing the first elements of
                the Householder reflectors.
    """

    # internally we work with householder reflections from the right.
    # let u be a row vector (i.e. u[i]=A[i,:i]). then
    # Q is build up by reflectors of the type (1-v'v) where v is a suitable
    # modification of u. these reflectors are applied to A from the right.
    # because we work with reflectors from the right we have to start with
    # the bottom row of A and work then upwards (this corresponds to
    # some kind of RQ decomposition).
    # the first part of the vectors v (i.e. A[i,:(i-1)]) are stored as row vectors
    # in the lower left part of A (excluding the diagonal and subdiagonal).
    # the last entry of v is stored in T.
    # the upper right part of A (including diagonal and subdiagonal) becomes H.


    n = A.rows
    # a 1x1 or 2x2 matrix is already in Hessenberg form
    if n <= 2: return

    for i in xrange(n-1, 1, -1):

        # scale the vector (1-norm of real/imag parts) to avoid
        # overflow/underflow in the norm computation below

        scale = 0
        for k in xrange(0, i):
            scale += abs(ctx.re(A[i,k])) + abs(ctx.im(A[i,k]))

        scale_inv = 0
        if scale != 0:
            scale_inv = 1 / scale

        if scale == 0 or ctx.isinf(scale_inv):
            # sadly there are floating point numbers not equal to zero whose reciprocal is infinity
            T[i] = 0
            A[i,i-1] = 0
            continue

        # calculate parameters for householder transformation

        H = 0
        for k in xrange(0, i):
            A[i,k] *= scale_inv
            rr = ctx.re(A[i,k])
            ii = ctx.im(A[i,k])
            H += rr * rr + ii * ii

        F = A[i,i-1]
        f = abs(F)
        G = ctx.sqrt(H)
        A[i,i-1] = - G * scale

        if f == 0:
            T[i] = G
        else:
            # choose the reflector phase from F to avoid cancellation
            ff = F / f
            T[i] = F + G * ff
            A[i,i-1] *= ff

        H += G * f
        # normalize the reflector vector
        H = 1 / ctx.sqrt(H)

        T[i] *= H
        for k in xrange(0, i - 1):
            A[i,k] *= H

        for j in xrange(0, i):
            # apply householder transformation (from right)

            G = ctx.conj(T[i]) * A[j,i-1]
            for k in xrange(0, i-1):
                G += ctx.conj(A[i,k]) * A[j,k]

            A[j,i-1] -= G * T[i]
            for k in xrange(0, i-1):
                A[j,k] -= G * A[i,k]

        for j in xrange(0, n):
            # apply householder transformation (from left)

            G = T[i] * A[i-1,j]
            for k in xrange(0, i-1):
                G += A[i,k] * A[k,j]

            A[i-1,j] -= G * ctx.conj(T[i])
            for k in xrange(0, i-1):
                A[k,j] -= G * ctx.conj(A[i,k])
147
+
148
+
149
+
150
def hessenberg_reduce_1(ctx, A, T):
    """
    This routine forms the unitary matrix Q described in hessenberg_reduce_0.

    parameters:
      A    (input/output) On input, A is the same matrix as delivered by
           hessenberg_reduce_0. On output, A is set to Q.

      T    (input) On input, T is the same array as delivered by hessenberg_reduce_0.
    """

    n = A.rows

    if n == 1:
        A[0,0] = 1
        return

    # start from the 2x2 identity and accumulate the stored reflectors
    A[0,0] = A[1,1] = 1
    A[0,1] = A[1,0] = 0

    for i in xrange(2, n):
        # T[i] == 0 marks a skipped (degenerate) reflector
        if T[i] != 0:

            for j in xrange(0, i):
                G = T[i] * A[i-1,j]
                for k in xrange(0, i-1):
                    G += A[i,k] * A[k,j]

                A[i-1,j] -= G * ctx.conj(T[i])
                for k in xrange(0, i-1):
                    A[k,j] -= G * ctx.conj(A[i,k])

        # overwrite the stored reflector row/column with identity entries
        A[i,i] = 1
        for j in xrange(0, i):
            A[j,i] = A[i,j] = 0
185
+
186
+
187
+
188
+ @defun
189
+ def hessenberg(ctx, A, overwrite_a = False):
190
+ """
191
+ This routine computes the Hessenberg decomposition of a square matrix A.
192
+ Given A, an unitary matrix Q is determined such that
193
+
194
+ Q' A Q = H and Q' Q = Q Q' = 1
195
+
196
+ where H is an upper right Hessenberg matrix. Here ' denotes the hermitian
197
+ transpose (i.e. transposition and conjugation).
198
+
199
+ input:
200
+ A : a real or complex square matrix
201
+ overwrite_a : if true, allows modification of A which may improve
202
+ performance. if false, A is not modified.
203
+
204
+ output:
205
+ Q : an unitary matrix
206
+ H : an upper right Hessenberg matrix
207
+
208
+ example:
209
+ >>> from mpmath import mp
210
+ >>> A = mp.matrix([[3, -1, 2], [2, 5, -5], [-2, -3, 7]])
211
+ >>> Q, H = mp.hessenberg(A)
212
+ >>> mp.nprint(H, 3) # doctest:+SKIP
213
+ [ 3.15 2.23 4.44]
214
+ [-0.769 4.85 3.05]
215
+ [ 0.0 3.61 7.0]
216
+ >>> print(mp.chop(A - Q * H * Q.transpose_conj()))
217
+ [0.0 0.0 0.0]
218
+ [0.0 0.0 0.0]
219
+ [0.0 0.0 0.0]
220
+
221
+ return value: (Q, H)
222
+ """
223
+
224
+ n = A.rows
225
+
226
+ if n == 1:
227
+ return (ctx.matrix([[1]]), A)
228
+
229
+ if not overwrite_a:
230
+ A = A.copy()
231
+
232
+ T = ctx.matrix(n, 1)
233
+
234
+ hessenberg_reduce_0(ctx, A, T)
235
+ Q = A.copy()
236
+ hessenberg_reduce_1(ctx, Q, T)
237
+
238
+ for x in xrange(n):
239
+ for y in xrange(x+2, n):
240
+ A[y,x] = 0
241
+
242
+ return Q, A
243
+
244
+
245
+ ###########################################################################
246
+
247
+
248
+ def qr_step(ctx, n0, n1, A, Q, shift):
249
+ """
250
+ This subroutine executes a single implicitly shifted QR step applied to an
251
+ upper Hessenberg matrix A. Given A and shift as input, first an QR
252
+ decomposition is calculated:
253
+
254
+ Q R = A - shift * 1 .
255
+
256
+ The output is then following matrix:
257
+
258
+ R Q + shift * 1
259
+
260
+ parameters:
261
+ n0, n1 (input) Two integers which specify the submatrix A[n0:n1,n0:n1]
262
+ on which this subroutine operators. The subdiagonal elements
263
+ to the left and below this submatrix must be deflated (i.e. zero).
264
+ following restriction is imposed: n1>=n0+2
265
+ A (input/output) On input, A is an upper Hessenberg matrix.
266
+ On output, A is replaced by "R Q + shift * 1"
267
+ Q (input/output) The parameter Q is multiplied by the unitary matrix
268
+ Q arising from the QR decomposition. Q can also be false, in which
269
+ case the unitary matrix Q is not computated.
270
+ shift (input) a complex number specifying the shift. idealy close to an
271
+ eigenvalue of the bottemmost part of the submatrix A[n0:n1,n0:n1].
272
+
273
+ references:
274
+ Stoer, Bulirsch - Introduction to Numerical Analysis.
275
+ Kresser : Numerical Methods for General and Structured Eigenvalue Problems
276
+ """
277
+
278
+ # implicitly shifted and bulge chasing is explained at p.398/399 in "Stoer, Bulirsch - Introduction to Numerical Analysis"
279
+ # for bulge chasing see also "Watkins - The Matrix Eigenvalue Problem" sec.4.5,p.173
280
+
281
+ # the Givens rotation we used is determined as follows: let c,s be two complex
282
+ # numbers. then we have following relation:
283
+ #
284
+ # v = sqrt(|c|^2 + |s|^2)
285
+ #
286
+ # 1/v [ c~ s~] [c] = [v]
287
+ # [-s c ] [s] [0]
288
+ #
289
+ # the matrix on the left is our Givens rotation.
290
+
291
+ n = A.rows
292
+
293
+ # first step
294
+
295
+ # calculate givens rotation
296
+ c = A[n0 ,n0] - shift
297
+ s = A[n0+1,n0]
298
+
299
+ v = ctx.hypot(ctx.hypot(ctx.re(c), ctx.im(c)), ctx.hypot(ctx.re(s), ctx.im(s)))
300
+
301
+ if v == 0:
302
+ v = 1
303
+ c = 1
304
+ s = 0
305
+ else:
306
+ c /= v
307
+ s /= v
308
+
309
+ cc = ctx.conj(c)
310
+ cs = ctx.conj(s)
311
+
312
+ for k in xrange(n0, n):
313
+ # apply givens rotation from the left
314
+ x = A[n0 ,k]
315
+ y = A[n0+1,k]
316
+ A[n0 ,k] = cc * x + cs * y
317
+ A[n0+1,k] = c * y - s * x
318
+
319
+ for k in xrange(min(n1, n0+3)):
320
+ # apply givens rotation from the right
321
+ x = A[k,n0 ]
322
+ y = A[k,n0+1]
323
+ A[k,n0 ] = c * x + s * y
324
+ A[k,n0+1] = cc * y - cs * x
325
+
326
+ if not isinstance(Q, bool):
327
+ for k in xrange(n):
328
+ # eigenvectors
329
+ x = Q[k,n0 ]
330
+ y = Q[k,n0+1]
331
+ Q[k,n0 ] = c * x + s * y
332
+ Q[k,n0+1] = cc * y - cs * x
333
+
334
+ # chase the bulge
335
+
336
+ for j in xrange(n0, n1 - 2):
337
+ # calculate givens rotation
338
+
339
+ c = A[j+1,j]
340
+ s = A[j+2,j]
341
+
342
+ v = ctx.hypot(ctx.hypot(ctx.re(c), ctx.im(c)), ctx.hypot(ctx.re(s), ctx.im(s)))
343
+
344
+ if v == 0:
345
+ A[j+1,j] = 0
346
+ v = 1
347
+ c = 1
348
+ s = 0
349
+ else:
350
+ A[j+1,j] = v
351
+ c /= v
352
+ s /= v
353
+
354
+ A[j+2,j] = 0
355
+
356
+ cc = ctx.conj(c)
357
+ cs = ctx.conj(s)
358
+
359
+ for k in xrange(j+1, n):
360
+ # apply givens rotation from the left
361
+ x = A[j+1,k]
362
+ y = A[j+2,k]
363
+ A[j+1,k] = cc * x + cs * y
364
+ A[j+2,k] = c * y - s * x
365
+
366
+ for k in xrange(0, min(n1, j+4)):
367
+ # apply givens rotation from the right
368
+ x = A[k,j+1]
369
+ y = A[k,j+2]
370
+ A[k,j+1] = c * x + s * y
371
+ A[k,j+2] = cc * y - cs * x
372
+
373
+ if not isinstance(Q, bool):
374
+ for k in xrange(0, n):
375
+ # eigenvectors
376
+ x = Q[k,j+1]
377
+ y = Q[k,j+2]
378
+ Q[k,j+1] = c * x + s * y
379
+ Q[k,j+2] = cc * y - cs * x
380
+
381
+
382
+
383
+ def hessenberg_qr(ctx, A, Q):
384
+ """
385
+ This routine computes the Schur decomposition of an upper Hessenberg matrix A.
386
+ Given A, an unitary matrix Q is determined such that
387
+
388
+ Q' A Q = R and Q' Q = Q Q' = 1
389
+
390
+ where R is an upper right triangular matrix. Here ' denotes the hermitian
391
+ transpose (i.e. transposition and conjugation).
392
+
393
+ parameters:
394
+ A (input/output) On input, A contains an upper Hessenberg matrix.
395
+ On output, A is replace by the upper right triangluar matrix R.
396
+
397
+ Q (input/output) The parameter Q is multiplied by the unitary
398
+ matrix Q arising from the Schur decomposition. Q can also be
399
+ false, in which case the unitary matrix Q is not computated.
400
+ """
401
+
402
+ n = A.rows
403
+
404
+ norm = 0
405
+ for x in xrange(n):
406
+ for y in xrange(min(x+2, n)):
407
+ norm += ctx.re(A[y,x]) ** 2 + ctx.im(A[y,x]) ** 2
408
+ norm = ctx.sqrt(norm) / n
409
+
410
+ if norm == 0:
411
+ return
412
+
413
+ n0 = 0
414
+ n1 = n
415
+
416
+ eps = ctx.eps / (100 * n)
417
+ maxits = ctx.dps * 4
418
+
419
+ its = totalits = 0
420
+
421
+ while 1:
422
+ # kressner p.32 algo 3
423
+ # the active submatrix is A[n0:n1,n0:n1]
424
+
425
+ k = n0
426
+
427
+ while k + 1 < n1:
428
+ s = abs(ctx.re(A[k,k])) + abs(ctx.im(A[k,k])) + abs(ctx.re(A[k+1,k+1])) + abs(ctx.im(A[k+1,k+1]))
429
+ if s < eps * norm:
430
+ s = norm
431
+ if abs(A[k+1,k]) < eps * s:
432
+ break
433
+ k += 1
434
+
435
+ if k + 1 < n1:
436
+ # deflation found at position (k+1, k)
437
+
438
+ A[k+1,k] = 0
439
+ n0 = k + 1
440
+
441
+ its = 0
442
+
443
+ if n0 + 1 >= n1:
444
+ # block of size at most two has converged
445
+ n0 = 0
446
+ n1 = k + 1
447
+ if n1 < 2:
448
+ # QR algorithm has converged
449
+ return
450
+ else:
451
+ if (its % 30) == 10:
452
+ # exceptional shift
453
+ shift = A[n1-1,n1-2]
454
+ elif (its % 30) == 20:
455
+ # exceptional shift
456
+ shift = abs(A[n1-1,n1-2])
457
+ elif (its % 30) == 29:
458
+ # exceptional shift
459
+ shift = norm
460
+ else:
461
+ # A = [ a b ] det(x-A)=x*x-x*tr(A)+det(A)
462
+ # [ c d ]
463
+ #
464
+ # eigenvalues bad: (tr(A)+sqrt((tr(A))**2-4*det(A)))/2
465
+ # bad because of cancellation if |c| is small and |a-d| is small, too.
466
+ #
467
+ # eigenvalues good: (a+d+sqrt((a-d)**2+4*b*c))/2
468
+
469
+ t = A[n1-2,n1-2] + A[n1-1,n1-1]
470
+ s = (A[n1-1,n1-1] - A[n1-2,n1-2]) ** 2 + 4 * A[n1-1,n1-2] * A[n1-2,n1-1]
471
+ if ctx.re(s) > 0:
472
+ s = ctx.sqrt(s)
473
+ else:
474
+ s = ctx.sqrt(-s) * 1j
475
+ a = (t + s) / 2
476
+ b = (t - s) / 2
477
+ if abs(A[n1-1,n1-1] - a) > abs(A[n1-1,n1-1] - b):
478
+ shift = b
479
+ else:
480
+ shift = a
481
+
482
+ its += 1
483
+ totalits += 1
484
+
485
+ qr_step(ctx, n0, n1, A, Q, shift)
486
+
487
+ if its > maxits:
488
+ raise RuntimeError("qr: failed to converge after %d steps" % its)
489
+
490
+
491
+ @defun
492
+ def schur(ctx, A, overwrite_a = False):
493
+ """
494
+ This routine computes the Schur decomposition of a square matrix A.
495
+ Given A, an unitary matrix Q is determined such that
496
+
497
+ Q' A Q = R and Q' Q = Q Q' = 1
498
+
499
+ where R is an upper right triangular matrix. Here ' denotes the
500
+ hermitian transpose (i.e. transposition and conjugation).
501
+
502
+ input:
503
+ A : a real or complex square matrix
504
+ overwrite_a : if true, allows modification of A which may improve
505
+ performance. if false, A is not modified.
506
+
507
+ output:
508
+ Q : an unitary matrix
509
+ R : an upper right triangular matrix
510
+
511
+ return value: (Q, R)
512
+
513
+ example:
514
+ >>> from mpmath import mp
515
+ >>> A = mp.matrix([[3, -1, 2], [2, 5, -5], [-2, -3, 7]])
516
+ >>> Q, R = mp.schur(A)
517
+ >>> mp.nprint(R, 3) # doctest:+SKIP
518
+ [2.0 0.417 -2.53]
519
+ [0.0 4.0 -4.74]
520
+ [0.0 0.0 9.0]
521
+ >>> print(mp.chop(A - Q * R * Q.transpose_conj()))
522
+ [0.0 0.0 0.0]
523
+ [0.0 0.0 0.0]
524
+ [0.0 0.0 0.0]
525
+
526
+ warning: The Schur decomposition is not unique.
527
+ """
528
+
529
+ n = A.rows
530
+
531
+ if n == 1:
532
+ return (ctx.matrix([[1]]), A)
533
+
534
+ if not overwrite_a:
535
+ A = A.copy()
536
+
537
+ T = ctx.matrix(n, 1)
538
+
539
+ hessenberg_reduce_0(ctx, A, T)
540
+ Q = A.copy()
541
+ hessenberg_reduce_1(ctx, Q, T)
542
+
543
+ for x in xrange(n):
544
+ for y in xrange(x + 2, n):
545
+ A[y,x] = 0
546
+
547
+ hessenberg_qr(ctx, A, Q)
548
+
549
+ return Q, A
550
+
551
+
552
+ def eig_tr_r(ctx, A):
553
+ """
554
+ This routine calculates the right eigenvectors of an upper right triangular matrix.
555
+
556
+ input:
557
+ A an upper right triangular matrix
558
+
559
+ output:
560
+ ER a matrix whose columns form the right eigenvectors of A
561
+
562
+ return value: ER
563
+ """
564
+
565
+ # this subroutine is inspired by the lapack routines ctrevc.f,clatrs.f
566
+
567
+ n = A.rows
568
+
569
+ ER = ctx.eye(n)
570
+
571
+ eps = ctx.eps
572
+
573
+ unfl = ctx.ldexp(ctx.one, -ctx.prec * 30)
574
+ # since mpmath effectively has no limits on the exponent, we simply scale doubles up
575
+ # original double has prec*20
576
+
577
+ smlnum = unfl * (n / eps)
578
+ simin = 1 / ctx.sqrt(eps)
579
+
580
+ rmax = 1
581
+
582
+ for i in xrange(1, n):
583
+ s = A[i,i]
584
+
585
+ smin = max(eps * abs(s), smlnum)
586
+
587
+ for j in xrange(i - 1, -1, -1):
588
+
589
+ r = 0
590
+ for k in xrange(j + 1, i + 1):
591
+ r += A[j,k] * ER[k,i]
592
+
593
+ t = A[j,j] - s
594
+ if abs(t) < smin:
595
+ t = smin
596
+
597
+ r = -r / t
598
+ ER[j,i] = r
599
+
600
+ rmax = max(rmax, abs(r))
601
+ if rmax > simin:
602
+ for k in xrange(j, i+1):
603
+ ER[k,i] /= rmax
604
+ rmax = 1
605
+
606
+ if rmax != 1:
607
+ for k in xrange(0, i + 1):
608
+ ER[k,i] /= rmax
609
+
610
+ return ER
611
+
612
+ def eig_tr_l(ctx, A):
613
+ """
614
+ This routine calculates the left eigenvectors of an upper right triangular matrix.
615
+
616
+ input:
617
+ A an upper right triangular matrix
618
+
619
+ output:
620
+ EL a matrix whose rows form the left eigenvectors of A
621
+
622
+ return value: EL
623
+ """
624
+
625
+ n = A.rows
626
+
627
+ EL = ctx.eye(n)
628
+
629
+ eps = ctx.eps
630
+
631
+ unfl = ctx.ldexp(ctx.one, -ctx.prec * 30)
632
+ # since mpmath effectively has no limits on the exponent, we simply scale doubles up
633
+ # original double has prec*20
634
+
635
+ smlnum = unfl * (n / eps)
636
+ simin = 1 / ctx.sqrt(eps)
637
+
638
+ rmax = 1
639
+
640
+ for i in xrange(0, n - 1):
641
+ s = A[i,i]
642
+
643
+ smin = max(eps * abs(s), smlnum)
644
+
645
+ for j in xrange(i + 1, n):
646
+
647
+ r = 0
648
+ for k in xrange(i, j):
649
+ r += EL[i,k] * A[k,j]
650
+
651
+ t = A[j,j] - s
652
+ if abs(t) < smin:
653
+ t = smin
654
+
655
+ r = -r / t
656
+ EL[i,j] = r
657
+
658
+ rmax = max(rmax, abs(r))
659
+ if rmax > simin:
660
+ for k in xrange(i, j + 1):
661
+ EL[i,k] /= rmax
662
+ rmax = 1
663
+
664
+ if rmax != 1:
665
+ for k in xrange(i, n):
666
+ EL[i,k] /= rmax
667
+
668
+ return EL
669
+
670
+ @defun
671
+ def eig(ctx, A, left = False, right = True, overwrite_a = False):
672
+ """
673
+ This routine computes the eigenvalues and optionally the left and right
674
+ eigenvectors of a square matrix A. Given A, a vector E and matrices ER
675
+ and EL are calculated such that
676
+
677
+ A ER[:,i] = E[i] ER[:,i]
678
+ EL[i,:] A = EL[i,:] E[i]
679
+
680
+ E contains the eigenvalues of A. The columns of ER contain the right eigenvectors
681
+ of A whereas the rows of EL contain the left eigenvectors.
682
+
683
+
684
+ input:
685
+ A : a real or complex square matrix of shape (n, n)
686
+ left : if true, the left eigenvectors are calculated.
687
+ right : if true, the right eigenvectors are calculated.
688
+ overwrite_a : if true, allows modification of A which may improve
689
+ performance. if false, A is not modified.
690
+
691
+ output:
692
+ E : a list of length n containing the eigenvalues of A.
693
+ ER : a matrix whose columns contain the right eigenvectors of A.
694
+ EL : a matrix whose rows contain the left eigenvectors of A.
695
+
696
+ return values:
697
+ E if left and right are both false.
698
+ (E, ER) if right is true and left is false.
699
+ (E, EL) if left is true and right is false.
700
+ (E, EL, ER) if left and right are true.
701
+
702
+
703
+ examples:
704
+ >>> from mpmath import mp
705
+ >>> A = mp.matrix([[3, -1, 2], [2, 5, -5], [-2, -3, 7]])
706
+ >>> E, ER = mp.eig(A)
707
+ >>> print(mp.chop(A * ER[:,0] - E[0] * ER[:,0]))
708
+ [0.0]
709
+ [0.0]
710
+ [0.0]
711
+
712
+ >>> E, EL, ER = mp.eig(A,left = True, right = True)
713
+ >>> E, EL, ER = mp.eig_sort(E, EL, ER)
714
+ >>> mp.nprint(E)
715
+ [2.0, 4.0, 9.0]
716
+ >>> print(mp.chop(A * ER[:,0] - E[0] * ER[:,0]))
717
+ [0.0]
718
+ [0.0]
719
+ [0.0]
720
+ >>> print(mp.chop( EL[0,:] * A - EL[0,:] * E[0]))
721
+ [0.0 0.0 0.0]
722
+
723
+ warning:
724
+ - If there are multiple eigenvalues, the eigenvectors do not necessarily
725
+ span the whole vectorspace, i.e. ER and EL may have not full rank.
726
+ Furthermore in that case the eigenvectors are numerical ill-conditioned.
727
+ - In the general case the eigenvalues have no natural order.
728
+
729
+ see also:
730
+ - eigh (or eigsy, eighe) for the symmetric eigenvalue problem.
731
+ - eig_sort for sorting of eigenvalues and eigenvectors
732
+ """
733
+
734
+ n = A.rows
735
+
736
+ if n == 1:
737
+ if left and (not right):
738
+ return ([A[0]], ctx.matrix([[1]]))
739
+
740
+ if right and (not left):
741
+ return ([A[0]], ctx.matrix([[1]]))
742
+
743
+ return ([A[0]], ctx.matrix([[1]]), ctx.matrix([[1]]))
744
+
745
+ if not overwrite_a:
746
+ A = A.copy()
747
+
748
+ T = ctx.zeros(n, 1)
749
+
750
+ hessenberg_reduce_0(ctx, A, T)
751
+
752
+ if left or right:
753
+ Q = A.copy()
754
+ hessenberg_reduce_1(ctx, Q, T)
755
+ else:
756
+ Q = False
757
+
758
+ for x in xrange(n):
759
+ for y in xrange(x + 2, n):
760
+ A[y,x] = 0
761
+
762
+ hessenberg_qr(ctx, A, Q)
763
+
764
+ E = [0 for i in xrange(n)]
765
+ for i in xrange(n):
766
+ E[i] = A[i,i]
767
+
768
+ if not (left or right):
769
+ return E
770
+
771
+ if left:
772
+ EL = eig_tr_l(ctx, A)
773
+ EL = EL * Q.transpose_conj()
774
+
775
+ if right:
776
+ ER = eig_tr_r(ctx, A)
777
+ ER = Q * ER
778
+
779
+ if left and (not right):
780
+ return (E, EL)
781
+
782
+ if right and (not left):
783
+ return (E, ER)
784
+
785
+ return (E, EL, ER)
786
+
787
+ @defun
788
+ def eig_sort(ctx, E, EL = False, ER = False, f = "real"):
789
+ """
790
+ This routine sorts the eigenvalues and eigenvectors delivered by ``eig``.
791
+
792
+ parameters:
793
+ E : the eigenvalues as delivered by eig
794
+ EL : the left eigenvectors as delivered by eig, or false
795
+ ER : the right eigenvectors as delivered by eig, or false
796
+ f : either a string ("real" sort by increasing real part, "imag" sort by
797
+ increasing imag part, "abs" sort by absolute value) or a function
798
+ mapping complexs to the reals, i.e. ``f = lambda x: -mp.re(x) ``
799
+ would sort the eigenvalues by decreasing real part.
800
+
801
+ return values:
802
+ E if EL and ER are both false.
803
+ (E, ER) if ER is not false and left is false.
804
+ (E, EL) if EL is not false and right is false.
805
+ (E, EL, ER) if EL and ER are not false.
806
+
807
+ example:
808
+ >>> from mpmath import mp
809
+ >>> A = mp.matrix([[3, -1, 2], [2, 5, -5], [-2, -3, 7]])
810
+ >>> E, EL, ER = mp.eig(A,left = True, right = True)
811
+ >>> E, EL, ER = mp.eig_sort(E, EL, ER)
812
+ >>> mp.nprint(E)
813
+ [2.0, 4.0, 9.0]
814
+ >>> E, EL, ER = mp.eig_sort(E, EL, ER,f = lambda x: -mp.re(x))
815
+ >>> mp.nprint(E)
816
+ [9.0, 4.0, 2.0]
817
+ >>> print(mp.chop(A * ER[:,0] - E[0] * ER[:,0]))
818
+ [0.0]
819
+ [0.0]
820
+ [0.0]
821
+ >>> print(mp.chop( EL[0,:] * A - EL[0,:] * E[0]))
822
+ [0.0 0.0 0.0]
823
+ """
824
+
825
+ if isinstance(f, str):
826
+ if f == "real":
827
+ f = ctx.re
828
+ elif f == "imag":
829
+ f = ctx.im
830
+ elif f == "abs":
831
+ f = abs
832
+ else:
833
+ raise RuntimeError("unknown function %s" % f)
834
+
835
+ n = len(E)
836
+
837
+ # Sort eigenvalues (bubble-sort)
838
+
839
+ for i in xrange(n):
840
+ imax = i
841
+ s = f(E[i]) # s is the current maximal element
842
+
843
+ for j in xrange(i + 1, n):
844
+ c = f(E[j])
845
+ if c < s:
846
+ s = c
847
+ imax = j
848
+
849
+ if imax != i:
850
+ # swap eigenvalues
851
+
852
+ z = E[i]
853
+ E[i] = E[imax]
854
+ E[imax] = z
855
+
856
+ if not isinstance(EL, bool):
857
+ for j in xrange(n):
858
+ z = EL[i,j]
859
+ EL[i,j] = EL[imax,j]
860
+ EL[imax,j] = z
861
+
862
+ if not isinstance(ER, bool):
863
+ for j in xrange(n):
864
+ z = ER[j,i]
865
+ ER[j,i] = ER[j,imax]
866
+ ER[j,imax] = z
867
+
868
+ if isinstance(EL, bool) and isinstance(ER, bool):
869
+ return E
870
+
871
+ if isinstance(EL, bool) and not(isinstance(ER, bool)):
872
+ return (E, ER)
873
+
874
+ if isinstance(ER, bool) and not(isinstance(EL, bool)):
875
+ return (E, EL)
876
+
877
+ return (E, EL, ER)
.venv/lib/python3.11/site-packages/mpmath/matrices/eigen_symmetric.py ADDED
@@ -0,0 +1,1807 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/python
2
+ # -*- coding: utf-8 -*-
3
+
4
+ ##################################################################################################
5
+ # module for the symmetric eigenvalue problem
6
+ # Copyright 2013 Timo Hartmann (thartmann15 at gmail.com)
7
+ #
8
+ # todo:
9
+ # - implement balancing
10
+ #
11
+ ##################################################################################################
12
+
13
+ """
14
+ The symmetric eigenvalue problem.
15
+ ---------------------------------
16
+
17
+ This file contains routines for the symmetric eigenvalue problem.
18
+
19
+ high level routines:
20
+
21
+ eigsy : real symmetric (ordinary) eigenvalue problem
22
+ eighe : complex hermitian (ordinary) eigenvalue problem
23
+ eigh : unified interface for eigsy and eighe
24
+ svd_r : singular value decomposition for real matrices
25
+ svd_c : singular value decomposition for complex matrices
26
+ svd : unified interface for svd_r and svd_c
27
+
28
+
29
+ low level routines:
30
+
31
+ r_sy_tridiag : reduction of real symmetric matrix to real symmetric tridiagonal matrix
32
+ c_he_tridiag_0 : reduction of complex hermitian matrix to real symmetric tridiagonal matrix
33
+ c_he_tridiag_1 : auxiliary routine to c_he_tridiag_0
34
+ c_he_tridiag_2 : auxiliary routine to c_he_tridiag_0
35
+ tridiag_eigen : solves the real symmetric tridiagonal matrix eigenvalue problem
36
+ svd_r_raw : raw singular value decomposition for real matrices
37
+ svd_c_raw : raw singular value decomposition for complex matrices
38
+ """
39
+
40
+ from ..libmp.backend import xrange
41
+ from .eigen import defun
42
+
43
+
44
+ def r_sy_tridiag(ctx, A, D, E, calc_ev = True):
45
+ """
46
+ This routine transforms a real symmetric matrix A to a real symmetric
47
+ tridiagonal matrix T using an orthogonal similarity transformation:
48
+ Q' * A * Q = T (here ' denotes the matrix transpose).
49
+ The orthogonal matrix Q is build up from Householder reflectors.
50
+
51
+ parameters:
52
+ A (input/output) On input, A contains the real symmetric matrix of
53
+ dimension (n,n). On output, if calc_ev is true, A contains the
54
+ orthogonal matrix Q, otherwise A is destroyed.
55
+
56
+ D (output) real array of length n, contains the diagonal elements
57
+ of the tridiagonal matrix
58
+
59
+ E (output) real array of length n, contains the offdiagonal elements
60
+ of the tridiagonal matrix in E[0:(n-1)] where is the dimension of
61
+ the matrix A. E[n-1] is undefined.
62
+
63
+ calc_ev (input) If calc_ev is true, this routine explicitly calculates the
64
+ orthogonal matrix Q which is then returned in A. If calc_ev is
65
+ false, Q is not explicitly calculated resulting in a shorter run time.
66
+
67
+ This routine is a python translation of the fortran routine tred2.f in the
68
+ software library EISPACK (see netlib.org) which itself is based on the algol
69
+ procedure tred2 described in:
70
+ - Num. Math. 11, p.181-195 (1968) by Martin, Reinsch and Wilkonson
71
+ - Handbook for auto. comp., Vol II, Linear Algebra, p.212-226 (1971)
72
+
73
+ For a good introduction to Householder reflections, see also
74
+ Stoer, Bulirsch - Introduction to Numerical Analysis.
75
+ """
76
+
77
+ # note : the vector v of the i-th houshoulder reflector is stored in a[(i+1):,i]
78
+ # whereas v/<v,v> is stored in a[i,(i+1):]
79
+
80
+ n = A.rows
81
+ for i in xrange(n - 1, 0, -1):
82
+ # scale the vector
83
+
84
+ scale = 0
85
+ for k in xrange(0, i):
86
+ scale += abs(A[k,i])
87
+
88
+ scale_inv = 0
89
+ if scale != 0:
90
+ scale_inv = 1/scale
91
+
92
+ # sadly there are floating point numbers not equal to zero whose reciprocal is infinity
93
+
94
+ if i == 1 or scale == 0 or ctx.isinf(scale_inv):
95
+ E[i] = A[i-1,i] # nothing to do
96
+ D[i] = 0
97
+ continue
98
+
99
+ # calculate parameters for housholder transformation
100
+
101
+ H = 0
102
+ for k in xrange(0, i):
103
+ A[k,i] *= scale_inv
104
+ H += A[k,i] * A[k,i]
105
+
106
+ F = A[i-1,i]
107
+ G = ctx.sqrt(H)
108
+ if F > 0:
109
+ G = -G
110
+ E[i] = scale * G
111
+ H -= F * G
112
+ A[i-1,i] = F - G
113
+ F = 0
114
+
115
+ # apply housholder transformation
116
+
117
+ for j in xrange(0, i):
118
+ if calc_ev:
119
+ A[i,j] = A[j,i] / H
120
+
121
+ G = 0 # calculate A*U
122
+ for k in xrange(0, j + 1):
123
+ G += A[k,j] * A[k,i]
124
+ for k in xrange(j + 1, i):
125
+ G += A[j,k] * A[k,i]
126
+
127
+ E[j] = G / H # calculate P
128
+ F += E[j] * A[j,i]
129
+
130
+ HH = F / (2 * H)
131
+
132
+ for j in xrange(0, i): # calculate reduced A
133
+ F = A[j,i]
134
+ G = E[j] - HH * F # calculate Q
135
+ E[j] = G
136
+
137
+ for k in xrange(0, j + 1):
138
+ A[k,j] -= F * E[k] + G * A[k,i]
139
+
140
+ D[i] = H
141
+
142
+ for i in xrange(1, n): # better for compatibility
143
+ E[i-1] = E[i]
144
+ E[n-1] = 0
145
+
146
+ if calc_ev:
147
+ D[0] = 0
148
+ for i in xrange(0, n):
149
+ if D[i] != 0:
150
+ for j in xrange(0, i): # accumulate transformation matrices
151
+ G = 0
152
+ for k in xrange(0, i):
153
+ G += A[i,k] * A[k,j]
154
+ for k in xrange(0, i):
155
+ A[k,j] -= G * A[k,i]
156
+
157
+ D[i] = A[i,i]
158
+ A[i,i] = 1
159
+
160
+ for j in xrange(0, i):
161
+ A[j,i] = A[i,j] = 0
162
+ else:
163
+ for i in xrange(0, n):
164
+ D[i] = A[i,i]
165
+
166
+
167
+
168
+
169
+
170
+ def c_he_tridiag_0(ctx, A, D, E, T):
171
+ """
172
+ This routine transforms a complex hermitian matrix A to a real symmetric
173
+ tridiagonal matrix T using an unitary similarity transformation:
174
+ Q' * A * Q = T (here ' denotes the hermitian matrix transpose,
175
+ i.e. transposition und conjugation).
176
+ The unitary matrix Q is build up from Householder reflectors and
177
+ an unitary diagonal matrix.
178
+
179
+ parameters:
180
+ A (input/output) On input, A contains the complex hermitian matrix
181
+ of dimension (n,n). On output, A contains the unitary matrix Q
182
+ in compressed form.
183
+
184
+ D (output) real array of length n, contains the diagonal elements
185
+ of the tridiagonal matrix.
186
+
187
+ E (output) real array of length n, contains the offdiagonal elements
188
+ of the tridiagonal matrix in E[0:(n-1)] where is the dimension of
189
+ the matrix A. E[n-1] is undefined.
190
+
191
+ T (output) complex array of length n, contains a unitary diagonal
192
+ matrix.
193
+
194
+ This routine is a python translation (in slightly modified form) of the fortran
195
+ routine htridi.f in the software library EISPACK (see netlib.org) which itself
196
+ is a complex version of the algol procedure tred1 described in:
197
+ - Num. Math. 11, p.181-195 (1968) by Martin, Reinsch and Wilkonson
198
+ - Handbook for auto. comp., Vol II, Linear Algebra, p.212-226 (1971)
199
+
200
+ For a good introduction to Householder reflections, see also
201
+ Stoer, Bulirsch - Introduction to Numerical Analysis.
202
+ """
203
+
204
+ n = A.rows
205
+ T[n-1] = 1
206
+ for i in xrange(n - 1, 0, -1):
207
+
208
+ # scale the vector
209
+
210
+ scale = 0
211
+ for k in xrange(0, i):
212
+ scale += abs(ctx.re(A[k,i])) + abs(ctx.im(A[k,i]))
213
+
214
+ scale_inv = 0
215
+ if scale != 0:
216
+ scale_inv = 1 / scale
217
+
218
+ # sadly there are floating point numbers not equal to zero whose reciprocal is infinity
219
+
220
+ if scale == 0 or ctx.isinf(scale_inv):
221
+ E[i] = 0
222
+ D[i] = 0
223
+ T[i-1] = 1
224
+ continue
225
+
226
+ if i == 1:
227
+ F = A[i-1,i]
228
+ f = abs(F)
229
+ E[i] = f
230
+ D[i] = 0
231
+ if f != 0:
232
+ T[i-1] = T[i] * F / f
233
+ else:
234
+ T[i-1] = T[i]
235
+ continue
236
+
237
+ # calculate parameters for housholder transformation
238
+
239
+ H = 0
240
+ for k in xrange(0, i):
241
+ A[k,i] *= scale_inv
242
+ rr = ctx.re(A[k,i])
243
+ ii = ctx.im(A[k,i])
244
+ H += rr * rr + ii * ii
245
+
246
+ F = A[i-1,i]
247
+ f = abs(F)
248
+ G = ctx.sqrt(H)
249
+ H += G * f
250
+ E[i] = scale * G
251
+ if f != 0:
252
+ F = F / f
253
+ TZ = - T[i] * F # T[i-1]=-T[i]*F, but we need T[i-1] as temporary storage
254
+ G *= F
255
+ else:
256
+ TZ = -T[i] # T[i-1]=-T[i]
257
+ A[i-1,i] += G
258
+ F = 0
259
+
260
+ # apply housholder transformation
261
+
262
+ for j in xrange(0, i):
263
+ A[i,j] = A[j,i] / H
264
+
265
+ G = 0 # calculate A*U
266
+ for k in xrange(0, j + 1):
267
+ G += ctx.conj(A[k,j]) * A[k,i]
268
+ for k in xrange(j + 1, i):
269
+ G += A[j,k] * A[k,i]
270
+
271
+ T[j] = G / H # calculate P
272
+ F += ctx.conj(T[j]) * A[j,i]
273
+
274
+ HH = F / (2 * H)
275
+
276
+ for j in xrange(0, i): # calculate reduced A
277
+ F = A[j,i]
278
+ G = T[j] - HH * F # calculate Q
279
+ T[j] = G
280
+
281
+ for k in xrange(0, j + 1):
282
+ A[k,j] -= ctx.conj(F) * T[k] + ctx.conj(G) * A[k,i]
283
+ # as we use the lower left part for storage
284
+ # we have to use the transpose of the normal formula
285
+
286
+ T[i-1] = TZ
287
+ D[i] = H
288
+
289
+ for i in xrange(1, n): # better for compatibility
290
+ E[i-1] = E[i]
291
+ E[n-1] = 0
292
+
293
+ D[0] = 0
294
+ for i in xrange(0, n):
295
+ zw = D[i]
296
+ D[i] = ctx.re(A[i,i])
297
+ A[i,i] = zw
298
+
299
+
300
+
301
+
302
+
303
+
304
+
305
def c_he_tridiag_1(ctx, A, T):
    """
    Build the unitary matrix Q described in c_he_tridiag_0 from the
    householder reflectors and phase factors left behind by that routine.

    parameters:
      A (input/output) On input, the matrix as delivered by c_he_tridiag_0.
        On output, A is overwritten with Q.

      T (input) The array as delivered by c_he_tridiag_0.
    """

    n = A.rows

    for i in xrange(n):
        # accumulate the i-th reflector into the columns built so far;
        # a zero pivot means this step carried no reflector
        if A[i,i] != 0:
            for j in xrange(i):
                dot = 0
                for k in xrange(i):
                    dot += ctx.conj(A[i,k]) * A[k,j]
                for k in xrange(i):
                    A[k,j] -= dot * A[k,i]

        # overwrite the storage used by the reflector with a unit column
        A[i,i] = 1
        for j in xrange(i):
            A[j,i] = A[i,j] = 0

    # apply the diagonal phase factors collected in T
    for i in xrange(n):
        for k in xrange(n):
            A[i,k] *= T[k]
+
337
+
338
+
339
+
340
def c_he_tridiag_2(ctx, A, T, B):
    """
    Apply the unitary matrix Q described in c_he_tridiag_0 onto the
    matrix B, i.e. form the product Q*B in place.

    parameters:
      A (input) The matrix as delivered by c_he_tridiag_0.

      T (input) The array as delivered by c_he_tridiag_0.

      B (input/output) On input, a complex matrix. On output B is replaced
        by Q*B.

    This routine is a python translation of the fortran routine htribk.f in
    the software library EISPACK (see netlib.org). See c_he_tridiag_0 for
    more references.
    """

    n = A.rows

    # first undo the diagonal phase transformation on the rows of B
    for col in xrange(n):
        for row in xrange(n):
            B[row,col] *= T[row]

    # then apply the stored householder reflectors one after another
    for i in xrange(n):
        # a zero pivot means step i carried no reflector
        if A[i,i] != 0:
            for j in xrange(n):
                dot = 0
                for k in xrange(i):
                    dot += ctx.conj(A[i,k]) * B[k,j]
                for k in xrange(i):
                    B[k,j] -= dot * A[k,i]
+
373
+
374
+
375
+
376
+
377
def tridiag_eigen(ctx, d, e, z = False):
    """
    Find the eigenvalues and the first components of the eigenvectors of a
    real symmetric tridiagonal matrix using the implicit QL method.

    parameters:

      d (input/output) real array of length n. on input, d contains the
        diagonal elements of the input matrix. on output, d contains the
        eigenvalues in ascending order.

      e (input) real array of length n. on input, e contains the
        offdiagonal elements of the input matrix in e[0:(n-1)]. On output,
        e has been destroyed.

      z (input/output) If z is equal to False, no eigenvectors will be
        computed. Otherwise on input z should have the format z[0:m,0:n]
        (i.e. a real or complex matrix of dimension (m,n) ). On output this
        matrix will be multiplied by the matrix of the eigenvectors (i.e.
        the columns of this matrix are the eigenvectors): z --> z*EV
        That means if z[i,j]={1 if i==j; 0 otherwise} on input, then on
        output z will contain the first m components of the eigenvectors.
        That means if m is equal to n, the i-th eigenvector will be z[:,i].

    This routine is a python translation (in slightly modified form) of the
    fortran routine imtql2.f in the software library EISPACK (see
    netlib.org) which itself is based on the algol procedure imtql2
    described in:
    - num. math. 12, p. 377-383(1968) by martin and wilkinson
    - modified in num. math. 15, p. 450(1970) by dubrulle
    - handbook for auto. comp., vol. II-linear algebra, p. 241-248 (1971)
    See also the routine gaussq.f in netlog.org or acm algorithm 726.
    """

    n = len(d)
    e[n-1] = 0
    iterlim = 2 * ctx.dps          # iteration budget scales with precision

    for l in xrange(n):
        j = 0
        while True:
            # scan for a negligible subdiagonal element e[m]
            m = l
            while True:
                if m + 1 == n:
                    break
                if abs(e[m]) <= ctx.eps * (abs(d[m]) + abs(d[m + 1])):
                    break
                m = m + 1
            if m == l:
                # d[l] has converged
                break

            if j >= iterlim:
                raise RuntimeError("tridiag_eigen: no convergence to an eigenvalue after %d iterations" % iterlim)

            j += 1

            # form the implicit wilkinson-style shift
            p = d[l]
            g = (d[l + 1] - p) / (2 * e[l])
            r = ctx.hypot(g, 1)

            if g < 0:
                s = g - r
            else:
                s = g + r

            g = d[m] - p + e[l] / s

            s, c, p = 1, 1, 0

            # chase the bulge back from row m-1 down to row l
            for i in xrange(m - 1, l - 1, -1):
                f = s * e[i]
                b = c * e[i]
                if abs(f) > abs(g):
                    # ordering by magnitude improves stability; also used
                    # in gaussq.f and acm algorithm 726
                    c = g / f
                    r = ctx.hypot(c, 1)
                    e[i + 1] = f * r
                    s = 1 / r
                    c = c * s
                else:
                    s = f / g
                    r = ctx.hypot(s, 1)
                    e[i + 1] = g * r
                    c = 1 / r
                    s = s * c
                g = d[i + 1] - p
                r = (d[i] - g) * s + 2 * c * b
                p = s * r
                d[i + 1] = g + p
                g = c * r - b

                if not isinstance(z, bool):
                    # fold the plane rotation into the eigenvector matrix
                    for w in xrange(z.rows):
                        f = z[w,i+1]
                        z[w,i+1] = s * z[w,i] + c * f
                        z[w,i  ] = c * z[w,i] - s * f

            d[l] = d[l] - p
            e[l] = g
            e[m] = 0

    # sort eigenvalues (and eigenvectors) ascending with a selection-style
    # bubble pass, matching the original EISPACK ordering
    for ii in xrange(1, n):
        i = ii - 1
        k = i
        p = d[i]
        for j in xrange(ii, n):
            if d[j] >= p:
                continue
            k = j
            p = d[k]
        if k == i:
            continue
        d[k] = d[i]
        d[i] = p

        if not isinstance(z, bool):
            for w in xrange(z.rows):
                p = z[w,i]
                z[w,i] = z[w,k]
                z[w,k] = p
+
502
+ ########################################################################################
503
+
504
@defun
def eigsy(ctx, A, eigvals_only = False, overwrite_a = False):
    """
    This routine solves the (ordinary) eigenvalue problem for a real symmetric
    square matrix A. Given A, an orthogonal matrix Q is calculated which
    diagonalizes A:

          Q' A Q = diag(E)               and                Q Q' = Q' Q = 1

    Here diag(E) is a diagonal matrix whose diagonal is E.
    ' denotes the transpose.

    The columns of Q are the eigenvectors of A and E contains the eigenvalues:

          A Q[:,i] = E[i] Q[:,i]


    input:

      A: real matrix of format (n,n) which is symmetric
         (i.e. A=A' or A[i,j]=A[j,i])

      eigvals_only: if true, calculates only the eigenvalues E.
                    if false, calculates both eigenvectors and eigenvalues.

      overwrite_a: if true, allows modification of A which may improve
                   performance. if false, A is not modified.

    output:

      E: vector of format (n). contains the eigenvalues of A in ascending order.

      Q: orthogonal matrix of format (n,n). contains the eigenvectors
         of A as columns.

    return value:

          E          if eigvals_only is true
         (E, Q)      if eigvals_only is false

    example:
      >>> from mpmath import mp
      >>> A = mp.matrix([[3, 2], [2, 0]])
      >>> E = mp.eigsy(A, eigvals_only = True)
      >>> print(E)
      [-1.0]
      [ 4.0]

      >>> A = mp.matrix([[1, 2], [2, 3]])
      >>> E, Q = mp.eigsy(A)
      >>> print(mp.chop(A * Q[:,0] - E[0] * Q[:,0]))
      [0.0]
      [0.0]

    see also: eighe, eigh, eig
    """

    if not overwrite_a:
        A = A.copy()

    d = ctx.zeros(A.rows, 1)
    e = ctx.zeros(A.rows, 1)

    if eigvals_only:
        # tridiagonalize without accumulating the transformation
        r_sy_tridiag(ctx, A, d, e, calc_ev = False)
        tridiag_eigen(ctx, d, e, False)
        return d

    # accumulate the orthogonal transformation into A, then rotate it
    # into the eigenvector matrix during the QL iteration
    r_sy_tridiag(ctx, A, d, e, calc_ev = True)
    tridiag_eigen(ctx, d, e, A)
    return (d, A)
575
+
576
+
577
@defun
def eighe(ctx, A, eigvals_only = False, overwrite_a = False):
    """
    This routine solves the (ordinary) eigenvalue problem for a complex
    hermitian square matrix A. Given A, an unitary matrix Q is calculated which
    diagonalizes A:

        Q' A Q = diag(E)               and               Q Q' = Q' Q = 1

    Here diag(E) a is diagonal matrix whose diagonal is E.
    ' denotes the hermitian transpose (i.e. ordinary transposition and
    complex conjugation).

    The columns of Q are the eigenvectors of A and E contains the eigenvalues:

        A Q[:,i] = E[i] Q[:,i]


    input:

      A: complex matrix of format (n,n) which is hermitian
         (i.e. A=A' or A[i,j]=conj(A[j,i]))

      eigvals_only: if true, calculates only the eigenvalues E.
                    if false, calculates both eigenvectors and eigenvalues.

      overwrite_a: if true, allows modification of A which may improve
                   performance. if false, A is not modified.

    output:

      E: vector of format (n). contains the eigenvalues of A in ascending order.

      Q: unitary matrix of format (n,n). contains the eigenvectors
         of A as columns.

    return value:

           E         if eigvals_only is true
          (E, Q)     if eigvals_only is false

    example:
      >>> from mpmath import mp
      >>> A = mp.matrix([[1, -3 - 1j], [-3 + 1j, -2]])
      >>> E = mp.eighe(A, eigvals_only = True)
      >>> print(E)
      [-4.0]
      [ 3.0]

      >>> A = mp.matrix([[1, 2 + 5j], [2 - 5j, 3]])
      >>> E, Q = mp.eighe(A)
      >>> print(mp.chop(A * Q[:,0] - E[0] * Q[:,0]))
      [0.0]
      [0.0]

    see also: eigsy, eigh, eig
    """

    if not overwrite_a:
        A = A.copy()

    d = ctx.zeros(A.rows, 1)
    e = ctx.zeros(A.rows, 1)
    t = ctx.zeros(A.rows, 1)

    if eigvals_only:
        # reduce to real tridiagonal form, eigenvalues only
        c_he_tridiag_0(ctx, A, d, e, t)
        tridiag_eigen(ctx, d, e, False)
        return d

    # reduce, diagonalize with accumulated rotations, then map the real
    # eigenvector matrix back through the unitary reduction (Q*B)
    c_he_tridiag_0(ctx, A, d, e, t)
    B = ctx.eye(A.rows)
    tridiag_eigen(ctx, d, e, B)
    c_he_tridiag_2(ctx, A, t, B)
    return (d, B)
652
+
653
@defun
def eigh(ctx, A, eigvals_only = False, overwrite_a = False):
    """
    "eigh" is a unified interface for "eigsy" and "eighe". Depending on
    whether A is real or complex the appropriate function is called.

    This routine solves the (ordinary) eigenvalue problem for a real symmetric
    or complex hermitian square matrix A. Given A, an orthogonal (A real) or
    unitary (A complex) matrix Q is calculated which diagonalizes A:

        Q' A Q = diag(E)               and               Q Q' = Q' Q = 1

    Here diag(E) a is diagonal matrix whose diagonal is E.
    ' denotes the hermitian transpose (i.e. ordinary transposition and
    complex conjugation).

    The columns of Q are the eigenvectors of A and E contains the eigenvalues:

        A Q[:,i] = E[i] Q[:,i]

    input:

      A: a real or complex square matrix of format (n,n) which is symmetric
         (i.e. A[i,j]=A[j,i]) or hermitian (i.e. A[i,j]=conj(A[j,i])).

      eigvals_only: if true, calculates only the eigenvalues E.
                    if false, calculates both eigenvectors and eigenvalues.

      overwrite_a: if true, allows modification of A which may improve
                   performance. if false, A is not modified.

    output:

      E: vector of format (n). contains the eigenvalues of A in ascending order.

      Q: an orthogonal or unitary matrix of format (n,n). contains the
         eigenvectors of A as columns.

    return value:

          E         if eigvals_only is true
         (E, Q)     if eigvals_only is false

    example:
      >>> from mpmath import mp
      >>> A = mp.matrix([[3, 2], [2, 0]])
      >>> E = mp.eigh(A, eigvals_only = True)
      >>> print(E)
      [-1.0]
      [ 4.0]

      >>> A = mp.matrix([[1, 2], [2, 3]])
      >>> E, Q = mp.eigh(A)
      >>> print(mp.chop(A * Q[:,0] - E[0] * Q[:,0]))
      [0.0]
      [0.0]

      >>> A = mp.matrix([[1, 2 + 5j], [2 - 5j, 3]])
      >>> E, Q = mp.eigh(A)
      >>> print(mp.chop(A * Q[:,0] - E[0] * Q[:,0]))
      [0.0]
      [0.0]

    see also: eigsy, eighe, eig
    """

    # one complex entry anywhere makes the matrix hermitian territory;
    # otherwise treat it as real symmetric
    solver = ctx.eighe if any(type(entry) is ctx.mpc for entry in A) else ctx.eigsy
    return solver(A, eigvals_only = eigvals_only, overwrite_a = overwrite_a)
725
+
726
+
727
@defun
def gauss_quadrature(ctx, n, qtype = "legendre", alpha = 0, beta = 0):
    """
    This routine calulates gaussian quadrature rules for different
    families of orthogonal polynomials. Let (a, b) be an interval,
    W(x) a positive weight function and n a positive integer.
    Then the purpose of this routine is to calculate pairs (x_k, w_k)
    for k=0, 1, 2, ... (n-1) which give

      int(W(x) * F(x), x = a..b) = sum(w_k * F(x_k),k = 0..(n-1))

    exact for all polynomials F(x) of degree (strictly) less than 2*n. For all
    integrable functions F(x) the sum is a (more or less) good approximation to
    the integral. The x_k are called nodes (which are the zeros of the
    related orthogonal polynomials) and the w_k are called the weights.

    parameters
       n        (input) The degree of the quadrature rule, i.e. its number of
                nodes.

       qtype    (input) The family of orthogonal polynmomials for which to
                compute the quadrature rule. See the list below.

       alpha    (input) real number, used as parameter for some orthogonal
                polynomials

       beta     (input) real number, used as parameter for some orthogonal
                polynomials.

    return value

      (X, W)    a pair of two real arrays where x_k = X[k] and w_k = W[k].


    orthogonal polynomials:

      qtype           polynomial
      -----           ----------

      "legendre"      Legendre polynomials, W(x)=1 on the interval (-1, +1)
      "legendre01"    shifted Legendre polynomials, W(x)=1 on the interval (0, +1)
      "hermite"       Hermite polynomials, W(x)=exp(-x*x) on (-infinity,+infinity)
      "laguerre"      Laguerre polynomials, W(x)=exp(-x) on (0,+infinity)
      "glaguerre"     generalized Laguerre polynomials, W(x)=exp(-x)*x**alpha
                      on (0, +infinity)
      "chebyshev1"    Chebyshev polynomials of the first kind, W(x)=1/sqrt(1-x*x)
                      on (-1, +1)
      "chebyshev2"    Chebyshev polynomials of the second kind, W(x)=sqrt(1-x*x)
                      on (-1, +1)
      "jacobi"        Jacobi polynomials, W(x)=(1-x)**alpha * (1+x)**beta on (-1, +1)
                      with alpha>-1 and beta>-1

    examples:
      >>> from mpmath import mp
      >>> f = lambda x: x**8 + 2 * x**6 - 3 * x**4 + 5 * x**2 - 7
      >>> X, W = mp.gauss_quadrature(5, "hermite")
      >>> A = mp.fdot([(f(x), w) for x, w in zip(X, W)])
      >>> B = mp.sqrt(mp.pi) * 57 / 16
      >>> C = mp.quad(lambda x: mp.exp(- x * x) * f(x), [-mp.inf, +mp.inf])
      >>> mp.nprint((mp.chop(A-B, tol = 1e-10), mp.chop(A-C, tol = 1e-10)))
      (0.0, 0.0)

      >>> f = lambda x: x**5 - 2 * x**4 + 3 * x**3 - 5 * x**2 + 7 * x - 11
      >>> X, W = mp.gauss_quadrature(3, "laguerre")
      >>> A = mp.fdot([(f(x), w) for x, w in zip(X, W)])
      >>> B = 76
      >>> C = mp.quad(lambda x: mp.exp(-x) * f(x), [0, +mp.inf])
      >>> mp.nprint(mp.chop(A-B, tol = 1e-10), mp.chop(A-C, tol = 1e-10))
      .0

      # orthogonality of the chebyshev polynomials:
      >>> f = lambda x: mp.chebyt(3, x) * mp.chebyt(2, x)
      >>> X, W = mp.gauss_quadrature(3, "chebyshev1")
      >>> A = mp.fdot([(f(x), w) for x, w in zip(X, W)])
      >>> print(mp.chop(A, tol = 1e-10))
      0.0

    references:
      - golub and welsch, "calculations of gaussian quadrature rules", mathematics of
        computation 23, p. 221-230 (1969)
      - golub, "some modified matrix eigenvalue problems", siam review 15, p. 318-334 (1973)
      - stroud and secrest, "gaussian quadrature formulas", prentice-hall (1966)

    See also the routine gaussq.f in netlog.org or ACM Transactions on
    Mathematical Software algorithm 726.
    """

    # the golub-welsch approach: build the symmetric tridiagonal jacobi
    # matrix of the three-term recurrence (diagonal d, offdiagonal e),
    # diagonalize it, and read nodes from the eigenvalues and weights from
    # the squared first eigenvector components scaled by the zeroth moment w
    d = ctx.zeros(n, 1)
    e = ctx.zeros(n, 1)
    z = ctx.zeros(1, n)

    z[0,0] = 1

    if qtype == "legendre":
        # legendre on the range -1 +1 , abramowitz, table 25.4, p.916
        w = 2
        for i in xrange(n):
            j = i + 1
            e[i] = ctx.sqrt(j * j / (4 * j * j - ctx.mpf(1)))
    elif qtype == "legendre01":
        # legendre shifted to 0 1        , abramowitz, table 25.8, p.921
        w = 1
        for i in xrange(n):
            d[i] = 1 / ctx.mpf(2)
            j = i + 1
            e[i] = ctx.sqrt(j * j / (16 * j * j - ctx.mpf(4)))
    elif qtype == "hermite":
        # hermite on the range -inf +inf , abramowitz, table 25.10,p.924
        w = ctx.sqrt(ctx.pi)
        for i in xrange(n):
            j = i + 1
            e[i] = ctx.sqrt(j / ctx.mpf(2))
    elif qtype == "laguerre":
        # laguerre on the range 0 +inf , abramowitz, table 25.9, p. 923
        w = 1
        for i in xrange(n):
            j = i + 1
            d[i] = 2 * j - 1
            e[i] = j
    elif qtype=="chebyshev1":
        # chebyshev polynimials of the first kind
        w = ctx.pi
        for i in xrange(n):
            e[i] = 1 / ctx.mpf(2)
        e[0] = ctx.sqrt(1 / ctx.mpf(2))
    elif qtype == "chebyshev2":
        # chebyshev polynimials of the second kind
        w = ctx.pi / 2
        for i in xrange(n):
            e[i] = 1 / ctx.mpf(2)
    elif qtype == "glaguerre":
        # generalized laguerre on the range 0 +inf
        w = ctx.gamma(1 + alpha)
        for i in xrange(n):
            j = i + 1
            d[i] = 2 * j - 1 + alpha
            e[i] = ctx.sqrt(j * (j + alpha))
    elif qtype == "jacobi":
        # jacobi polynomials
        alpha = ctx.mpf(alpha)
        beta = ctx.mpf(beta)
        ab = alpha + beta
        abi = ab + 2
        w = (2**(ab+1)) * ctx.gamma(alpha + 1) * ctx.gamma(beta + 1) / ctx.gamma(abi)
        d[0] = (beta - alpha) / abi
        e[0] = ctx.sqrt(4 * (1 + alpha) * (1 + beta) / ((abi + 1) * (abi * abi)))
        a2b2 = beta * beta - alpha * alpha
        for i in xrange(1, n):
            j = i + 1
            abi = 2 * j + ab
            d[i] = a2b2 / ((abi - 2) * abi)
            e[i] = ctx.sqrt(4 * j * (j + alpha) * (j + beta) * (j + ab) / ((abi * abi - 1) * abi * abi))
    elif isinstance(qtype, str):
        raise ValueError("unknown quadrature rule \"%s\"" % qtype)
    else:
        # a non-string qtype is treated as a callable that fills d and e
        # with a custom recurrence and returns the zeroth moment w.
        # (the original code re-tested isinstance here, leaving an
        # unreachable "assert 0" branch; that dead code is removed.)
        w = qtype(d, e)

    tridiag_eigen(ctx, d, e, z)

    # weights are the zeroth moment times the squared first components of
    # the eigenvectors
    for i in xrange(len(z)):
        z[i] *= z[i]

    z = z.transpose()
    return (d, w * z)
893
+
894
+ ##################################################################################################
895
+ ##################################################################################################
896
+ ##################################################################################################
897
+
898
def svd_r_raw(ctx, A, V = False, calc_u = False):
    """
    This routine computes the singular value decomposition of a matrix A.
    Given A, two orthogonal matrices U and V are calculated such that

                    A = U S V

    where S is a suitable shaped matrix whose off-diagonal elements are zero.
    The diagonal elements of S are the singular values of A, i.e. the
    squareroots of the eigenvalues of A' A or A A'. Here ' denotes the transpose.
    Householder bidiagonalization and a variant of the QR algorithm is used.

    overview of the matrices :

      A : m*n       A gets replaced by U
      U : m*n       U replaces A. If n>m then only the first m*m block of U is
                    non-zero. column-orthogonal: U' U = B
                    here B is a n*n matrix whose first min(m,n) diagonal
                    elements are 1 and all other elements are zero.
      S : n*n       diagonal matrix, only the diagonal elements are stored in
                    the array S. only the first min(m,n) diagonal elements are non-zero.
      V : n*n       orthogonal: V V' = V' V = 1

    parameters:
      A        (input/output) On input, A contains a real matrix of shape m*n.
               On output, if calc_u is true A contains the column-orthogonal
               matrix U; otherwise A is simply used as workspace and thus destroyed.

      V        (input/output) if false, the matrix V is not calculated. otherwise
               V must be a matrix of shape n*n.

      calc_u   (input) If true, the matrix U is calculated and replaces A.
               if false, U is not calculated and A is simply destroyed

    return value:
      S        an array of length n containing the singular values of A sorted by
               decreasing magnitude. only the first min(m,n) elements are non-zero.

    This routine is a python translation of the fortran routine svd.f in the
    software library EISPACK (see netlib.org) which itself is based on the
    algol procedure svd described in:
      - num. math. 14, 403-420(1970) by golub and reinsch.
      - wilkinson/reinsch: handbook for auto. comp., vol ii-linear algebra, 134-151(1971).
    """

    m, n = A.rows, A.cols

    S = ctx.zeros(n, 1)

    # work holds the superdiagonal of the bidiagonal form
    work = ctx.zeros(n, 1)

    g = scale = anorm = 0
    maxits = 3 * ctx.dps           # iteration budget scales with precision

    # -- phase 1: householder reduction to bidiagonal form --------------
    for i in xrange(n):
        work[i] = scale*g
        g = s = scale = 0
        if i < m:
            # left reflector, annihilates column i below the diagonal
            for k in xrange(i, m):
                scale += ctx.fabs(A[k,i])
            if scale != 0:
                for k in xrange(i, m):
                    A[k,i] /= scale
                    s += A[k,i] * A[k,i]
                f = A[i,i]
                g = -ctx.sqrt(s)
                if f < 0:
                    g = -g     # choose sign to avoid cancellation
                h = f * g - s
                A[i,i] = f - g
                for j in xrange(i+1, n):
                    s = 0
                    for k in xrange(i, m):
                        s += A[k,i] * A[k,j]
                    f = s / h
                    for k in xrange(i, m):
                        A[k,j] += f * A[k,i]
                for k in xrange(i,m):
                    A[k,i] *= scale

        S[i] = scale * g
        g = s = scale = 0

        if i < m and i != n - 1:
            # right reflector, annihilates row i right of the superdiagonal
            for k in xrange(i+1, n):
                scale += ctx.fabs(A[i,k])
            if scale:
                for k in xrange(i+1, n):
                    A[i,k] /= scale
                    s += A[i,k] * A[i,k]
                f = A[i,i+1]
                g = -ctx.sqrt(s)
                if f < 0:
                    g = -g
                h = f * g - s
                A[i,i+1] = f - g

                for k in xrange(i+1, n):
                    work[k] = A[i,k] / h

                for j in xrange(i+1, m):
                    s = 0
                    for k in xrange(i+1, n):
                        s += A[j,k] * A[i,k]
                    for k in xrange(i+1, n):
                        A[j,k] += s * work[k]

                for k in xrange(i+1, n):
                    A[i,k] *= scale

        anorm = max(anorm, ctx.fabs(S[i]) + ctx.fabs(work[i]))

    # -- phase 2: accumulation of right hand transformations ------------
    if not isinstance(V, bool):
        for i in xrange(n-2, -1, -1):
            V[i+1,i+1] = 1

            if work[i+1] != 0:
                for j in xrange(i+1, n):
                    V[i,j] = (A[i,j] / A[i,i+1]) / work[i+1]
                for j in xrange(i+1, n):
                    s = 0
                    for k in xrange(i+1, n):
                        s += A[i,k] * V[j,k]
                    for k in xrange(i+1, n):
                        V[j,k] += s * V[i,k]

            for j in xrange(i+1, n):
                V[j,i] = V[i,j] = 0

        V[0,0] = 1

    minnm = m if m < n else n

    # -- phase 3: accumulation of left hand transformations -------------
    if calc_u:
        for i in xrange(minnm-1, -1, -1):
            g = S[i]
            for j in xrange(i+1, n):
                A[i,j] = 0
            if g != 0:
                g = 1 / g
                for j in xrange(i+1, n):
                    s = 0
                    for k in xrange(i+1, m):
                        s += A[k,i] * A[k,j]
                    f = (s / A[i,i]) * g
                    for k in xrange(i, m):
                        A[k,j] += f * A[k,i]
                for j in xrange(i, m):
                    A[j,i] *= g
            else:
                for j in xrange(i, m):
                    A[j,i] = 0
            A[i,i] += 1

    # -- phase 4: diagonalization of the bidiagonal form -----------------
    # loop over singular values, and over allowed iterations
    for k in xrange(n - 1, -1, -1):
        its = 0
        while True:
            its += 1
            flag = True

            # test for splitting: a negligible superdiagonal element lets
            # the problem decouple at row l
            for l in xrange(k, -1, -1):
                nm = l-1

                if ctx.fabs(work[l]) + anorm == anorm:
                    flag = False
                    break

                if ctx.fabs(S[nm]) + anorm == anorm:
                    break

            if flag:
                # cancel work[l] by a sequence of givens rotations
                c = 0
                s = 1
                for i in xrange(l, k + 1):
                    f = s * work[i]
                    work[i] *= c
                    if ctx.fabs(f) + anorm == anorm:
                        break
                    g = S[i]
                    h = ctx.hypot(f, g)
                    S[i] = h
                    h = 1 / h
                    c = g * h
                    s = - f * h

                    if calc_u:
                        for j in xrange(m):
                            y = A[j,nm]
                            z = A[j,i]
                            A[j,nm] = y * c + z * s
                            A[j,i] = z * c - y * s

            z = S[k]

            if l == k:             # convergence
                if z < 0:          # singular value is made nonnegative
                    S[k] = -z
                    if not isinstance(V, bool):
                        for j in xrange(n):
                            V[k,j] = -V[k,j]
                break

            if its >= maxits:
                raise RuntimeError("svd: no convergence to an eigenvalue after %d iterations" % its)

            # shift from bottom 2 by 2 minor
            x = S[l]
            nm = k-1
            y = S[nm]
            g = work[nm]
            h = work[k]
            f = ((y - z) * (y + z) + (g - h) * (g + h))/(2 * h * y)
            g = ctx.hypot(f, 1)
            if f >= 0: f = ((x - z) * (x + z) + h * ((y / (f + g)) - h)) / x
            else:      f = ((x - z) * (x + z) + h * ((y / (f - g)) - h)) / x

            # next qr transformation
            c = s = 1

            for j in xrange(l, nm + 1):
                g = work[j+1]
                y = S[j+1]
                h = s * g
                g = c * g
                z = ctx.hypot(f, h)
                work[j] = z
                c = f / z
                s = h / z
                f = x * c + g * s
                g = g * c - x * s
                h = y * s
                y *= c
                if not isinstance(V, bool):
                    for jj in xrange(n):
                        x = V[j  ,jj]
                        z = V[j+1,jj]
                        V[j  ,jj]= x * c + z * s
                        V[j+1,jj]= z * c - x * s
                z = ctx.hypot(f, h)
                S[j] = z
                if z != 0:         # rotation can be arbitrary if z=0
                    z = 1 / z
                    c = f * z
                    s = h * z
                f = c * g + s * y
                x = c * y - s * g

                if calc_u:
                    for jj in xrange(m):
                        y = A[jj,j  ]
                        z = A[jj,j+1]
                        A[jj,j  ] = y * c + z * s
                        A[jj,j+1] = z * c - y * s

            work[l] = 0
            work[k] = f
            S[k] = x

    # -- phase 5: sort singular values into decreasing order ------------
    # (selection-style bubble-sort, swapping U columns / V rows along)
    for i in xrange(n):
        imax = i
        s = ctx.fabs(S[i])      # s is the current maximal element

        for j in xrange(i + 1, n):
            c = ctx.fabs(S[j])
            if c > s:
                s = c
                imax = j

        if imax != i:
            # swap singular values
            z = S[i]
            S[i] = S[imax]
            S[imax] = z

            if calc_u:
                for j in xrange(m):
                    z = A[j,i]
                    A[j,i] = A[j,imax]
                    A[j,imax] = z

            if not isinstance(V, bool):
                for j in xrange(n):
                    z = V[i,j]
                    V[i,j] = V[imax,j]
                    V[imax,j] = z

    return S
1194
+
1195
+ #######################
1196
+
1197
+ def svd_c_raw(ctx, A, V = False, calc_u = False):
1198
+ """
1199
+ This routine computes the singular value decomposition of a matrix A.
1200
+ Given A, two unitary matrices U and V are calculated such that
1201
+
1202
+ A = U S V
1203
+
1204
+ where S is a suitable shaped matrix whose off-diagonal elements are zero.
1205
+ The diagonal elements of S are the singular values of A, i.e. the
1206
+ squareroots of the eigenvalues of A' A or A A'. Here ' denotes the hermitian
1207
+ transpose (i.e. transposition and conjugation). Householder bidiagonalization
1208
+ and a variant of the QR algorithm is used.
1209
+
1210
+ overview of the matrices :
1211
+
1212
+ A : m*n A gets replaced by U
1213
+ U : m*n U replaces A. If n>m then only the first m*m block of U is
1214
+ non-zero. column-unitary: U' U = B
1215
+ here B is a n*n matrix whose first min(m,n) diagonal
1216
+ elements are 1 and all other elements are zero.
1217
+ S : n*n diagonal matrix, only the diagonal elements are stored in
1218
+ the array S. only the first min(m,n) diagonal elements are non-zero.
1219
+ V : n*n unitary: V V' = V' V = 1
1220
+
1221
+ parameters:
1222
+ A (input/output) On input, A contains a complex matrix of shape m*n.
1223
+ On output, if calc_u is true A contains the column-unitary
1224
+ matrix U; otherwise A is simply used as workspace and thus destroyed.
1225
+
1226
+ V (input/output) if false, the matrix V is not calculated. otherwise
1227
+ V must be a matrix of shape n*n.
1228
+
1229
+ calc_u (input) If true, the matrix U is calculated and replaces A.
1230
+ if false, U is not calculated and A is simply destroyed
1231
+
1232
+ return value:
1233
+ S an array of length n containing the singular values of A sorted by
1234
+ decreasing magnitude. only the first min(m,n) elements are non-zero.
1235
+
1236
+ This routine is a python translation of the fortran routine svd.f in the
1237
+ software library EISPACK (see netlib.org) which itself is based on the
1238
+ algol procedure svd described in:
1239
+ - num. math. 14, 403-420(1970) by golub and reinsch.
1240
+ - wilkinson/reinsch: handbook for auto. comp., vol ii-linear algebra, 134-151(1971).
1241
+
1242
+ """
1243
+
1244
+ m, n = A.rows, A.cols
1245
+
1246
+ S = ctx.zeros(n, 1)
1247
+
1248
+ # work is a temporary array of size n
1249
+ work = ctx.zeros(n, 1)
1250
+ lbeta = ctx.zeros(n, 1)
1251
+ rbeta = ctx.zeros(n, 1)
1252
+ dwork = ctx.zeros(n, 1)
1253
+
1254
+ g = scale = anorm = 0
1255
+ maxits = 3 * ctx.dps
1256
+
1257
+ for i in xrange(n): # householder reduction to bidiagonal form
1258
+ dwork[i] = scale * g # dwork are the side-diagonal elements
1259
+ g = s = scale = 0
1260
+ if i < m:
1261
+ for k in xrange(i, m):
1262
+ scale += ctx.fabs(ctx.re(A[k,i])) + ctx.fabs(ctx.im(A[k,i]))
1263
+ if scale != 0:
1264
+ for k in xrange(i, m):
1265
+ A[k,i] /= scale
1266
+ ar = ctx.re(A[k,i])
1267
+ ai = ctx.im(A[k,i])
1268
+ s += ar * ar + ai * ai
1269
+ f = A[i,i]
1270
+ g = -ctx.sqrt(s)
1271
+ if ctx.re(f) < 0:
1272
+ beta = -g - ctx.conj(f)
1273
+ g = -g
1274
+ else:
1275
+ beta = -g + ctx.conj(f)
1276
+ beta /= ctx.conj(beta)
1277
+ beta += 1
1278
+ h = 2 * (ctx.re(f) * g - s)
1279
+ A[i,i] = f - g
1280
+ beta /= h
1281
+ lbeta[i] = (beta / scale) / scale
1282
+ for j in xrange(i+1, n):
1283
+ s = 0
1284
+ for k in xrange(i, m):
1285
+ s += ctx.conj(A[k,i]) * A[k,j]
1286
+ f = beta * s
1287
+ for k in xrange(i, m):
1288
+ A[k,j] += f * A[k,i]
1289
+ for k in xrange(i, m):
1290
+ A[k,i] *= scale
1291
+
1292
+ S[i] = scale * g # S are the diagonal elements
1293
+ g = s = scale = 0
1294
+
1295
+ if i < m and i != n - 1:
1296
+ for k in xrange(i+1, n):
1297
+ scale += ctx.fabs(ctx.re(A[i,k])) + ctx.fabs(ctx.im(A[i,k]))
1298
+ if scale:
1299
+ for k in xrange(i+1, n):
1300
+ A[i,k] /= scale
1301
+ ar = ctx.re(A[i,k])
1302
+ ai = ctx.im(A[i,k])
1303
+ s += ar * ar + ai * ai
1304
+ f = A[i,i+1]
1305
+ g = -ctx.sqrt(s)
1306
+ if ctx.re(f) < 0:
1307
+ beta = -g - ctx.conj(f)
1308
+ g = -g
1309
+ else:
1310
+ beta = -g + ctx.conj(f)
1311
+
1312
+ beta /= ctx.conj(beta)
1313
+ beta += 1
1314
+
1315
+ h = 2 * (ctx.re(f) * g - s)
1316
+ A[i,i+1] = f - g
1317
+
1318
+ beta /= h
1319
+ rbeta[i] = (beta / scale) / scale
1320
+
1321
+ for k in xrange(i+1, n):
1322
+ work[k] = A[i, k]
1323
+
1324
+ for j in xrange(i+1, m):
1325
+ s = 0
1326
+ for k in xrange(i+1, n):
1327
+ s += ctx.conj(A[i,k]) * A[j,k]
1328
+ f = s * beta
1329
+ for k in xrange(i+1,n):
1330
+ A[j,k] += f * work[k]
1331
+
1332
+ for k in xrange(i+1, n):
1333
+ A[i,k] *= scale
1334
+
1335
+ anorm = max(anorm,ctx.fabs(S[i]) + ctx.fabs(dwork[i]))
1336
+
1337
+ if not isinstance(V, bool):
1338
+ for i in xrange(n-2, -1, -1): # accumulation of right hand transformations
1339
+ V[i+1,i+1] = 1
1340
+
1341
+ if dwork[i+1] != 0:
1342
+ f = ctx.conj(rbeta[i])
1343
+ for j in xrange(i+1, n):
1344
+ V[i,j] = A[i,j] * f
1345
+ for j in xrange(i+1, n):
1346
+ s = 0
1347
+ for k in xrange(i+1, n):
1348
+ s += ctx.conj(A[i,k]) * V[j,k]
1349
+ for k in xrange(i+1, n):
1350
+ V[j,k] += s * V[i,k]
1351
+
1352
+ for j in xrange(i+1,n):
1353
+ V[j,i] = V[i,j] = 0
1354
+
1355
+ V[0,0] = 1
1356
+
1357
+ if m < n : minnm = m
1358
+ else : minnm = n
1359
+
1360
+ if calc_u:
1361
+ for i in xrange(minnm-1, -1, -1): # accumulation of left hand transformations
1362
+ g = S[i]
1363
+ for j in xrange(i+1, n):
1364
+ A[i,j] = 0
1365
+ if g != 0:
1366
+ g = 1 / g
1367
+ for j in xrange(i+1, n):
1368
+ s = 0
1369
+ for k in xrange(i+1, m):
1370
+ s += ctx.conj(A[k,i]) * A[k,j]
1371
+ f = s * ctx.conj(lbeta[i])
1372
+ for k in xrange(i, m):
1373
+ A[k,j] += f * A[k,i]
1374
+ for j in xrange(i, m):
1375
+ A[j,i] *= g
1376
+ else:
1377
+ for j in xrange(i, m):
1378
+ A[j,i] = 0
1379
+ A[i,i] += 1
1380
+
1381
+ for k in xrange(n-1, -1, -1):
1382
+ # diagonalization of the bidiagonal form:
1383
+ # loop over singular values, and over allowed itations
1384
+
1385
+ its = 0
1386
+ while 1:
1387
+ its += 1
1388
+ flag = True
1389
+
1390
+ for l in xrange(k, -1, -1):
1391
+ nm = l - 1
1392
+
1393
+ if ctx.fabs(dwork[l]) + anorm == anorm:
1394
+ flag = False
1395
+ break
1396
+
1397
+ if ctx.fabs(S[nm]) + anorm == anorm:
1398
+ break
1399
+
1400
+ if flag:
1401
+ c = 0
1402
+ s = 1
1403
+ for i in xrange(l, k+1):
1404
+ f = s * dwork[i]
1405
+ dwork[i] *= c
1406
+ if ctx.fabs(f) + anorm == anorm:
1407
+ break
1408
+ g = S[i]
1409
+ h = ctx.hypot(f, g)
1410
+ S[i] = h
1411
+ h = 1 / h
1412
+ c = g * h
1413
+ s = -f * h
1414
+
1415
+ if calc_u:
1416
+ for j in xrange(m):
1417
+ y = A[j,nm]
1418
+ z = A[j,i]
1419
+ A[j,nm]= y * c + z * s
1420
+ A[j,i] = z * c - y * s
1421
+
1422
+ z = S[k]
1423
+
1424
+ if l == k: # convergence
1425
+ if z < 0: # singular value is made nonnegative
1426
+ S[k] = -z
1427
+ if not isinstance(V, bool):
1428
+ for j in xrange(n):
1429
+ V[k,j] = -V[k,j]
1430
+ break
1431
+
1432
+ if its >= maxits:
1433
+ raise RuntimeError("svd: no convergence to an eigenvalue after %d iterations" % its)
1434
+
1435
+ x = S[l] # shift from bottom 2 by 2 minor
1436
+ nm = k-1
1437
+ y = S[nm]
1438
+ g = dwork[nm]
1439
+ h = dwork[k]
1440
+ f = ((y - z) * (y + z) + (g - h) * (g + h)) / (2 * h * y)
1441
+ g = ctx.hypot(f, 1)
1442
+ if f >=0: f = (( x - z) *( x + z) + h *((y / (f + g)) - h)) / x
1443
+ else: f = (( x - z) *( x + z) + h *((y / (f - g)) - h)) / x
1444
+
1445
+ c = s = 1 # next qt transformation
1446
+
1447
+ for j in xrange(l, nm + 1):
1448
+ g = dwork[j+1]
1449
+ y = S[j+1]
1450
+ h = s * g
1451
+ g = c * g
1452
+ z = ctx.hypot(f, h)
1453
+ dwork[j] = z
1454
+ c = f / z
1455
+ s = h / z
1456
+ f = x * c + g * s
1457
+ g = g * c - x * s
1458
+ h = y * s
1459
+ y *= c
1460
+ if not isinstance(V, bool):
1461
+ for jj in xrange(n):
1462
+ x = V[j ,jj]
1463
+ z = V[j+1,jj]
1464
+ V[j ,jj]= x * c + z * s
1465
+ V[j+1,jj ]= z * c - x * s
1466
+ z = ctx.hypot(f, h)
1467
+ S[j] = z
1468
+ if z != 0: # rotation can be arbitray if z=0
1469
+ z = 1 / z
1470
+ c = f * z
1471
+ s = h * z
1472
+ f = c * g + s * y
1473
+ x = c * y - s * g
1474
+ if calc_u:
1475
+ for jj in xrange(m):
1476
+ y = A[jj,j ]
1477
+ z = A[jj,j+1]
1478
+ A[jj,j ]= y * c + z * s
1479
+ A[jj,j+1 ]= z * c - y * s
1480
+
1481
+ dwork[l] = 0
1482
+ dwork[k] = f
1483
+ S[k] = x
1484
+
1485
+ ##########################
1486
+
1487
+ # Sort singular values into decreasing order (bubble-sort)
1488
+
1489
+ for i in xrange(n):
1490
+ imax = i
1491
+ s = ctx.fabs(S[i]) # s is the current maximal element
1492
+
1493
+ for j in xrange(i + 1, n):
1494
+ c = ctx.fabs(S[j])
1495
+ if c > s:
1496
+ s = c
1497
+ imax = j
1498
+
1499
+ if imax != i:
1500
+ # swap singular values
1501
+
1502
+ z = S[i]
1503
+ S[i] = S[imax]
1504
+ S[imax] = z
1505
+
1506
+ if calc_u:
1507
+ for j in xrange(m):
1508
+ z = A[j,i]
1509
+ A[j,i] = A[j,imax]
1510
+ A[j,imax] = z
1511
+
1512
+ if not isinstance(V, bool):
1513
+ for j in xrange(n):
1514
+ z = V[i,j]
1515
+ V[i,j] = V[imax,j]
1516
+ V[imax,j] = z
1517
+
1518
+ return S
1519
+
1520
+ ##################################################################################################
1521
+
1522
@defun
def svd_r(ctx, A, full_matrices = False, compute_uv = True, overwrite_a = False):
    """
    This routine computes the singular value decomposition of a matrix A.
    Given A, two orthogonal matrices U and V are calculated such that

        A = U S V        and        U' U = 1        and        V V' = 1

    where S is a suitable shaped matrix whose off-diagonal elements are zero.
    Here ' denotes the transpose. The diagonal elements of S are the singular
    values of A, i.e. the square roots of the eigenvalues of A' A or A A'.

    input:
      A             : a real matrix of shape (m, n)
      full_matrices : if true, U and V are of shape (m, m) and (n, n).
                      if false, U and V are of shape (m, min(m, n)) and (min(m, n), n).
      compute_uv    : if true, U and V are calculated. if false, only S is calculated.
      overwrite_a   : if true, allows modification of A which may improve
                      performance. if false, A is not modified.

    output:
      U : an orthogonal matrix: U' U = 1. if full_matrices is true, U is of
          shape (m, m). otherwise it is of shape (m, min(m, n)).

      S : an array of length min(m, n) containing the singular values of A sorted by
          decreasing magnitude.

      V : an orthogonal matrix: V V' = 1. if full_matrices is true, V is of
          shape (n, n). otherwise it is of shape (min(m, n), n).

    return value:

           S          if compute_uv is false
       (U, S, V)      if compute_uv is true

    overview of the matrices:

      full_matrices true:
        A           : m*n
        U           : m*m     U' U  = 1
        S as matrix : m*n
        V           : n*n     V  V' = 1

      full_matrices false:
        A           : m*n
        U           : m*min(n,m)             U' U  = 1
        S as matrix : min(m,n)*min(m,n)
        V           : min(m,n)*n             V  V' = 1

    examples:

       >>> from mpmath import mp
       >>> A = mp.matrix([[2, -2, -1], [3, 4, -2], [-2, -2, 0]])
       >>> S = mp.svd_r(A, compute_uv = False)
       >>> print(S)
       [6.0]
       [3.0]
       [1.0]

       >>> U, S, V = mp.svd_r(A)
       >>> print(mp.chop(A - U * mp.diag(S) * V))
       [0.0  0.0  0.0]
       [0.0  0.0  0.0]
       [0.0  0.0  0.0]


    see also: svd, svd_c
    """

    m, n = A.rows, A.cols

    # singular values only: no accumulation of transformations needed
    if not compute_uv:
        if not overwrite_a:
            A = A.copy()
        S = svd_r_raw(ctx, A, V = False, calc_u = False)
        return S[:min(m, n)]

    if full_matrices and n < m:
        # embed A into an m*m matrix so the raw routine produces a square U
        V = ctx.zeros(m, m)
        A0 = ctx.zeros(m, m)
        A0[:,:n] = A
        S = svd_r_raw(ctx, A0, V, calc_u = True)
        # keep only the leading n singular values / right vectors
        return (A0, S[:n], V[:n,:n])

    if not overwrite_a:
        A = A.copy()
    V = ctx.zeros(n, n)
    S = svd_r_raw(ctx, A, V, calc_u = True)

    if n > m:
        # wide matrix: trim to the economic shapes
        if not full_matrices:
            V = V[:m,:]
        S = S[:m]
        A = A[:,:m]

    return (A, S, V)
1624
+
1625
+ ##############################
1626
+
1627
@defun
def svd_c(ctx, A, full_matrices = False, compute_uv = True, overwrite_a = False):
    """
    This routine computes the singular value decomposition of a matrix A.
    Given A, two unitary matrices U and V are calculated such that

        A = U S V        and        U' U = 1        and        V V' = 1

    where S is a suitable shaped matrix whose off-diagonal elements are zero.
    Here ' denotes the hermitian transpose (i.e. transposition and complex
    conjugation). The diagonal elements of S are the singular values of A,
    i.e. the square roots of the eigenvalues of A' A or A A'.

    input:
      A             : a complex matrix of shape (m, n)
      full_matrices : if true, U and V are of shape (m, m) and (n, n).
                      if false, U and V are of shape (m, min(m, n)) and (min(m, n), n).
      compute_uv    : if true, U and V are calculated. if false, only S is calculated.
      overwrite_a   : if true, allows modification of A which may improve
                      performance. if false, A is not modified.

    output:
      U : an unitary matrix: U' U = 1. if full_matrices is true, U is of
          shape (m, m). otherwise it is of shape (m, min(m, n)).

      S : an array of length min(m, n) containing the singular values of A sorted by
          decreasing magnitude.

      V : an unitary matrix: V V' = 1. if full_matrices is true, V is of
          shape (n, n). otherwise it is of shape (min(m, n), n).

    return value:

           S          if compute_uv is false
       (U, S, V)      if compute_uv is true

    overview of the matrices:

      full_matrices true:
        A           : m*n
        U           : m*m     U' U  = 1
        S as matrix : m*n
        V           : n*n     V  V' = 1

      full_matrices false:
        A           : m*n
        U           : m*min(n,m)             U' U  = 1
        S as matrix : min(m,n)*min(m,n)
        V           : min(m,n)*n             V  V' = 1

    example:
      >>> from mpmath import mp
      >>> A = mp.matrix([[-2j, -1-3j, -2+2j], [2-2j, -1-3j, 1], [-3+1j,-2j,0]])
      >>> S = mp.svd_c(A, compute_uv = False)
      >>> print(mp.chop(S - mp.matrix([mp.sqrt(34), mp.sqrt(15), mp.sqrt(6)])))
      [0.0]
      [0.0]
      [0.0]

      >>> U, S, V = mp.svd_c(A)
      >>> print(mp.chop(A - U * mp.diag(S) * V))
      [0.0  0.0  0.0]
      [0.0  0.0  0.0]
      [0.0  0.0  0.0]

    see also: svd, svd_r
    """

    m, n = A.rows, A.cols

    # singular values only: no accumulation of transformations needed
    if not compute_uv:
        if not overwrite_a:
            A = A.copy()
        S = svd_c_raw(ctx, A, V = False, calc_u = False)
        return S[:min(m, n)]

    if full_matrices and n < m:
        # embed A into an m*m matrix so the raw routine produces a square U
        V = ctx.zeros(m, m)
        A0 = ctx.zeros(m, m)
        A0[:,:n] = A
        S = svd_c_raw(ctx, A0, V, calc_u = True)
        # keep only the leading n singular values / right vectors
        return (A0, S[:n], V[:n,:n])

    if not overwrite_a:
        A = A.copy()
    V = ctx.zeros(n, n)
    S = svd_c_raw(ctx, A, V, calc_u = True)

    if n > m:
        # wide matrix: trim to the economic shapes
        if not full_matrices:
            V = V[:m,:]
        S = S[:m]
        A = A[:,:m]

    return (A, S, V)
1728
+
1729
@defun
def svd(ctx, A, full_matrices = False, compute_uv = True, overwrite_a = False):
    """
    "svd" is a unified interface for "svd_r" and "svd_c". Depending on
    whether A is real or complex the appropriate function is called.

    This routine computes the singular value decomposition of a matrix A.
    Given A, two orthogonal (A real) or unitary (A complex) matrices U and V
    are calculated such that

        A = U S V        and        U' U = 1        and        V V' = 1

    where S is a suitable shaped matrix whose off-diagonal elements are zero.
    Here ' denotes the hermitian transpose (i.e. transposition and complex
    conjugation). The diagonal elements of S are the singular values of A,
    i.e. the square roots of the eigenvalues of A' A or A A'.

    input:
      A             : a real or complex matrix of shape (m, n)
      full_matrices : if true, U and V are of shape (m, m) and (n, n).
                      if false, U and V are of shape (m, min(m, n)) and (min(m, n), n).
      compute_uv    : if true, U and V are calculated. if false, only S is calculated.
      overwrite_a   : if true, allows modification of A which may improve
                      performance. if false, A is not modified.

    output:
      U : an orthogonal or unitary matrix: U' U = 1. if full_matrices is true, U is of
          shape (m, m). otherwise it is of shape (m, min(m, n)).

      S : an array of length min(m, n) containing the singular values of A sorted by
          decreasing magnitude.

      V : an orthogonal or unitary matrix: V V' = 1. if full_matrices is true, V is of
          shape (n, n). otherwise it is of shape (min(m, n), n).

    return value:

           S          if compute_uv is false
       (U, S, V)      if compute_uv is true

    overview of the matrices:

      full_matrices true:
        A           : m*n
        U           : m*m     U' U  = 1
        S as matrix : m*n
        V           : n*n     V  V' = 1

      full_matrices false:
        A           : m*n
        U           : m*min(n,m)             U' U  = 1
        S as matrix : min(m,n)*min(m,n)
        V           : min(m,n)*n             V  V' = 1

    examples:

       >>> from mpmath import mp
       >>> A = mp.matrix([[2, -2, -1], [3, 4, -2], [-2, -2, 0]])
       >>> S = mp.svd(A, compute_uv = False)
       >>> print(S)
       [6.0]
       [3.0]
       [1.0]

       >>> U, S, V = mp.svd(A)
       >>> print(mp.chop(A - U * mp.diag(S) * V))
       [0.0  0.0  0.0]
       [0.0  0.0  0.0]
       [0.0  0.0  0.0]

    see also: svd_r, svd_c
    """

    # dispatch on the element type of A
    if any(type(x) is ctx.mpc for x in A):
        driver = ctx.svd_c
    else:
        driver = ctx.svd_r
    return driver(A, full_matrices = full_matrices, compute_uv = compute_uv,
                  overwrite_a = overwrite_a)
.venv/lib/python3.11/site-packages/mpmath/matrices/linalg.py ADDED
@@ -0,0 +1,790 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Linear algebra
3
+ --------------
4
+
5
+ Linear equations
6
+ ................
7
+
8
+ Basic linear algebra is implemented; you can for example solve the linear
9
+ equation system::
10
+
11
+ x + 2*y = -10
12
+ 3*x + 4*y = 10
13
+
14
+ using ``lu_solve``::
15
+
16
+ >>> from mpmath import *
17
+ >>> mp.pretty = False
18
+ >>> A = matrix([[1, 2], [3, 4]])
19
+ >>> b = matrix([-10, 10])
20
+ >>> x = lu_solve(A, b)
21
+ >>> x
22
+ matrix(
23
+ [['30.0'],
24
+ ['-20.0']])
25
+
26
+ If you don't trust the result, use ``residual`` to calculate the residual ||A*x-b||::
27
+
28
+ >>> residual(A, x, b)
29
+ matrix(
30
+ [['3.46944695195361e-18'],
31
+ ['3.46944695195361e-18']])
32
+ >>> str(eps)
33
+ '2.22044604925031e-16'
34
+
35
+ As you can see, the solution is quite accurate. The error is caused by the
36
+ inaccuracy of the internal floating point arithmetic. Though, it's even smaller
37
+ than the current machine epsilon, which basically means you can trust the
38
+ result.
39
+
40
+ If you need more speed, use NumPy, or ``fp.lu_solve`` for a floating-point computation.
41
+
42
+ >>> fp.lu_solve(A, b) # doctest: +ELLIPSIS
43
+ matrix(...)
44
+
45
+ ``lu_solve`` accepts overdetermined systems. It is usually not possible to solve
46
+ such systems, so the residual is minimized instead. Internally this is done
47
+ using Cholesky decomposition to compute a least squares approximation. This means
48
+ that ``lu_solve`` will square the errors. If you can't afford this, use
49
+ ``qr_solve`` instead. It is twice as slow but more accurate, and it calculates
50
+ the residual automatically.
51
+
52
+
53
+ Matrix factorization
54
+ ....................
55
+
56
+ The function ``lu`` computes an explicit LU factorization of a matrix::
57
+
58
+ >>> P, L, U = lu(matrix([[0,2,3],[4,5,6],[7,8,9]]))
59
+ >>> print(P)
60
+ [0.0 0.0 1.0]
61
+ [1.0 0.0 0.0]
62
+ [0.0 1.0 0.0]
63
+ >>> print(L)
64
+ [ 1.0 0.0 0.0]
65
+ [ 0.0 1.0 0.0]
66
+ [0.571428571428571 0.214285714285714 1.0]
67
+ >>> print(U)
68
+ [7.0 8.0 9.0]
69
+ [0.0 2.0 3.0]
70
+ [0.0 0.0 0.214285714285714]
71
+ >>> print(P.T*L*U)
72
+ [0.0 2.0 3.0]
73
+ [4.0 5.0 6.0]
74
+ [7.0 8.0 9.0]
75
+
76
+ Interval matrices
77
+ -----------------
78
+
79
+ Matrices may contain interval elements. This allows one to perform
80
+ basic linear algebra operations such as matrix multiplication
81
+ and equation solving with rigorous error bounds::
82
+
83
+ >>> a = iv.matrix([['0.1','0.3','1.0'],
84
+ ... ['7.1','5.5','4.8'],
85
+ ... ['3.2','4.4','5.6']])
86
+ >>>
87
+ >>> b = iv.matrix(['4','0.6','0.5'])
88
+ >>> c = iv.lu_solve(a, b)
89
+ >>> print(c)
90
+ [ [5.2582327113062568605927528666, 5.25823271130625686059275702219]]
91
+ [[-13.1550493962678375411635581388, -13.1550493962678375411635540152]]
92
+ [ [7.42069154774972557628979076189, 7.42069154774972557628979190734]]
93
+ >>> print(a*c)
94
+ [ [3.99999999999999999999999844904, 4.00000000000000000000000155096]]
95
+ [[0.599999999999999999999968898009, 0.600000000000000000000031763736]]
96
+ [[0.499999999999999999999979320485, 0.500000000000000000000020679515]]
97
+ """
98
+
99
+ # TODO:
100
+ # *implement high-level qr()
101
+ # *test unitvector
102
+ # *iterative solving
103
+
104
+ from copy import copy
105
+
106
+ from ..libmp.backend import xrange
107
+
108
+ class LinearAlgebraMethods(object):
109
+
110
+ def LU_decomp(ctx, A, overwrite=False, use_cache=True):
111
+ """
112
+ LU-factorization of a n*n matrix using the Gauss algorithm.
113
+ Returns L and U in one matrix and the pivot indices.
114
+
115
+ Use overwrite to specify whether A will be overwritten with L and U.
116
+ """
117
+ if not A.rows == A.cols:
118
+ raise ValueError('need n*n matrix')
119
+ # get from cache if possible
120
+ if use_cache and isinstance(A, ctx.matrix) and A._LU:
121
+ return A._LU
122
+ if not overwrite:
123
+ orig = A
124
+ A = A.copy()
125
+ tol = ctx.absmin(ctx.mnorm(A,1) * ctx.eps) # each pivot element has to be bigger
126
+ n = A.rows
127
+ p = [None]*(n - 1)
128
+ for j in xrange(n - 1):
129
+ # pivoting, choose max(abs(reciprocal row sum)*abs(pivot element))
130
+ biggest = 0
131
+ for k in xrange(j, n):
132
+ s = ctx.fsum([ctx.absmin(A[k,l]) for l in xrange(j, n)])
133
+ if ctx.absmin(s) <= tol:
134
+ raise ZeroDivisionError('matrix is numerically singular')
135
+ current = 1/s * ctx.absmin(A[k,j])
136
+ if current > biggest: # TODO: what if equal?
137
+ biggest = current
138
+ p[j] = k
139
+ # swap rows according to p
140
+ ctx.swap_row(A, j, p[j])
141
+ if ctx.absmin(A[j,j]) <= tol:
142
+ raise ZeroDivisionError('matrix is numerically singular')
143
+ # calculate elimination factors and add rows
144
+ for i in xrange(j + 1, n):
145
+ A[i,j] /= A[j,j]
146
+ for k in xrange(j + 1, n):
147
+ A[i,k] -= A[i,j]*A[j,k]
148
+ if ctx.absmin(A[n - 1,n - 1]) <= tol:
149
+ raise ZeroDivisionError('matrix is numerically singular')
150
+ # cache decomposition
151
+ if not overwrite and isinstance(orig, ctx.matrix):
152
+ orig._LU = (A, p)
153
+ return A, p
154
+
155
+ def L_solve(ctx, L, b, p=None):
156
+ """
157
+ Solve the lower part of a LU factorized matrix for y.
158
+ """
159
+ if L.rows != L.cols:
160
+ raise RuntimeError("need n*n matrix")
161
+ n = L.rows
162
+ if len(b) != n:
163
+ raise ValueError("Value should be equal to n")
164
+ b = copy(b)
165
+ if p: # swap b according to p
166
+ for k in xrange(0, len(p)):
167
+ ctx.swap_row(b, k, p[k])
168
+ # solve
169
+ for i in xrange(1, n):
170
+ for j in xrange(i):
171
+ b[i] -= L[i,j] * b[j]
172
+ return b
173
+
174
+ def U_solve(ctx, U, y):
175
+ """
176
+ Solve the upper part of a LU factorized matrix for x.
177
+ """
178
+ if U.rows != U.cols:
179
+ raise RuntimeError("need n*n matrix")
180
+ n = U.rows
181
+ if len(y) != n:
182
+ raise ValueError("Value should be equal to n")
183
+ x = copy(y)
184
+ for i in xrange(n - 1, -1, -1):
185
+ for j in xrange(i + 1, n):
186
+ x[i] -= U[i,j] * x[j]
187
+ x[i] /= U[i,i]
188
+ return x
189
+
190
+ def lu_solve(ctx, A, b, **kwargs):
191
+ """
192
+ Ax = b => x
193
+
194
+ Solve a determined or overdetermined linear equations system.
195
+ Fast LU decomposition is used, which is less accurate than QR decomposition
196
+ (especially for overdetermined systems), but it's twice as efficient.
197
+ Use qr_solve if you want more precision or have to solve a very ill-
198
+ conditioned system.
199
+
200
+ If you specify real=True, it does not check for overdeterminded complex
201
+ systems.
202
+ """
203
+ prec = ctx.prec
204
+ try:
205
+ ctx.prec += 10
206
+ # do not overwrite A nor b
207
+ A, b = ctx.matrix(A, **kwargs).copy(), ctx.matrix(b, **kwargs).copy()
208
+ if A.rows < A.cols:
209
+ raise ValueError('cannot solve underdetermined system')
210
+ if A.rows > A.cols:
211
+ # use least-squares method if overdetermined
212
+ # (this increases errors)
213
+ AH = A.H
214
+ A = AH * A
215
+ b = AH * b
216
+ if (kwargs.get('real', False) or
217
+ not sum(type(i) is ctx.mpc for i in A)):
218
+ # TODO: necessary to check also b?
219
+ x = ctx.cholesky_solve(A, b)
220
+ else:
221
+ x = ctx.lu_solve(A, b)
222
+ else:
223
+ # LU factorization
224
+ A, p = ctx.LU_decomp(A)
225
+ b = ctx.L_solve(A, b, p)
226
+ x = ctx.U_solve(A, b)
227
+ finally:
228
+ ctx.prec = prec
229
+ return x
230
+
231
+ def improve_solution(ctx, A, x, b, maxsteps=1):
232
+ """
233
+ Improve a solution to a linear equation system iteratively.
234
+
235
+ This re-uses the LU decomposition and is thus cheap.
236
+ Usually 3 up to 4 iterations are giving the maximal improvement.
237
+ """
238
+ if A.rows != A.cols:
239
+ raise RuntimeError("need n*n matrix") # TODO: really?
240
+ for _ in xrange(maxsteps):
241
+ r = ctx.residual(A, x, b)
242
+ if ctx.norm(r, 2) < 10*ctx.eps:
243
+ break
244
+ # this uses cached LU decomposition and is thus cheap
245
+ dx = ctx.lu_solve(A, -r)
246
+ x += dx
247
+ return x
248
+
249
+ def lu(ctx, A):
250
+ """
251
+ A -> P, L, U
252
+
253
+ LU factorisation of a square matrix A. L is the lower, U the upper part.
254
+ P is the permutation matrix indicating the row swaps.
255
+
256
+ P*A = L*U
257
+
258
+ If you need efficiency, use the low-level method LU_decomp instead, it's
259
+ much more memory efficient.
260
+ """
261
+ # get factorization
262
+ A, p = ctx.LU_decomp(A)
263
+ n = A.rows
264
+ L = ctx.matrix(n)
265
+ U = ctx.matrix(n)
266
+ for i in xrange(n):
267
+ for j in xrange(n):
268
+ if i > j:
269
+ L[i,j] = A[i,j]
270
+ elif i == j:
271
+ L[i,j] = 1
272
+ U[i,j] = A[i,j]
273
+ else:
274
+ U[i,j] = A[i,j]
275
+ # calculate permutation matrix
276
+ P = ctx.eye(n)
277
+ for k in xrange(len(p)):
278
+ ctx.swap_row(P, k, p[k])
279
+ return P, L, U
280
+
281
+ def unitvector(ctx, n, i):
282
+ """
283
+ Return the i-th n-dimensional unit vector.
284
+ """
285
+ assert 0 < i <= n, 'this unit vector does not exist'
286
+ return [ctx.zero]*(i-1) + [ctx.one] + [ctx.zero]*(n-i)
287
+
288
+ def inverse(ctx, A, **kwargs):
289
+ """
290
+ Calculate the inverse of a matrix.
291
+
292
+ If you want to solve an equation system Ax = b, it's recommended to use
293
+ solve(A, b) instead, it's about 3 times more efficient.
294
+ """
295
+ prec = ctx.prec
296
+ try:
297
+ ctx.prec += 10
298
+ # do not overwrite A
299
+ A = ctx.matrix(A, **kwargs).copy()
300
+ n = A.rows
301
+ # get LU factorisation
302
+ A, p = ctx.LU_decomp(A)
303
+ cols = []
304
+ # calculate unit vectors and solve corresponding system to get columns
305
+ for i in xrange(1, n + 1):
306
+ e = ctx.unitvector(n, i)
307
+ y = ctx.L_solve(A, e, p)
308
+ cols.append(ctx.U_solve(A, y))
309
+ # convert columns to matrix
310
+ inv = []
311
+ for i in xrange(n):
312
+ row = []
313
+ for j in xrange(n):
314
+ row.append(cols[j][i])
315
+ inv.append(row)
316
+ result = ctx.matrix(inv, **kwargs)
317
+ finally:
318
+ ctx.prec = prec
319
+ return result
320
+
321
+ def householder(ctx, A):
322
+ """
323
+ (A|b) -> H, p, x, res
324
+
325
+ (A|b) is the coefficient matrix with left hand side of an optionally
326
+ overdetermined linear equation system.
327
+ H and p contain all information about the transformation matrices.
328
+ x is the solution, res the residual.
329
+ """
330
+ if not isinstance(A, ctx.matrix):
331
+ raise TypeError("A should be a type of ctx.matrix")
332
+ m = A.rows
333
+ n = A.cols
334
+ if m < n - 1:
335
+ raise RuntimeError("Columns should not be less than rows")
336
+ # calculate Householder matrix
337
+ p = []
338
+ for j in xrange(0, n - 1):
339
+ s = ctx.fsum(abs(A[i,j])**2 for i in xrange(j, m))
340
+ if not abs(s) > ctx.eps:
341
+ raise ValueError('matrix is numerically singular')
342
+ p.append(-ctx.sign(ctx.re(A[j,j])) * ctx.sqrt(s))
343
+ kappa = ctx.one / (s - p[j] * A[j,j])
344
+ A[j,j] -= p[j]
345
+ for k in xrange(j+1, n):
346
+ y = ctx.fsum(ctx.conj(A[i,j]) * A[i,k] for i in xrange(j, m)) * kappa
347
+ for i in xrange(j, m):
348
+ A[i,k] -= A[i,j] * y
349
+ # solve Rx = c1
350
+ x = [A[i,n - 1] for i in xrange(n - 1)]
351
+ for i in xrange(n - 2, -1, -1):
352
+ x[i] -= ctx.fsum(A[i,j] * x[j] for j in xrange(i + 1, n - 1))
353
+ x[i] /= p[i]
354
+ # calculate residual
355
+ if not m == n - 1:
356
+ r = [A[m-1-i, n-1] for i in xrange(m - n + 1)]
357
+ else:
358
+ # determined system, residual should be 0
359
+ r = [0]*m # maybe a bad idea, changing r[i] will change all elements
360
+ return A, p, x, r
361
+
362
+ #def qr(ctx, A):
363
+ # """
364
+ # A -> Q, R
365
+ #
366
+ # QR factorisation of a square matrix A using Householder decomposition.
367
+ # Q is orthogonal, this leads to very few numerical errors.
368
+ #
369
+ # A = Q*R
370
+ # """
371
+ # H, p, x, res = householder(A)
372
+ # TODO: implement this
373
+
374
+ def residual(ctx, A, x, b, **kwargs):
375
+ """
376
+ Calculate the residual of a solution to a linear equation system.
377
+
378
+ r = A*x - b for A*x = b
379
+ """
380
+ oldprec = ctx.prec
381
+ try:
382
+ ctx.prec *= 2
383
+ A, x, b = ctx.matrix(A, **kwargs), ctx.matrix(x, **kwargs), ctx.matrix(b, **kwargs)
384
+ return A*x - b
385
+ finally:
386
+ ctx.prec = oldprec
387
+
388
+ def qr_solve(ctx, A, b, norm=None, **kwargs):
389
+ """
390
+ Ax = b => x, ||Ax - b||
391
+
392
+ Solve a determined or overdetermined linear equations system and
393
+ calculate the norm of the residual (error).
394
+ QR decomposition using Householder factorization is applied, which gives very
395
+ accurate results even for ill-conditioned matrices. qr_solve is twice as
396
+ efficient.
397
+ """
398
+ if norm is None:
399
+ norm = ctx.norm
400
+ prec = ctx.prec
401
+ try:
402
+ ctx.prec += 10
403
+ # do not overwrite A nor b
404
+ A, b = ctx.matrix(A, **kwargs).copy(), ctx.matrix(b, **kwargs).copy()
405
+ if A.rows < A.cols:
406
+ raise ValueError('cannot solve underdetermined system')
407
+ H, p, x, r = ctx.householder(ctx.extend(A, b))
408
+ res = ctx.norm(r)
409
+ # calculate residual "manually" for determined systems
410
+ if res == 0:
411
+ res = ctx.norm(ctx.residual(A, x, b))
412
+ return ctx.matrix(x, **kwargs), res
413
+ finally:
414
+ ctx.prec = prec
415
+
416
+ def cholesky(ctx, A, tol=None):
417
+ r"""
418
+ Cholesky decomposition of a symmetric positive-definite matrix `A`.
419
+ Returns a lower triangular matrix `L` such that `A = L \times L^T`.
420
+ More generally, for a complex Hermitian positive-definite matrix,
421
+ a Cholesky decomposition satisfying `A = L \times L^H` is returned.
422
+
423
+ The Cholesky decomposition can be used to solve linear equation
424
+ systems twice as efficiently as LU decomposition, or to
425
+ test whether `A` is positive-definite.
426
+
427
+ The optional parameter ``tol`` determines the tolerance for
428
+ verifying positive-definiteness.
429
+
430
+ **Examples**
431
+
432
+ Cholesky decomposition of a positive-definite symmetric matrix::
433
+
434
+ >>> from mpmath import *
435
+ >>> mp.dps = 25; mp.pretty = True
436
+ >>> A = eye(3) + hilbert(3)
437
+ >>> nprint(A)
438
+ [ 2.0 0.5 0.333333]
439
+ [ 0.5 1.33333 0.25]
440
+ [0.333333 0.25 1.2]
441
+ >>> L = cholesky(A)
442
+ >>> nprint(L)
443
+ [ 1.41421 0.0 0.0]
444
+ [0.353553 1.09924 0.0]
445
+ [0.235702 0.15162 1.05899]
446
+ >>> chop(A - L*L.T)
447
+ [0.0 0.0 0.0]
448
+ [0.0 0.0 0.0]
449
+ [0.0 0.0 0.0]
450
+
451
+ Cholesky decomposition of a Hermitian matrix::
452
+
453
+ >>> A = eye(3) + matrix([[0,0.25j,-0.5j],[-0.25j,0,0],[0.5j,0,0]])
454
+ >>> L = cholesky(A)
455
+ >>> nprint(L)
456
+ [ 1.0 0.0 0.0]
457
+ [(0.0 - 0.25j) (0.968246 + 0.0j) 0.0]
458
+ [ (0.0 + 0.5j) (0.129099 + 0.0j) (0.856349 + 0.0j)]
459
+ >>> chop(A - L*L.H)
460
+ [0.0 0.0 0.0]
461
+ [0.0 0.0 0.0]
462
+ [0.0 0.0 0.0]
463
+
464
+ Attempted Cholesky decomposition of a matrix that is not positive
465
+ definite::
466
+
467
+ >>> A = -eye(3) + hilbert(3)
468
+ >>> L = cholesky(A)
469
+ Traceback (most recent call last):
470
+ ...
471
+ ValueError: matrix is not positive-definite
472
+
473
+ **References**
474
+
475
+ 1. [Wikipedia]_ http://en.wikipedia.org/wiki/Cholesky_decomposition
476
+
477
+ """
478
+ if not isinstance(A, ctx.matrix):
479
+ raise RuntimeError("A should be a type of ctx.matrix")
480
+ if not A.rows == A.cols:
481
+ raise ValueError('need n*n matrix')
482
+ if tol is None:
483
+ tol = +ctx.eps
484
+ n = A.rows
485
+ L = ctx.matrix(n)
486
+ for j in xrange(n):
487
+ c = ctx.re(A[j,j])
488
+ if abs(c-A[j,j]) > tol:
489
+ raise ValueError('matrix is not Hermitian')
490
+ s = c - ctx.fsum((L[j,k] for k in xrange(j)),
491
+ absolute=True, squared=True)
492
+ if s < tol:
493
+ raise ValueError('matrix is not positive-definite')
494
+ L[j,j] = ctx.sqrt(s)
495
+ for i in xrange(j, n):
496
+ it1 = (L[i,k] for k in xrange(j))
497
+ it2 = (L[j,k] for k in xrange(j))
498
+ t = ctx.fdot(it1, it2, conjugate=True)
499
+ L[i,j] = (A[i,j] - t) / L[j,j]
500
+ return L
501
+
502
+ def cholesky_solve(ctx, A, b, **kwargs):
503
+ """
504
+ Ax = b => x
505
+
506
+ Solve a symmetric positive-definite linear equation system.
507
+ This is twice as efficient as lu_solve.
508
+
509
+ Typical use cases:
510
+ * A.T*A
511
+ * Hessian matrix
512
+ * differential equations
513
+ """
514
+ prec = ctx.prec
515
+ try:
516
+ ctx.prec += 10
517
+ # do not overwrite A nor b
518
+ A, b = ctx.matrix(A, **kwargs).copy(), ctx.matrix(b, **kwargs).copy()
519
+ if A.rows != A.cols:
520
+ raise ValueError('can only solve determined system')
521
+ # Cholesky factorization
522
+ L = ctx.cholesky(A)
523
+ # solve
524
+ n = L.rows
525
+ if len(b) != n:
526
+ raise ValueError("Value should be equal to n")
527
+ for i in xrange(n):
528
+ b[i] -= ctx.fsum(L[i,j] * b[j] for j in xrange(i))
529
+ b[i] /= L[i,i]
530
+ x = ctx.U_solve(L.T, b)
531
+ return x
532
+ finally:
533
+ ctx.prec = prec
534
+
535
+ def det(ctx, A):
536
+ """
537
+ Calculate the determinant of a matrix.
538
+ """
539
+ prec = ctx.prec
540
+ try:
541
+ # do not overwrite A
542
+ A = ctx.matrix(A).copy()
543
+ # use LU factorization to calculate determinant
544
+ try:
545
+ R, p = ctx.LU_decomp(A)
546
+ except ZeroDivisionError:
547
+ return 0
548
+ z = 1
549
+ for i, e in enumerate(p):
550
+ if i != e:
551
+ z *= -1
552
+ for i in xrange(A.rows):
553
+ z *= R[i,i]
554
+ return z
555
+ finally:
556
+ ctx.prec = prec
557
+
558
+ def cond(ctx, A, norm=None):
559
+ """
560
+ Calculate the condition number of a matrix using a specified matrix norm.
561
+
562
+ The condition number estimates the sensitivity of a matrix to errors.
563
+ Example: small input errors for ill-conditioned coefficient matrices
564
+ alter the solution of the system dramatically.
565
+
566
+ For ill-conditioned matrices it's recommended to use qr_solve() instead
567
+ of lu_solve(). This does not help with input errors however, it just avoids
568
+ to add additional errors.
569
+
570
+ Definition: cond(A) = ||A|| * ||A**-1||
571
+ """
572
+ if norm is None:
573
+ norm = lambda x: ctx.mnorm(x,1)
574
+ return norm(A) * norm(ctx.inverse(A))
575
+
576
+ def lu_solve_mat(ctx, a, b):
577
+ """Solve a * x = b where a and b are matrices."""
578
+ r = ctx.matrix(a.rows, b.cols)
579
+ for i in range(b.cols):
580
+ c = ctx.lu_solve(a, b.column(i))
581
+ for j in range(len(c)):
582
+ r[j, i] = c[j]
583
+ return r
584
+
585
    def qr(ctx, A, mode = 'full', edps = 10):
        """
        Compute a QR factorization $A = QR$ where
        A is an m x n matrix of real or complex numbers where m >= n

        mode has following meanings:
        (1) mode = 'raw' returns two matrixes (A, tau) in the
            internal format used by LAPACK
        (2) mode = 'skinny' returns the leading n columns of Q
            and n rows of R
        (3) Any other value returns the leading m columns of Q
            and m rows of R

        edps is the increase in mp precision used for calculations

        **Examples**

        >>> from mpmath import *
        >>> mp.dps = 15
        >>> mp.pretty = True
        >>> A = matrix([[1, 2], [3, 4], [1, 1]])
        >>> Q, R = qr(A)
        >>> Q
        [-0.301511344577764 0.861640436855329 0.408248290463863]
        [-0.904534033733291 -0.123091490979333 -0.408248290463863]
        [-0.301511344577764 -0.492365963917331 0.816496580927726]
        >>> R
        [-3.3166247903554 -4.52267016866645]
        [ 0.0 0.738548945875996]
        [ 0.0 0.0]
        >>> Q * R
        [1.0 2.0]
        [3.0 4.0]
        [1.0 1.0]
        >>> chop(Q.T * Q)
        [1.0 0.0 0.0]
        [0.0 1.0 0.0]
        [0.0 0.0 1.0]
        >>> B = matrix([[1+0j, 2-3j], [3+j, 4+5j]])
        >>> Q, R = qr(B)
        >>> nprint(Q)
        [ (-0.301511 + 0.0j) (0.0695795 - 0.95092j)]
        [(-0.904534 - 0.301511j) (-0.115966 + 0.278318j)]
        >>> nprint(R)
        [(-3.31662 + 0.0j) (-5.72872 - 2.41209j)]
        [ 0.0 (3.91965 + 0.0j)]
        >>> Q * R
        [(1.0 + 0.0j) (2.0 - 3.0j)]
        [(3.0 + 1.0j) (4.0 + 5.0j)]
        >>> chop(Q.T * Q.conjugate())
        [1.0 0.0]
        [0.0 1.0]

        """

        # check values before continuing
        assert isinstance(A, ctx.matrix)
        m = A.rows
        n = A.cols
        assert n >= 0
        assert m >= n
        assert edps >= 0

        # check for complex data type
        # NOTE(review): a single complex entry switches the whole
        # factorization to the complex (Householder) code path.
        cmplx = any(type(x) is ctx.mpc for x in A)

        # temporarily increase the precision and initialize
        with ctx.extradps(edps):
            # tau holds the scalar factors of the elementary reflectors,
            # matching LAPACK's *geqrf convention.
            tau = ctx.matrix(n,1)
            A = A.copy()

            # ---------------
            # FACTOR MATRIX A
            # ---------------
            # Column by column, build a Householder reflector that zeroes
            # the subdiagonal of column j, then apply it to the trailing
            # columns. The reflector vectors are stored below the diagonal
            # of A; the diagonal receives the R entry (beta).
            if cmplx:
                one = ctx.mpc('1.0', '0.0')
                zero = ctx.mpc('0.0', '0.0')
                rzero = ctx.mpf('0.0')

                # main loop to factor A (complex)
                for j in xrange(0, n):
                    alpha = A[j,j]
                    alphr = ctx.re(alpha)
                    alphi = ctx.im(alpha)

                    if (m-j) >= 2:
                        # norm of the subdiagonal part of column j
                        xnorm = ctx.fsum( A[i,j]*ctx.conj(A[i,j]) for i in xrange(j+1, m) )
                        xnorm = ctx.re( ctx.sqrt(xnorm) )
                    else:
                        xnorm = rzero

                    if (xnorm == rzero) and (alphi == rzero):
                        # column is already in triangular form; no reflector needed
                        tau[j] = zero
                        continue

                    # beta gets the sign opposite to alpha's real part to
                    # avoid cancellation in (alpha - beta)
                    if alphr < rzero:
                        beta = ctx.sqrt(alphr**2 + alphi**2 + xnorm**2)
                    else:
                        beta = -ctx.sqrt(alphr**2 + alphi**2 + xnorm**2)

                    tau[j] = ctx.mpc( (beta - alphr) / beta, -alphi / beta )
                    t = -ctx.conj(tau[j])
                    za = one / (alpha - beta)

                    # scale the reflector vector so its first entry is 1
                    for i in xrange(j+1, m):
                        A[i,j] *= za

                    # apply the reflector to the trailing columns:
                    # A[:,k] += v * (t * conj(v^H A[:,k]))
                    A[j,j] = one
                    for k in xrange(j+1, n):
                        y = ctx.fsum(A[i,j] * ctx.conj(A[i,k]) for i in xrange(j, m))
                        temp = t * ctx.conj(y)
                        for i in xrange(j, m):
                            A[i,k] += A[i,j] * temp

                    # store the R diagonal entry over the temporary 1
                    A[j,j] = ctx.mpc(beta, '0.0')
            else:
                one = ctx.mpf('1.0')
                zero = ctx.mpf('0.0')

                # main loop to factor A (real)
                for j in xrange(0, n):
                    alpha = A[j,j]

                    if (m-j) > 2:
                        xnorm = ctx.fsum( (A[i,j])**2 for i in xrange(j+1, m) )
                        xnorm = ctx.sqrt(xnorm)
                    elif (m-j) == 2:
                        xnorm = abs( A[m-1,j] )
                    else:
                        xnorm = zero

                    if xnorm == zero:
                        # nothing below the diagonal to annihilate
                        tau[j] = zero
                        continue

                    # choose beta's sign to avoid cancellation (see complex case)
                    if alpha < zero:
                        beta = ctx.sqrt(alpha**2 + xnorm**2)
                    else:
                        beta = -ctx.sqrt(alpha**2 + xnorm**2)

                    tau[j] = (beta - alpha) / beta
                    t = -tau[j]
                    da = one / (alpha - beta)

                    for i in xrange(j+1, m):
                        A[i,j] *= da

                    # apply the reflector to the trailing columns
                    A[j,j] = one
                    for k in xrange(j+1, n):
                        y = ctx.fsum( A[i,j] * A[i,k] for i in xrange(j, m) )
                        temp = t * y
                        for i in xrange(j,m):
                            A[i,k] += A[i,j] * temp

                    A[j,j] = beta

            # return factorization in same internal format as LAPACK
            if (mode == 'raw') or (mode == 'RAW'):
                return A, tau

            # ----------------------------------
            # FORM Q USING BACKWARD ACCUMULATION
            # ----------------------------------

            # form R before the values are overwritten
            R = A.copy()
            for j in xrange(0, n):
                for i in xrange(j+1, m):
                    R[i,j] = zero

            # set the value of p (number of columns of Q to return)
            p = m
            if (mode == 'skinny') or (mode == 'SKINNY'):
                p = n

            # add columns to A if needed and initialize
            A.cols += (p-n)
            for j in xrange(0, p):
                A[j,j] = one
                for i in xrange(0, j):
                    A[i,j] = zero

            # main loop to form Q
            # Accumulate the reflectors from last to first (backward
            # accumulation), which lets Q be built in place over A.
            for j in xrange(n-1, -1, -1):
                t = -tau[j]
                A[j,j] += t

                for k in xrange(j+1, p):
                    if cmplx:
                        y = ctx.fsum(A[i,j] * ctx.conj(A[i,k]) for i in xrange(j+1, m))
                        temp = t * ctx.conj(y)
                    else:
                        y = ctx.fsum(A[i,j] * A[i,k] for i in xrange(j+1, m))
                        temp = t * y
                    A[j,k] = temp
                    for i in xrange(j+1, m):
                        A[i,k] += A[i,j] * temp

                for i in xrange(j+1, m):
                    A[i, j] *= t

            return A, R[0:p,0:n]

    # ------------------
    # END OF FUNCTION QR
    # ------------------
.venv/lib/python3.11/site-packages/mpmath/matrices/matrices.py ADDED
@@ -0,0 +1,1005 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from ..libmp.backend import xrange
2
+ import warnings
3
+
4
+ # TODO: interpret list as vectors (for multiplication)
5
+
6
# Separators used by _matrix.__nstr__ when rendering a matrix as text.
rowsep = '\n'   # between printed rows
colsep = ' '    # between entries within a row
8
+
9
+ class _matrix(object):
10
+ """
11
+ Numerical matrix.
12
+
13
+ Specify the dimensions or the data as a nested list.
14
+ Elements default to zero.
15
+ Use a flat list to create a column vector easily.
16
+
17
+ The datatype of the context (mpf for mp, mpi for iv, and float for fp) is used to store the data.
18
+
19
+ Creating matrices
20
+ -----------------
21
+
22
+ Matrices in mpmath are implemented using dictionaries. Only non-zero values
23
+ are stored, so it is cheap to represent sparse matrices.
24
+
25
+ The most basic way to create one is to use the ``matrix`` class directly.
26
+ You can create an empty matrix specifying the dimensions:
27
+
28
+ >>> from mpmath import *
29
+ >>> mp.dps = 15
30
+ >>> matrix(2)
31
+ matrix(
32
+ [['0.0', '0.0'],
33
+ ['0.0', '0.0']])
34
+ >>> matrix(2, 3)
35
+ matrix(
36
+ [['0.0', '0.0', '0.0'],
37
+ ['0.0', '0.0', '0.0']])
38
+
39
+ Calling ``matrix`` with one dimension will create a square matrix.
40
+
41
+ To access the dimensions of a matrix, use the ``rows`` or ``cols`` keyword:
42
+
43
+ >>> A = matrix(3, 2)
44
+ >>> A
45
+ matrix(
46
+ [['0.0', '0.0'],
47
+ ['0.0', '0.0'],
48
+ ['0.0', '0.0']])
49
+ >>> A.rows
50
+ 3
51
+ >>> A.cols
52
+ 2
53
+
54
+ You can also change the dimension of an existing matrix. This will set the
55
+ new elements to 0. If the new dimension is smaller than before, the
56
+ concerning elements are discarded:
57
+
58
+ >>> A.rows = 2
59
+ >>> A
60
+ matrix(
61
+ [['0.0', '0.0'],
62
+ ['0.0', '0.0']])
63
+
64
+ Internally ``mpmathify`` is used every time an element is set. This
65
+ is done using the syntax A[row,column], counting from 0:
66
+
67
+ >>> A = matrix(2)
68
+ >>> A[1,1] = 1 + 1j
69
+ >>> A
70
+ matrix(
71
+ [['0.0', '0.0'],
72
+ ['0.0', mpc(real='1.0', imag='1.0')]])
73
+
74
+ A more comfortable way to create a matrix lets you use nested lists:
75
+
76
+ >>> matrix([[1, 2], [3, 4]])
77
+ matrix(
78
+ [['1.0', '2.0'],
79
+ ['3.0', '4.0']])
80
+
81
+ Convenient advanced functions are available for creating various standard
82
+ matrices, see ``zeros``, ``ones``, ``diag``, ``eye``, ``randmatrix`` and
83
+ ``hilbert``.
84
+
85
+ Vectors
86
+ .......
87
+
88
+ Vectors may also be represented by the ``matrix`` class (with rows = 1 or cols = 1).
89
+ For vectors there are some things which make life easier. A column vector can
90
+ be created using a flat list, a row vectors using an almost flat nested list::
91
+
92
+ >>> matrix([1, 2, 3])
93
+ matrix(
94
+ [['1.0'],
95
+ ['2.0'],
96
+ ['3.0']])
97
+ >>> matrix([[1, 2, 3]])
98
+ matrix(
99
+ [['1.0', '2.0', '3.0']])
100
+
101
+ Optionally vectors can be accessed like lists, using only a single index::
102
+
103
+ >>> x = matrix([1, 2, 3])
104
+ >>> x[1]
105
+ mpf('2.0')
106
+ >>> x[1,0]
107
+ mpf('2.0')
108
+
109
+ Other
110
+ .....
111
+
112
+ Like you probably expected, matrices can be printed::
113
+
114
+ >>> print randmatrix(3) # doctest:+SKIP
115
+ [ 0.782963853573023 0.802057689719883 0.427895717335467]
116
+ [0.0541876859348597 0.708243266653103 0.615134039977379]
117
+ [ 0.856151514955773 0.544759264818486 0.686210904770947]
118
+
119
+ Use ``nstr`` or ``nprint`` to specify the number of digits to print::
120
+
121
+ >>> nprint(randmatrix(5), 3) # doctest:+SKIP
122
+ [2.07e-1 1.66e-1 5.06e-1 1.89e-1 8.29e-1]
123
+ [6.62e-1 6.55e-1 4.47e-1 4.82e-1 2.06e-2]
124
+ [4.33e-1 7.75e-1 6.93e-2 2.86e-1 5.71e-1]
125
+ [1.01e-1 2.53e-1 6.13e-1 3.32e-1 2.59e-1]
126
+ [1.56e-1 7.27e-2 6.05e-1 6.67e-2 2.79e-1]
127
+
128
+ As matrices are mutable, you will need to copy them sometimes::
129
+
130
+ >>> A = matrix(2)
131
+ >>> A
132
+ matrix(
133
+ [['0.0', '0.0'],
134
+ ['0.0', '0.0']])
135
+ >>> B = A.copy()
136
+ >>> B[0,0] = 1
137
+ >>> B
138
+ matrix(
139
+ [['1.0', '0.0'],
140
+ ['0.0', '0.0']])
141
+ >>> A
142
+ matrix(
143
+ [['0.0', '0.0'],
144
+ ['0.0', '0.0']])
145
+
146
+ Finally, it is possible to convert a matrix to a nested list. This is very useful,
147
+ as most Python libraries involving matrices or arrays (namely NumPy or SymPy)
148
+ support this format::
149
+
150
+ >>> B.tolist()
151
+ [[mpf('1.0'), mpf('0.0')], [mpf('0.0'), mpf('0.0')]]
152
+
153
+
154
+ Matrix operations
155
+ -----------------
156
+
157
+ You can add and subtract matrices of compatible dimensions::
158
+
159
+ >>> A = matrix([[1, 2], [3, 4]])
160
+ >>> B = matrix([[-2, 4], [5, 9]])
161
+ >>> A + B
162
+ matrix(
163
+ [['-1.0', '6.0'],
164
+ ['8.0', '13.0']])
165
+ >>> A - B
166
+ matrix(
167
+ [['3.0', '-2.0'],
168
+ ['-2.0', '-5.0']])
169
+ >>> A + ones(3) # doctest:+ELLIPSIS
170
+ Traceback (most recent call last):
171
+ ...
172
+ ValueError: incompatible dimensions for addition
173
+
174
+ It is possible to multiply or add matrices and scalars. In the latter case the
175
+ operation will be done element-wise::
176
+
177
+ >>> A * 2
178
+ matrix(
179
+ [['2.0', '4.0'],
180
+ ['6.0', '8.0']])
181
+ >>> A / 4
182
+ matrix(
183
+ [['0.25', '0.5'],
184
+ ['0.75', '1.0']])
185
+ >>> A - 1
186
+ matrix(
187
+ [['0.0', '1.0'],
188
+ ['2.0', '3.0']])
189
+
190
+ Of course you can perform matrix multiplication, if the dimensions are
191
+ compatible, using ``@`` (for Python >= 3.5) or ``*``. For clarity, ``@`` is
192
+ recommended (`PEP 465 <https://www.python.org/dev/peps/pep-0465/>`), because
193
+ the meaning of ``*`` is different in many other Python libraries such as NumPy.
194
+
195
+ >>> A @ B # doctest:+SKIP
196
+ matrix(
197
+ [['8.0', '22.0'],
198
+ ['14.0', '48.0']])
199
+ >>> A * B # same as A @ B
200
+ matrix(
201
+ [['8.0', '22.0'],
202
+ ['14.0', '48.0']])
203
+ >>> matrix([[1, 2, 3]]) * matrix([[-6], [7], [-2]])
204
+ matrix(
205
+ [['2.0']])
206
+
207
+ ..
208
+ COMMENT: TODO: the above "doctest:+SKIP" may be removed as soon as we
209
+ have dropped support for Python 3.5 and below.
210
+
211
+ You can raise powers of square matrices::
212
+
213
+ >>> A**2
214
+ matrix(
215
+ [['7.0', '10.0'],
216
+ ['15.0', '22.0']])
217
+
218
+ Negative powers will calculate the inverse::
219
+
220
+ >>> A**-1
221
+ matrix(
222
+ [['-2.0', '1.0'],
223
+ ['1.5', '-0.5']])
224
+ >>> A * A**-1
225
+ matrix(
226
+ [['1.0', '1.0842021724855e-19'],
227
+ ['-2.16840434497101e-19', '1.0']])
228
+
229
+
230
+
231
+ Matrix transposition is straightforward::
232
+
233
+ >>> A = ones(2, 3)
234
+ >>> A
235
+ matrix(
236
+ [['1.0', '1.0', '1.0'],
237
+ ['1.0', '1.0', '1.0']])
238
+ >>> A.T
239
+ matrix(
240
+ [['1.0', '1.0'],
241
+ ['1.0', '1.0'],
242
+ ['1.0', '1.0']])
243
+
244
+ Norms
245
+ .....
246
+
247
+ Sometimes you need to know how "large" a matrix or vector is. Due to their
248
+ multidimensional nature it's not possible to compare them, but there are
249
+ several functions to map a matrix or a vector to a positive real number, the
250
+ so called norms.
251
+
252
+ For vectors the p-norm is intended, usually the 1-, the 2- and the oo-norm are
253
+ used.
254
+
255
+ >>> x = matrix([-10, 2, 100])
256
+ >>> norm(x, 1)
257
+ mpf('112.0')
258
+ >>> norm(x, 2)
259
+ mpf('100.5186549850325')
260
+ >>> norm(x, inf)
261
+ mpf('100.0')
262
+
263
+ Please note that the 2-norm is the most used one, though it is more expensive
264
+ to calculate than the 1- or oo-norm.
265
+
266
+ It is possible to generalize some vector norms to matrix norm::
267
+
268
+ >>> A = matrix([[1, -1000], [100, 50]])
269
+ >>> mnorm(A, 1)
270
+ mpf('1050.0')
271
+ >>> mnorm(A, inf)
272
+ mpf('1001.0')
273
+ >>> mnorm(A, 'F')
274
+ mpf('1006.2310867787777')
275
+
276
+ The last norm (the "Frobenius-norm") is an approximation for the 2-norm, which
277
+ is hard to calculate and not available. The Frobenius-norm lacks some
278
+ mathematical properties you might expect from a norm.
279
+ """
280
+
281
    def __init__(self, *args, **kwargs):
        """
        Build a matrix from one of several argument forms:
          _matrix(n)            -- n x n zero matrix
          _matrix(m, n)         -- m x n zero matrix
          _matrix([[...], ...]) -- nested list interpreted as rows
          _matrix([...])        -- flat list interpreted as a column vector
          _matrix(other_matrix) -- copy of another _matrix
          _matrix(obj)          -- any object with a .tolist() method
        """
        # Sparse storage: only non-zero entries are kept, keyed by (i, j).
        self.__data = {}
        # LU decompostion cache, this is useful when solving the same system
        # multiple times, when calculating the inverse and when calculating the
        # determinant
        self._LU = None
        if "force_type" in kwargs:
            warnings.warn("The force_type argument was removed, it did not work"
                " properly anyway. If you want to force floating-point or"
                " interval computations, use the respective methods from `fp`"
                " or `mp` instead, e.g., `fp.matrix()` or `iv.matrix()`."
                " If you want to truncate values to integer, use .apply(int) instead.")
        if isinstance(args[0], (list, tuple)):
            if isinstance(args[0][0], (list, tuple)):
                # interpret nested list as matrix
                A = args[0]
                self.__rows = len(A)
                self.__cols = len(A[0])
                for i, row in enumerate(A):
                    for j, a in enumerate(row):
                        # note: this will call __setitem__ which will call self.ctx.convert() to convert the datatype.
                        self[i, j] = a
            else:
                # interpret flat list as a COLUMN vector (rows = len, cols = 1)
                v = args[0]
                self.__rows = len(v)
                self.__cols = 1
                for i, e in enumerate(v):
                    self[i, 0] = e
        elif isinstance(args[0], int):
            # create empty matrix of given dimensions
            if len(args) == 1:
                self.__rows = self.__cols = args[0]
            else:
                if not isinstance(args[1], int):
                    raise TypeError("expected int")
                self.__rows = args[0]
                self.__cols = args[1]
        elif isinstance(args[0], _matrix):
            # copy constructor; elements pass through __setitem__ so they are
            # re-converted into this context's datatype
            A = args[0]
            self.__rows = A._matrix__rows
            self.__cols = A._matrix__cols
            for i in xrange(A.__rows):
                for j in xrange(A.__cols):
                    self[i, j] = A[i, j]
        elif hasattr(args[0], 'tolist'):
            # e.g. numpy arrays: convert via their nested-list representation
            A = self.ctx.matrix(args[0].tolist())
            self.__data = A._matrix__data
            self.__rows = A._matrix__rows
            self.__cols = A._matrix__cols
        else:
            raise TypeError('could not interpret given arguments')
333
+
334
+ def apply(self, f):
335
+ """
336
+ Return a copy of self with the function `f` applied elementwise.
337
+ """
338
+ new = self.ctx.matrix(self.__rows, self.__cols)
339
+ for i in xrange(self.__rows):
340
+ for j in xrange(self.__cols):
341
+ new[i,j] = f(self[i,j])
342
+ return new
343
+
344
+ def __nstr__(self, n=None, **kwargs):
345
+ # Build table of string representations of the elements
346
+ res = []
347
+ # Track per-column max lengths for pretty alignment
348
+ maxlen = [0] * self.cols
349
+ for i in range(self.rows):
350
+ res.append([])
351
+ for j in range(self.cols):
352
+ if n:
353
+ string = self.ctx.nstr(self[i,j], n, **kwargs)
354
+ else:
355
+ string = str(self[i,j])
356
+ res[-1].append(string)
357
+ maxlen[j] = max(len(string), maxlen[j])
358
+ # Patch strings together
359
+ for i, row in enumerate(res):
360
+ for j, elem in enumerate(row):
361
+ # Pad each element up to maxlen so the columns line up
362
+ row[j] = elem.rjust(maxlen[j])
363
+ res[i] = "[" + colsep.join(row) + "]"
364
+ return rowsep.join(res)
365
+
366
+ def __str__(self):
367
+ return self.__nstr__()
368
+
369
+ def _toliststr(self, avoid_type=False):
370
+ """
371
+ Create a list string from a matrix.
372
+
373
+ If avoid_type: avoid multiple 'mpf's.
374
+ """
375
+ # XXX: should be something like self.ctx._types
376
+ typ = self.ctx.mpf
377
+ s = '['
378
+ for i in xrange(self.__rows):
379
+ s += '['
380
+ for j in xrange(self.__cols):
381
+ if not avoid_type or not isinstance(self[i,j], typ):
382
+ a = repr(self[i,j])
383
+ else:
384
+ a = "'" + str(self[i,j]) + "'"
385
+ s += a + ', '
386
+ s = s[:-2]
387
+ s += '],\n '
388
+ s = s[:-3]
389
+ s += ']'
390
+ return s
391
+
392
+ def tolist(self):
393
+ """
394
+ Convert the matrix to a nested list.
395
+ """
396
+ return [[self[i,j] for j in range(self.__cols)] for i in range(self.__rows)]
397
+
398
+ def __repr__(self):
399
+ if self.ctx.pretty:
400
+ return self.__str__()
401
+ s = 'matrix(\n'
402
+ s += self._toliststr(avoid_type=True) + ')'
403
+ return s
404
+
405
+ def __get_element(self, key):
406
+ '''
407
+ Fast extraction of the i,j element from the matrix
408
+ This function is for private use only because is unsafe:
409
+ 1. Does not check on the value of key it expects key to be a integer tuple (i,j)
410
+ 2. Does not check bounds
411
+ '''
412
+ if key in self.__data:
413
+ return self.__data[key]
414
+ else:
415
+ return self.ctx.zero
416
+
417
+ def __set_element(self, key, value):
418
+ '''
419
+ Fast assignment of the i,j element in the matrix
420
+ This function is unsafe:
421
+ 1. Does not check on the value of key it expects key to be a integer tuple (i,j)
422
+ 2. Does not check bounds
423
+ 3. Does not check the value type
424
+ 4. Does not reset the LU cache
425
+ '''
426
+ if value: # only store non-zeros
427
+ self.__data[key] = value
428
+ elif key in self.__data:
429
+ del self.__data[key]
430
+
431
+
432
    def __getitem__(self, key):
        '''
        Element access with single-index, tuple and slice indexing.

        Supported forms:
          A[i]          -- single index into a row or column vector
          A[i, j]       -- single element
          A[r1:r2, c] / A[:, c1:c2] -- submatrix copy, e.g. B = A[:, 2:6]

        Negative slice starts/stops are rejected; single indices are
        bounds-checked against rows/cols.
        '''
        # Convert vector to matrix indexing
        if isinstance(key, int) or isinstance(key,slice):
            # only sufficent for vectors
            if self.__rows == 1:
                key = (0, key)
            elif self.__cols == 1:
                key = (key, 0)
            else:
                raise IndexError('insufficient indices for matrix')

        if isinstance(key[0],slice) or isinstance(key[1],slice):

            #Rows
            if isinstance(key[0],slice):
                #Check bounds
                if (key[0].start is None or key[0].start >= 0) and \
                    (key[0].stop is None or key[0].stop <= self.__rows+1):
                    # Generate indices
                    rows = xrange(*key[0].indices(self.__rows))
                else:
                    raise IndexError('Row index out of bounds')
            else:
                # Single row
                rows = [key[0]]

            # Columns
            if isinstance(key[1],slice):
                # Check bounds
                if (key[1].start is None or key[1].start >= 0) and \
                    (key[1].stop is None or key[1].stop <= self.__cols+1):
                    # Generate indices
                    columns = xrange(*key[1].indices(self.__cols))
                else:
                    raise IndexError('Column index out of bounds')

            else:
                # Single column
                columns = [key[1]]

            # Create matrix slice
            m = self.ctx.matrix(len(rows),len(columns))

            # Assign elements to the output matrix
            # (unsafe fast path: indices were validated above)
            for i,x in enumerate(rows):
                for j,y in enumerate(columns):
                    m.__set_element((i,j),self.__get_element((x,y)))

            return m

        else:
            # single element extraction
            if key[0] >= self.__rows or key[1] >= self.__cols:
                raise IndexError('matrix index out of range')
            # missing keys are implicit zeros in the sparse storage
            if key in self.__data:
                return self.__data[key]
            else:
                return self.ctx.zero
496
+
497
    def __setitem__(self, key, value):
        '''
        Element assignment with single-index, tuple and slice indexing.

        Supported forms:
          A[i] = v          -- single index into a row or column vector
          A[i, j] = v       -- single element (converted via ctx.convert)
          A[:, 2:6] = 2.5   -- scalar broadcast over a slice
          A[3, :] = B       -- submatrix assignment; B must match the slice size

        Any successful assignment invalidates the cached LU decomposition.
        '''
        # Convert vector to matrix indexing
        if isinstance(key, int) or isinstance(key,slice):
            # only sufficent for vectors
            if self.__rows == 1:
                key = (0, key)
            elif self.__cols == 1:
                key = (key, 0)
            else:
                raise IndexError('insufficient indices for matrix')
        # Slice indexing
        if isinstance(key[0],slice) or isinstance(key[1],slice):
            # Rows
            if isinstance(key[0],slice):
                # Check bounds
                if (key[0].start is None or key[0].start >= 0) and \
                    (key[0].stop is None or key[0].stop <= self.__rows+1):
                    # generate row indices
                    rows = xrange(*key[0].indices(self.__rows))
                else:
                    raise IndexError('Row index out of bounds')
            else:
                # Single row
                rows = [key[0]]
            # Columns
            if isinstance(key[1],slice):
                # Check bounds
                if (key[1].start is None or key[1].start >= 0) and \
                    (key[1].stop is None or key[1].stop <= self.__cols+1):
                    # Generate column indices
                    columns = xrange(*key[1].indices(self.__cols))
                else:
                    raise IndexError('Column index out of bounds')
            else:
                # Single column
                columns = [key[1]]
            # Assign slice with a matrix
            if isinstance(value,self.ctx.matrix):
                # Assign elements to matrix if input and output dimensions match
                if len(rows) == value.rows and len(columns) == value.cols:
                    for i,x in enumerate(rows):
                        for j,y in enumerate(columns):
                            # fast path: indices validated above, values assumed converted
                            self.__set_element((x,y), value.__get_element((i,j)))
                else:
                    raise ValueError('Dimensions do not match')
            else:
                # Assign slice with scalars
                value = self.ctx.convert(value)
                for i in rows:
                    for j in columns:
                        self.__set_element((i,j), value)
        else:
            # Single element assingment
            # Check bounds
            if key[0] >= self.__rows or key[1] >= self.__cols:
                raise IndexError('matrix index out of range')
            # Convert and store value
            value = self.ctx.convert(value)
            if value: # only store non-zeros
                self.__data[key] = value
            elif key in self.__data:
                del self.__data[key]

        # any write invalidates the cached LU factorization
        if self._LU:
            self._LU = None
        return
569
+
570
+ def __iter__(self):
571
+ for i in xrange(self.__rows):
572
+ for j in xrange(self.__cols):
573
+ yield self[i,j]
574
+
575
    def __mul__(self, other):
        """
        Matrix * matrix (dot product, shapes must be compatible) or
        matrix * scalar (elementwise).
        """
        if isinstance(other, self.ctx.matrix):
            # dot multiplication
            if self.__cols != other.__rows:
                raise ValueError('dimensions not compatible for multiplication')
            new = self.ctx.matrix(self.__rows, other.__cols)
            # Bind the sparse-dict lookups once outside the hot loop;
            # ctx.fdot evaluates the dot product with a single final
            # rounding, which is more accurate than a naive running sum.
            self_zero = self.ctx.zero
            self_get = self.__data.get
            other_zero = other.ctx.zero
            other_get = other.__data.get
            for i in xrange(self.__rows):
                for j in xrange(other.__cols):
                    new[i, j] = self.ctx.fdot((self_get((i,k), self_zero), other_get((k,j), other_zero))
                                              for k in xrange(other.__rows))
            return new
        else:
            # try scalar multiplication
            new = self.ctx.matrix(self.__rows, self.__cols)
            for i in xrange(self.__rows):
                for j in xrange(self.__cols):
                    new[i, j] = other * self[i, j]
            return new

    def __matmul__(self, other):
        # PEP 465 '@' operator: identical to '*'.
        return self.__mul__(other)

    def __rmul__(self, other):
        # assume other is scalar and thus commutative
        if isinstance(other, self.ctx.matrix):
            raise TypeError("other should not be type of ctx.matrix")
        return self.__mul__(other)
606
+
607
+ def __pow__(self, other):
608
+ # avoid cyclic import problems
609
+ #from linalg import inverse
610
+ if not isinstance(other, int):
611
+ raise ValueError('only integer exponents are supported')
612
+ if not self.__rows == self.__cols:
613
+ raise ValueError('only powers of square matrices are defined')
614
+ n = other
615
+ if n == 0:
616
+ return self.ctx.eye(self.__rows)
617
+ if n < 0:
618
+ n = -n
619
+ neg = True
620
+ else:
621
+ neg = False
622
+ i = n
623
+ y = 1
624
+ z = self.copy()
625
+ while i != 0:
626
+ if i % 2 == 1:
627
+ y = y * z
628
+ z = z*z
629
+ i = i // 2
630
+ if neg:
631
+ y = self.ctx.inverse(y)
632
+ return y
633
+
634
+ def __div__(self, other):
635
+ # assume other is scalar and do element-wise divison
636
+ assert not isinstance(other, self.ctx.matrix)
637
+ new = self.ctx.matrix(self.__rows, self.__cols)
638
+ for i in xrange(self.__rows):
639
+ for j in xrange(self.__cols):
640
+ new[i,j] = self[i,j] / other
641
+ return new
642
+
643
+ __truediv__ = __div__
644
+
645
+ def __add__(self, other):
646
+ if isinstance(other, self.ctx.matrix):
647
+ if not (self.__rows == other.__rows and self.__cols == other.__cols):
648
+ raise ValueError('incompatible dimensions for addition')
649
+ new = self.ctx.matrix(self.__rows, self.__cols)
650
+ for i in xrange(self.__rows):
651
+ for j in xrange(self.__cols):
652
+ new[i,j] = self[i,j] + other[i,j]
653
+ return new
654
+ else:
655
+ # assume other is scalar and add element-wise
656
+ new = self.ctx.matrix(self.__rows, self.__cols)
657
+ for i in xrange(self.__rows):
658
+ for j in xrange(self.__cols):
659
+ new[i,j] += self[i,j] + other
660
+ return new
661
+
662
    def __radd__(self, other):
        # scalar + matrix: elementwise addition is commutative.
        return self.__add__(other)

    def __sub__(self, other):
        # Validate shapes up front, then reuse addition with the negated operand.
        if isinstance(other, self.ctx.matrix) and not (self.__rows == other.__rows
            and self.__cols == other.__cols):
            raise ValueError('incompatible dimensions for subtraction')
        return self.__add__(other * (-1))

    def __pos__(self):
        """
        +M returns a copy of M, rounded to current working precision.
        """
        return (+1) * self

    def __neg__(self):
        # Elementwise negation via scalar multiplication.
        return (-1) * self

    def __rsub__(self, other):
        # scalar - matrix
        return -self + other
682
+
683
+ def __eq__(self, other):
684
+ return self.__rows == other.__rows and self.__cols == other.__cols \
685
+ and self.__data == other.__data
686
+
687
+ def __len__(self):
688
+ if self.rows == 1:
689
+ return self.cols
690
+ elif self.cols == 1:
691
+ return self.rows
692
+ else:
693
+ return self.rows # do it like numpy
694
+
695
    def __getrows(self):
        return self.__rows

    def __setrows(self, value):
        # Shrinking discards stored entries whose row index falls outside
        # the new bound; growing just widens the logical size, since the
        # sparse storage treats missing keys as zeros.
        for key in self.__data.copy():
            if key[0] >= value:
                del self.__data[key]
        self.__rows = value

    rows = property(__getrows, __setrows, doc='number of rows')

    def __getcols(self):
        return self.__cols

    def __setcols(self, value):
        # Same pruning as __setrows, applied to the column index.
        for key in self.__data.copy():
            if key[1] >= value:
                del self.__data[key]
        self.__cols = value

    cols = property(__getcols, __setcols, doc='number of columns')
716
+
717
+ def transpose(self):
718
+ new = self.ctx.matrix(self.__cols, self.__rows)
719
+ for i in xrange(self.__rows):
720
+ for j in xrange(self.__cols):
721
+ new[j,i] = self[i,j]
722
+ return new
723
+
724
+ T = property(transpose)
725
+
726
+ def conjugate(self):
727
+ return self.apply(self.ctx.conj)
728
+
729
+ def transpose_conj(self):
730
+ return self.conjugate().transpose()
731
+
732
+ H = property(transpose_conj)
733
+
734
+ def copy(self):
735
+ new = self.ctx.matrix(self.__rows, self.__cols)
736
+ new.__data = self.__data.copy()
737
+ return new
738
+
739
+ __copy__ = copy
740
+
741
+ def column(self, n):
742
+ m = self.ctx.matrix(self.rows, 1)
743
+ for i in range(self.rows):
744
+ m[i] = self[i,n]
745
+ return m
746
+
747
+ class MatrixMethods(object):
748
+
749
    def __init__(ctx):
        # XXX: subclass
        # Create a context-bound subclass of _matrix so matrices built via
        # this context (mp, fp, iv, ...) convert their entries with the
        # context's own datatype conversion.
        ctx.matrix = type('matrix', (_matrix,), {})
        ctx.matrix.ctx = ctx
        ctx.matrix.convert = ctx.convert
754
+
755
+ def eye(ctx, n, **kwargs):
756
+ """
757
+ Create square identity matrix n x n.
758
+ """
759
+ A = ctx.matrix(n, **kwargs)
760
+ for i in xrange(n):
761
+ A[i,i] = 1
762
+ return A
763
+
764
+ def diag(ctx, diagonal, **kwargs):
765
+ """
766
+ Create square diagonal matrix using given list.
767
+
768
+ Example:
769
+ >>> from mpmath import diag, mp
770
+ >>> mp.pretty = False
771
+ >>> diag([1, 2, 3])
772
+ matrix(
773
+ [['1.0', '0.0', '0.0'],
774
+ ['0.0', '2.0', '0.0'],
775
+ ['0.0', '0.0', '3.0']])
776
+ """
777
+ A = ctx.matrix(len(diagonal), **kwargs)
778
+ for i in xrange(len(diagonal)):
779
+ A[i,i] = diagonal[i]
780
+ return A
781
+
782
+ def zeros(ctx, *args, **kwargs):
783
+ """
784
+ Create matrix m x n filled with zeros.
785
+ One given dimension will create square matrix n x n.
786
+
787
+ Example:
788
+ >>> from mpmath import zeros, mp
789
+ >>> mp.pretty = False
790
+ >>> zeros(2)
791
+ matrix(
792
+ [['0.0', '0.0'],
793
+ ['0.0', '0.0']])
794
+ """
795
+ if len(args) == 1:
796
+ m = n = args[0]
797
+ elif len(args) == 2:
798
+ m = args[0]
799
+ n = args[1]
800
+ else:
801
+ raise TypeError('zeros expected at most 2 arguments, got %i' % len(args))
802
+ A = ctx.matrix(m, n, **kwargs)
803
+ for i in xrange(m):
804
+ for j in xrange(n):
805
+ A[i,j] = 0
806
+ return A
807
+
808
+ def ones(ctx, *args, **kwargs):
809
+ """
810
+ Create matrix m x n filled with ones.
811
+ One given dimension will create square matrix n x n.
812
+
813
+ Example:
814
+ >>> from mpmath import ones, mp
815
+ >>> mp.pretty = False
816
+ >>> ones(2)
817
+ matrix(
818
+ [['1.0', '1.0'],
819
+ ['1.0', '1.0']])
820
+ """
821
+ if len(args) == 1:
822
+ m = n = args[0]
823
+ elif len(args) == 2:
824
+ m = args[0]
825
+ n = args[1]
826
+ else:
827
+ raise TypeError('ones expected at most 2 arguments, got %i' % len(args))
828
+ A = ctx.matrix(m, n, **kwargs)
829
+ for i in xrange(m):
830
+ for j in xrange(n):
831
+ A[i,j] = 1
832
+ return A
833
+
834
+ def hilbert(ctx, m, n=None):
835
+ """
836
+ Create (pseudo) hilbert matrix m x n.
837
+ One given dimension will create hilbert matrix n x n.
838
+
839
+ The matrix is very ill-conditioned and symmetric, positive definite if
840
+ square.
841
+ """
842
+ if n is None:
843
+ n = m
844
+ A = ctx.matrix(m, n)
845
+ for i in xrange(m):
846
+ for j in xrange(n):
847
+ A[i,j] = ctx.one / (i + j + 1)
848
+ return A
849
+
850
+ def randmatrix(ctx, m, n=None, min=0, max=1, **kwargs):
851
+ """
852
+ Create a random m x n matrix.
853
+
854
+ All values are >= min and <max.
855
+ n defaults to m.
856
+
857
+ Example:
858
+ >>> from mpmath import randmatrix
859
+ >>> randmatrix(2) # doctest:+SKIP
860
+ matrix(
861
+ [['0.53491598236191806', '0.57195669543302752'],
862
+ ['0.85589992269513615', '0.82444367501382143']])
863
+ """
864
+ if not n:
865
+ n = m
866
+ A = ctx.matrix(m, n, **kwargs)
867
+ for i in xrange(m):
868
+ for j in xrange(n):
869
+ A[i,j] = ctx.rand() * (max - min) + min
870
+ return A
871
+
872
+ def swap_row(ctx, A, i, j):
873
+ """
874
+ Swap row i with row j.
875
+ """
876
+ if i == j:
877
+ return
878
+ if isinstance(A, ctx.matrix):
879
+ for k in xrange(A.cols):
880
+ A[i,k], A[j,k] = A[j,k], A[i,k]
881
+ elif isinstance(A, list):
882
+ A[i], A[j] = A[j], A[i]
883
+ else:
884
+ raise TypeError('could not interpret type')
885
+
886
+ def extend(ctx, A, b):
887
+ """
888
+ Extend matrix A with column b and return result.
889
+ """
890
+ if not isinstance(A, ctx.matrix):
891
+ raise TypeError("A should be a type of ctx.matrix")
892
+ if A.rows != len(b):
893
+ raise ValueError("Value should be equal to len(b)")
894
+ A = A.copy()
895
+ A.cols += 1
896
+ for i in xrange(A.rows):
897
+ A[i, A.cols-1] = b[i]
898
+ return A
899
+
900
+ def norm(ctx, x, p=2):
901
+ r"""
902
+ Gives the entrywise `p`-norm of an iterable *x*, i.e. the vector norm
903
+ `\left(\sum_k |x_k|^p\right)^{1/p}`, for any given `1 \le p \le \infty`.
904
+
905
+ Special cases:
906
+
907
+ If *x* is not iterable, this just returns ``absmax(x)``.
908
+
909
+ ``p=1`` gives the sum of absolute values.
910
+
911
+ ``p=2`` is the standard Euclidean vector norm.
912
+
913
+ ``p=inf`` gives the magnitude of the largest element.
914
+
915
+ For *x* a matrix, ``p=2`` is the Frobenius norm.
916
+ For operator matrix norms, use :func:`~mpmath.mnorm` instead.
917
+
918
+ You can use the string 'inf' as well as float('inf') or mpf('inf')
919
+ to specify the infinity norm.
920
+
921
+ **Examples**
922
+
923
+ >>> from mpmath import *
924
+ >>> mp.dps = 15; mp.pretty = False
925
+ >>> x = matrix([-10, 2, 100])
926
+ >>> norm(x, 1)
927
+ mpf('112.0')
928
+ >>> norm(x, 2)
929
+ mpf('100.5186549850325')
930
+ >>> norm(x, inf)
931
+ mpf('100.0')
932
+
933
+ """
934
+ try:
935
+ iter(x)
936
+ except TypeError:
937
+ return ctx.absmax(x)
938
+ if type(p) is not int:
939
+ p = ctx.convert(p)
940
+ if p == ctx.inf:
941
+ return max(ctx.absmax(i) for i in x)
942
+ elif p == 1:
943
+ return ctx.fsum(x, absolute=1)
944
+ elif p == 2:
945
+ return ctx.sqrt(ctx.fsum(x, absolute=1, squared=1))
946
+ elif p > 1:
947
+ return ctx.nthroot(ctx.fsum(abs(i)**p for i in x), p)
948
+ else:
949
+ raise ValueError('p has to be >= 1')
950
+
951
+ def mnorm(ctx, A, p=1):
952
+ r"""
953
+ Gives the matrix (operator) `p`-norm of A. Currently ``p=1`` and ``p=inf``
954
+ are supported:
955
+
956
+ ``p=1`` gives the 1-norm (maximal column sum)
957
+
958
+ ``p=inf`` gives the `\infty`-norm (maximal row sum).
959
+ You can use the string 'inf' as well as float('inf') or mpf('inf')
960
+
961
+ ``p=2`` (not implemented) for a square matrix is the usual spectral
962
+ matrix norm, i.e. the largest singular value.
963
+
964
+ ``p='f'`` (or 'F', 'fro', 'Frobenius, 'frobenius') gives the
965
+ Frobenius norm, which is the elementwise 2-norm. The Frobenius norm is an
966
+ approximation of the spectral norm and satisfies
967
+
968
+ .. math ::
969
+
970
+ \frac{1}{\sqrt{\mathrm{rank}(A)}} \|A\|_F \le \|A\|_2 \le \|A\|_F
971
+
972
+ The Frobenius norm lacks some mathematical properties that might
973
+ be expected of a norm.
974
+
975
+ For general elementwise `p`-norms, use :func:`~mpmath.norm` instead.
976
+
977
+ **Examples**
978
+
979
+ >>> from mpmath import *
980
+ >>> mp.dps = 15; mp.pretty = False
981
+ >>> A = matrix([[1, -1000], [100, 50]])
982
+ >>> mnorm(A, 1)
983
+ mpf('1050.0')
984
+ >>> mnorm(A, inf)
985
+ mpf('1001.0')
986
+ >>> mnorm(A, 'F')
987
+ mpf('1006.2310867787777')
988
+
989
+ """
990
+ A = ctx.matrix(A)
991
+ if type(p) is not int:
992
+ if type(p) is str and 'frobenius'.startswith(p.lower()):
993
+ return ctx.norm(A, 2)
994
+ p = ctx.convert(p)
995
+ m, n = A.rows, A.cols
996
+ if p == 1:
997
+ return max(ctx.fsum((A[i,j] for i in xrange(m)), absolute=1) for j in xrange(n))
998
+ elif p == ctx.inf:
999
+ return max(ctx.fsum((A[i,j] for j in xrange(n)), absolute=1) for i in xrange(m))
1000
+ else:
1001
+ raise NotImplementedError("matrix p-norm for arbitrary p")
1002
+
1003
+ if __name__ == '__main__':
1004
+ import doctest
1005
+ doctest.testmod()
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/runtests.cpython-311.pyc ADDED
Binary file (7.51 kB). View file
 
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_bitwise.cpython-311.pyc ADDED
Binary file (16.1 kB). View file
 
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_compatibility.cpython-311.pyc ADDED
Binary file (4.1 kB). View file
 
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_elliptic.cpython-311.pyc ADDED
Binary file (46.2 kB). View file
 
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_hp.cpython-311.pyc ADDED
Binary file (13.8 kB). View file
 
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_linalg.cpython-311.pyc ADDED
Binary file (22.9 kB). View file
 
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_pickle.cpython-311.pyc ADDED
Binary file (1.27 kB). View file
 
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_special.cpython-311.pyc ADDED
Binary file (7.96 kB). View file
 
.venv/lib/python3.11/site-packages/mpmath/tests/__pycache__/test_visualization.cpython-311.pyc ADDED
Binary file (2.52 kB). View file
 
.venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/INSTALLER ADDED
@@ -0,0 +1 @@
 
 
1
+ pip
.venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/LICENSE ADDED
@@ -0,0 +1,201 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Apache License
2
+ Version 2.0, January 2004
3
+ http://www.apache.org/licenses/
4
+
5
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
6
+
7
+ 1. Definitions.
8
+
9
+ "License" shall mean the terms and conditions for use, reproduction,
10
+ and distribution as defined by Sections 1 through 9 of this document.
11
+
12
+ "Licensor" shall mean the copyright owner or entity authorized by
13
+ the copyright owner that is granting the License.
14
+
15
+ "Legal Entity" shall mean the union of the acting entity and all
16
+ other entities that control, are controlled by, or are under common
17
+ control with that entity. For the purposes of this definition,
18
+ "control" means (i) the power, direct or indirect, to cause the
19
+ direction or management of such entity, whether by contract or
20
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
21
+ outstanding shares, or (iii) beneficial ownership of such entity.
22
+
23
+ "You" (or "Your") shall mean an individual or Legal Entity
24
+ exercising permissions granted by this License.
25
+
26
+ "Source" form shall mean the preferred form for making modifications,
27
+ including but not limited to software source code, documentation
28
+ source, and configuration files.
29
+
30
+ "Object" form shall mean any form resulting from mechanical
31
+ transformation or translation of a Source form, including but
32
+ not limited to compiled object code, generated documentation,
33
+ and conversions to other media types.
34
+
35
+ "Work" shall mean the work of authorship, whether in Source or
36
+ Object form, made available under the License, as indicated by a
37
+ copyright notice that is included in or attached to the work
38
+ (an example is provided in the Appendix below).
39
+
40
+ "Derivative Works" shall mean any work, whether in Source or Object
41
+ form, that is based on (or derived from) the Work and for which the
42
+ editorial revisions, annotations, elaborations, or other modifications
43
+ represent, as a whole, an original work of authorship. For the purposes
44
+ of this License, Derivative Works shall not include works that remain
45
+ separable from, or merely link (or bind by name) to the interfaces of,
46
+ the Work and Derivative Works thereof.
47
+
48
+ "Contribution" shall mean any work of authorship, including
49
+ the original version of the Work and any modifications or additions
50
+ to that Work or Derivative Works thereof, that is intentionally
51
+ submitted to Licensor for inclusion in the Work by the copyright owner
52
+ or by an individual or Legal Entity authorized to submit on behalf of
53
+ the copyright owner. For the purposes of this definition, "submitted"
54
+ means any form of electronic, verbal, or written communication sent
55
+ to the Licensor or its representatives, including but not limited to
56
+ communication on electronic mailing lists, source code control systems,
57
+ and issue tracking systems that are managed by, or on behalf of, the
58
+ Licensor for the purpose of discussing and improving the Work, but
59
+ excluding communication that is conspicuously marked or otherwise
60
+ designated in writing by the copyright owner as "Not a Contribution."
61
+
62
+ "Contributor" shall mean Licensor and any individual or Legal Entity
63
+ on behalf of whom a Contribution has been received by Licensor and
64
+ subsequently incorporated within the Work.
65
+
66
+ 2. Grant of Copyright License. Subject to the terms and conditions of
67
+ this License, each Contributor hereby grants to You a perpetual,
68
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
69
+ copyright license to reproduce, prepare Derivative Works of,
70
+ publicly display, publicly perform, sublicense, and distribute the
71
+ Work and such Derivative Works in Source or Object form.
72
+
73
+ 3. Grant of Patent License. Subject to the terms and conditions of
74
+ this License, each Contributor hereby grants to You a perpetual,
75
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
76
+ (except as stated in this section) patent license to make, have made,
77
+ use, offer to sell, sell, import, and otherwise transfer the Work,
78
+ where such license applies only to those patent claims licensable
79
+ by such Contributor that are necessarily infringed by their
80
+ Contribution(s) alone or by combination of their Contribution(s)
81
+ with the Work to which such Contribution(s) was submitted. If You
82
+ institute patent litigation against any entity (including a
83
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
84
+ or a Contribution incorporated within the Work constitutes direct
85
+ or contributory patent infringement, then any patent licenses
86
+ granted to You under this License for that Work shall terminate
87
+ as of the date such litigation is filed.
88
+
89
+ 4. Redistribution. You may reproduce and distribute copies of the
90
+ Work or Derivative Works thereof in any medium, with or without
91
+ modifications, and in Source or Object form, provided that You
92
+ meet the following conditions:
93
+
94
+ (a) You must give any other recipients of the Work or
95
+ Derivative Works a copy of this License; and
96
+
97
+ (b) You must cause any modified files to carry prominent notices
98
+ stating that You changed the files; and
99
+
100
+ (c) You must retain, in the Source form of any Derivative Works
101
+ that You distribute, all copyright, patent, trademark, and
102
+ attribution notices from the Source form of the Work,
103
+ excluding those notices that do not pertain to any part of
104
+ the Derivative Works; and
105
+
106
+ (d) If the Work includes a "NOTICE" text file as part of its
107
+ distribution, then any Derivative Works that You distribute must
108
+ include a readable copy of the attribution notices contained
109
+ within such NOTICE file, excluding those notices that do not
110
+ pertain to any part of the Derivative Works, in at least one
111
+ of the following places: within a NOTICE text file distributed
112
+ as part of the Derivative Works; within the Source form or
113
+ documentation, if provided along with the Derivative Works; or,
114
+ within a display generated by the Derivative Works, if and
115
+ wherever such third-party notices normally appear. The contents
116
+ of the NOTICE file are for informational purposes only and
117
+ do not modify the License. You may add Your own attribution
118
+ notices within Derivative Works that You distribute, alongside
119
+ or as an addendum to the NOTICE text from the Work, provided
120
+ that such additional attribution notices cannot be construed
121
+ as modifying the License.
122
+
123
+ You may add Your own copyright statement to Your modifications and
124
+ may provide additional or different license terms and conditions
125
+ for use, reproduction, or distribution of Your modifications, or
126
+ for any such Derivative Works as a whole, provided Your use,
127
+ reproduction, and distribution of the Work otherwise complies with
128
+ the conditions stated in this License.
129
+
130
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
131
+ any Contribution intentionally submitted for inclusion in the Work
132
+ by You to the Licensor shall be under the terms and conditions of
133
+ this License, without any additional terms or conditions.
134
+ Notwithstanding the above, nothing herein shall supersede or modify
135
+ the terms of any separate license agreement you may have executed
136
+ with Licensor regarding such Contributions.
137
+
138
+ 6. Trademarks. This License does not grant permission to use the trade
139
+ names, trademarks, service marks, or product names of the Licensor,
140
+ except as required for reasonable and customary use in describing the
141
+ origin of the Work and reproducing the content of the NOTICE file.
142
+
143
+ 7. Disclaimer of Warranty. Unless required by applicable law or
144
+ agreed to in writing, Licensor provides the Work (and each
145
+ Contributor provides its Contributions) on an "AS IS" BASIS,
146
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
147
+ implied, including, without limitation, any warranties or conditions
148
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
149
+ PARTICULAR PURPOSE. You are solely responsible for determining the
150
+ appropriateness of using or redistributing the Work and assume any
151
+ risks associated with Your exercise of permissions under this License.
152
+
153
+ 8. Limitation of Liability. In no event and under no legal theory,
154
+ whether in tort (including negligence), contract, or otherwise,
155
+ unless required by applicable law (such as deliberate and grossly
156
+ negligent acts) or agreed to in writing, shall any Contributor be
157
+ liable to You for damages, including any direct, indirect, special,
158
+ incidental, or consequential damages of any character arising as a
159
+ result of this License or out of the use or inability to use the
160
+ Work (including but not limited to damages for loss of goodwill,
161
+ work stoppage, computer failure or malfunction, or any and all
162
+ other commercial damages or losses), even if such Contributor
163
+ has been advised of the possibility of such damages.
164
+
165
+ 9. Accepting Warranty or Additional Liability. While redistributing
166
+ the Work or Derivative Works thereof, You may choose to offer,
167
+ and charge a fee for, acceptance of support, warranty, indemnity,
168
+ or other liability obligations and/or rights consistent with this
169
+ License. However, in accepting such obligations, You may act only
170
+ on Your own behalf and on Your sole responsibility, not on behalf
171
+ of any other Contributor, and only if You agree to indemnify,
172
+ defend, and hold each Contributor harmless for any liability
173
+ incurred by, or claims asserted against, such Contributor by reason
174
+ of your accepting any such warranty or additional liability.
175
+
176
+ END OF TERMS AND CONDITIONS
177
+
178
+ APPENDIX: How to apply the Apache License to your work.
179
+
180
+ To apply the Apache License to your work, attach the following
181
+ boilerplate notice, with the fields enclosed by brackets "[]"
182
+ replaced with your own identifying information. (Don't include
183
+ the brackets!) The text should be enclosed in the appropriate
184
+ comment syntax for the file format. We also recommend that a
185
+ file or class name and description of purpose be included on the
186
+ same "printed page" as the copyright notice for easier
187
+ identification within third-party archives.
188
+
189
+ Copyright [yyyy] [name of copyright owner]
190
+
191
+ Licensed under the Apache License, Version 2.0 (the "License");
192
+ you may not use this file except in compliance with the License.
193
+ You may obtain a copy of the License at
194
+
195
+ http://www.apache.org/licenses/LICENSE-2.0
196
+
197
+ Unless required by applicable law or agreed to in writing, software
198
+ distributed under the License is distributed on an "AS IS" BASIS,
199
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
200
+ See the License for the specific language governing permissions and
201
+ limitations under the License.
.venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/METADATA ADDED
@@ -0,0 +1,313 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ Metadata-Version: 2.1
2
+ Name: opencensus
3
+ Version: 0.11.4
4
+ Summary: A stats collection and distributed tracing framework
5
+ Home-page: https://github.com/census-instrumentation/opencensus-python
6
+ Author: OpenCensus Authors
7
+ Author-email: census-developers@googlegroups.com
8
+ License: Apache-2.0
9
+ Platform: UNKNOWN
10
+ Classifier: Intended Audience :: Developers
11
+ Classifier: Development Status :: 4 - Beta
12
+ Classifier: Intended Audience :: Developers
13
+ Classifier: License :: OSI Approved :: Apache Software License
14
+ Classifier: Programming Language :: Python
15
+ Classifier: Programming Language :: Python :: 2
16
+ Classifier: Programming Language :: Python :: 2.7
17
+ Classifier: Programming Language :: Python :: 3
18
+ Classifier: Programming Language :: Python :: 3.4
19
+ Classifier: Programming Language :: Python :: 3.5
20
+ Classifier: Programming Language :: Python :: 3.6
21
+ Classifier: Programming Language :: Python :: 3.7
22
+ Classifier: Programming Language :: Python :: 3.8
23
+ Classifier: Programming Language :: Python :: 3.9
24
+ Requires-Dist: opencensus-context (>=0.1.3)
25
+ Requires-Dist: six (~=1.16)
26
+ Requires-Dist: google-api-core (<2.0.0,>=1.0.0) ; python_version < "3.6"
27
+ Requires-Dist: google-api-core (<3.0.0,>=1.0.0) ; python_version >= "3.6"
28
+
29
+ OpenCensus - A stats collection and distributed tracing framework
30
+ =================================================================
31
+
32
+ |gitter|
33
+ |travisci|
34
+ |circleci|
35
+ |pypi|
36
+ |compat_check_pypi|
37
+ |compat_check_github|
38
+
39
+
40
+ .. |travisci| image:: https://travis-ci.org/census-instrumentation/opencensus-python.svg?branch=master
41
+ :target: https://travis-ci.org/census-instrumentation/opencensus-python
42
+ .. |circleci| image:: https://circleci.com/gh/census-instrumentation/opencensus-python.svg?style=shield
43
+ :target: https://circleci.com/gh/census-instrumentation/opencensus-python
44
+ .. |gitter| image:: https://badges.gitter.im/census-instrumentation/lobby.svg
45
+ :target: https://gitter.im/census-instrumentation/lobby?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge
46
+ .. |pypi| image:: https://badge.fury.io/py/opencensus.svg
47
+ :target: https://pypi.org/project/opencensus/
48
+ .. |compat_check_pypi| image:: https://python-compatibility-tools.appspot.com/one_badge_image?package=opencensus
49
+ :target: https://python-compatibility-tools.appspot.com/one_badge_target?package=opencensus
50
+ .. |compat_check_github| image:: https://python-compatibility-tools.appspot.com/one_badge_image?package=git%2Bgit%3A//github.com/census-instrumentation/opencensus-python.git
51
+ :target: https://python-compatibility-tools.appspot.com/one_badge_target?package=git%2Bgit%3A//github.com/census-instrumentation/opencensus-python.git
52
+
53
+ `OpenCensus`_ for Python. OpenCensus provides a framework to measure a
54
+ server's resource usage and collect performance stats. This repository
55
+ contains Python related utilities and supporting software needed by
56
+ OpenCensus.
57
+
58
+ .. _OpenCensus: https://github.com/census-instrumentation
59
+
60
+ - `API Documentation`_
61
+
62
+ .. _API Documentation: https://opencensus.io/api/python/trace/usage.html
63
+
64
+ --------
65
+ Tracing
66
+ --------
67
+
68
+ Installation & basic usage
69
+ --------------------------
70
+
71
+ 1. Install the opencensus package using `pip`_ or `pipenv`_:
72
+
73
+ ::
74
+
75
+ pip install opencensus
76
+ pipenv install opencensus
77
+
78
+ 2. Initialize a tracer for your application:
79
+
80
+ .. code:: python
81
+
82
+ from opencensus.trace.tracer import Tracer
83
+ from opencensus.trace.samplers import AlwaysOnSampler
84
+
85
+ tracer = Tracer(sampler=AlwaysOnSampler())
86
+
87
+ .. _pip: https://pip.pypa.io
88
+ .. _pipenv: https://docs.pipenv.org/
89
+
90
+ 3. Initialize a view_manager and a stats_recorder for your application:
91
+
92
+ .. code:: python
93
+
94
+ from opencensus.stats import stats as stats_module
95
+
96
+ stats = stats_module.stats
97
+ view_manager = stats.view_manager
98
+ stats_recorder = stats.stats_recorder
99
+
100
+
101
+ Usage
102
+ -----
103
+
104
+ You can collect traces using the ``Tracer`` `context manager`_:
105
+
106
+ .. code:: python
107
+
108
+ from opencensus.trace.tracer import Tracer
109
+ from opencensus.trace.samplers import AlwaysOnSampler
110
+
111
+ # Initialize a tracer, by default using the `PrintExporter`
112
+ tracer = Tracer(sampler=AlwaysOnSampler())
113
+
114
+ # Example for creating nested spans
115
+ with tracer.span(name='span1'):
116
+ do_something_to_trace()
117
+ with tracer.span(name='span1_child1'):
118
+ do_something_to_trace()
119
+ with tracer.span(name='span1_child2'):
120
+ do_something_to_trace()
121
+ with tracer.span(name='span2'):
122
+ do_something_to_trace()
123
+
124
+ OpenCensus will collect everything within the ``with`` statement as a single span.
125
+
126
+ Alternatively, you can explicitly start and end a span:
127
+
128
+ .. code:: python
129
+
130
+ from opencensus.trace.tracer import Tracer
131
+ from opencensus.trace.samplers import AlwaysOnSampler
132
+
133
+ # Initialize a tracer, by default using the `PrintExporter`
134
+ tracer = Tracer(sampler=AlwaysOnSampler())
135
+
136
+ tracer.start_span(name='span1')
137
+ do_something_to_trace()
138
+ tracer.end_span()
139
+
140
+
141
+ .. _context manager: https://docs.python.org/3/reference/datamodel.html#context-managers
142
+
143
+
144
+ Customization
145
+ -------------
146
+
147
+ There are several things you can customize in OpenCensus:
148
+
149
+ * **Excludelist**, which excludes certain hosts and paths from being tracked.
150
+ By default, the health check path for the App Engine flexible environment is
151
+ not tracked, you can turn it on by excluding it from the excludelist setting.
152
+
153
+ * **Exporter**, which sends the traces.
154
+ By default, the traces are printed to stdout in JSON format. You can choose
155
+ different exporters to send the traces to. There are three built-in exporters,
156
+ which are ``PrintExporter``, ``FileExporter`` and ``LoggingExporter``, the
157
+ other exporters are provided as `extensions <#trace-exporter>`__.
158
+
159
+ * **Sampler**, which determines how traces are sampled.
160
+ The default sampler is the ``ProbabilitySampler``, which samples (i.e.
161
+ enables tracing for) a percentage of all requests. Sampling is deterministic
162
+ according to the trace ID. To force sampling for all requests, or to prevent
163
+ any request from being sampled, see ``AlwaysOnSampler`` and
164
+ ``AlwaysOffSampler``.
165
+
166
+ * **Propagator**, which serializes and deserializes the
167
+ ``SpanContext`` and its headers. The default propagator is
168
+ ``TraceContextPropagator``, other propagators include
169
+ ``BinaryFormatPropagator``, ``GoogleCloudFormatPropagator`` and
170
+ ``TextFormatPropagator``.
171
+
172
+
173
+ You can customize while initializing a tracer.
174
+
175
+ .. code:: python
176
+
177
+ import requests
178
+
179
+ from opencensus.trace import config_integration
180
+ from opencensus.trace import file_exporter
181
+ from opencensus.trace import tracer as tracer_module
182
+ from opencensus.trace.propagation import google_cloud_format
183
+ from opencensus.trace.samplers import ProbabilitySampler
184
+
185
+ config_integration.trace_integrations(['httplib'])
186
+
187
+ tracer = tracer_module.Tracer(
188
+ exporter=file_exporter.FileExporter(file_name='traces'),
189
+ propagator=google_cloud_format.GoogleCloudFormatPropagator(),
190
+ sampler=ProbabilitySampler(rate=0.5),
191
+ )
192
+
193
+ with tracer.span(name='parent'):
194
+ with tracer.span(name='child'):
195
+ response = requests.get('http://localhost:5000')
196
+
197
+ You can use a configuration file for Flask/Django/Pyramid. For more
198
+ information, please read the
199
+ `individual integration documentation <#integration>`_.
200
+
201
+ .. code:: python
202
+
203
+ 'OPENCENSUS': {
204
+ 'TRACE': {
205
+ 'EXCLUDELIST_HOSTNAMES': ['localhost', '127.0.0.1'],
206
+ 'EXCLUDELIST_PATHS': ['_ah/health'],
207
+ 'SAMPLER': 'opencensus.trace.samplers.ProbabilitySampler(rate=1)',
208
+ 'EXPORTER': '''opencensus.ext.ocagent.trace_exporter.TraceExporter(
209
+ service_name='foobar',
210
+ )''',
211
+ 'PROPAGATOR': 'opencensus.trace.propagation.google_cloud_format.GoogleCloudFormatPropagator()',
212
+ }
213
+ }
214
+
215
+ ------------
216
+ Extensions
217
+ ------------
218
+
219
+ Integration
220
+ -----------
221
+
222
+ OpenCensus supports integration with popular web frameworks, client libraries and built-in libraries.
223
+
224
+ - `Django`_
225
+ - `Flask`_
226
+ - `gevent`_
227
+ - `Google Cloud Client Libraries`_
228
+ - `gRPC`_
229
+ - `httplib`_
230
+ - `httpx`_
231
+ - `logging`_
232
+ - `MySQL`_
233
+ - `PostgreSQL`_
234
+ - `pymongo`_
235
+ - `PyMySQL`_
236
+ - `Pyramid`_
237
+ - `requests`_
238
+ - `SQLAlchemy`_
239
+ - `threading`_
240
+
241
+ Log Exporter
242
+ ------------
243
+
244
+ - `Azure`_
245
+
246
+ Metrics Exporter
247
+ ----------------
248
+
249
+ - `Azure`_
250
+
251
+ Stats Exporter
252
+ --------------
253
+
254
+ - `OCAgent`_
255
+ - `Prometheus`_
256
+ - `Stackdriver`_
257
+
258
+ Trace Exporter
259
+ --------------
260
+
261
+ - `Azure`_
262
+ - `Datadog`_
263
+ - `Jaeger`_
264
+ - `OCAgent`_
265
+ - `Stackdriver`_
266
+ - `Zipkin`_
267
+
268
+ .. _Azure: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-azure
269
+ .. _Datadog: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-datadog
270
+ .. _Django: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-django
271
+ .. _Flask: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-flask
272
+ .. _FastAPI: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-fastapi
273
+ .. _gevent: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-gevent
274
+ .. _Google Cloud Client Libraries: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-google-cloud-clientlibs
275
+ .. _gRPC: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-grpc
276
+ .. _httplib: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-httplib
277
+ .. _httpx: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-httpx
278
+ .. _Jaeger: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-jaeger
279
+ .. _logging: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-logging
280
+ .. _MySQL: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-mysql
281
+ .. _OCAgent: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-ocagent
282
+ .. _PostgreSQL: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-postgresql
283
+ .. _Prometheus: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-prometheus
284
+ .. _pymongo: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-pymongo
285
+ .. _PyMySQL: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-pymysql
286
+ .. _Pyramid: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-pyramid
287
+ .. _requests: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-requests
288
+ .. _SQLAlchemy: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-sqlalchemy
289
+ .. _Stackdriver: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-stackdriver
290
+ .. _threading: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-threading
291
+ .. _Zipkin: https://github.com/census-instrumentation/opencensus-python/tree/master/contrib/opencensus-ext-zipkin
292
+
293
+ ------------
294
+ Versioning
295
+ ------------
296
+
297
+ This library follows `Semantic Versioning`_.
298
+
299
+ **GA**: Libraries defined at a GA quality level are stable, and will not introduce
300
+ backwards-incompatible changes in any minor or patch releases. We will address issues and requests
301
+ with the highest priority. If we were to make a backwards-incompatible changes on an API, we will
302
+ first mark the existing API as deprecated and keep it for 18 months before removing it.
303
+
304
+ **Beta**: Libraries defined at a Beta quality level are expected to be mostly stable and we're
305
+ working towards their release candidate. We will address issues and requests with a higher priority.
306
+ There may be backwards incompatible changes in a minor version release, though not in a patch
307
+ release. If an element is part of an API that is only meant to be used by exporters or other
308
+ opencensus libraries, then there is no deprecation period. Otherwise, we will deprecate it for 18
309
+ months before removing it, if possible.
310
+
311
+ .. _Semantic Versioning: https://semver.org/
312
+
313
+
.venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/RECORD ADDED
@@ -0,0 +1,193 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ opencensus-0.11.4.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4
2
+ opencensus-0.11.4.dist-info/LICENSE,sha256=YmXwJVPDAw7MfsXq36r_DSbI55E92i-djKAC0Fa43ts,11553
3
+ opencensus-0.11.4.dist-info/METADATA,sha256=M0nHiQlDaEteLWFmNwGGs6Xu1C3_YsczItcW70LZG4k,12351
4
+ opencensus-0.11.4.dist-info/RECORD,,
5
+ opencensus-0.11.4.dist-info/WHEEL,sha256=Z-nyYpwrcSqxfdux5Mbn_DQ525iP7J2DG3JgGvOYyTQ,110
6
+ opencensus-0.11.4.dist-info/namespace_packages.txt,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
7
+ opencensus-0.11.4.dist-info/top_level.txt,sha256=J24OU61lnFeMMuwOLPtTNywYsd2Bzp5KbBCPtAhDgaI,11
8
+ opencensus/__init__.py,sha256=F0UD44DuZCpVvI1PX5rW4FcKQ004ORdqeOhf4JsheIY,66
9
+ opencensus/__pycache__/__init__.cpython-311.pyc,,
10
+ opencensus/common/__init__.py,sha256=F0UD44DuZCpVvI1PX5rW4FcKQ004ORdqeOhf4JsheIY,66
11
+ opencensus/common/__pycache__/__init__.cpython-311.pyc,,
12
+ opencensus/common/backports/__init__.py,sha256=ZfPWVZ8q7qcuRBDY9pGyH36kQ5mejJuMNDyOltEZqb0,2888
13
+ opencensus/common/backports/__pycache__/__init__.cpython-311.pyc,,
14
+ opencensus/common/configuration/__init__.py,sha256=YIDloGhqI4TzgvMx8lIrTMephpwaW31xcoO0a6UGmTE,1443
15
+ opencensus/common/configuration/__pycache__/__init__.cpython-311.pyc,,
16
+ opencensus/common/http_handler/__init__.py,sha256=CHqTDxGFN8pq1njv5f53aMVzdc1G-xYJEfqwJKR5QWc,1434
17
+ opencensus/common/http_handler/__pycache__/__init__.cpython-311.pyc,,
18
+ opencensus/common/monitored_resource/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
19
+ opencensus/common/monitored_resource/__pycache__/__init__.cpython-311.pyc,,
20
+ opencensus/common/monitored_resource/__pycache__/aws_identity_doc_utils.cpython-311.pyc,,
21
+ opencensus/common/monitored_resource/__pycache__/gcp_metadata_config.cpython-311.pyc,,
22
+ opencensus/common/monitored_resource/__pycache__/k8s_utils.cpython-311.pyc,,
23
+ opencensus/common/monitored_resource/__pycache__/monitored_resource.cpython-311.pyc,,
24
+ opencensus/common/monitored_resource/aws_identity_doc_utils.py,sha256=v-7tuf9P554rhunHSoYmghszU14gI7i9ot7NuXJB42w,3243
25
+ opencensus/common/monitored_resource/gcp_metadata_config.py,sha256=OufJ6HUxVTfRC-5_WFvs3SiywXxA2damyVyAtvUZsFs,4119
26
+ opencensus/common/monitored_resource/k8s_utils.py,sha256=9RSsoqUdMSpS12P-tn3v7VtSXaIBM8KqqqBxL7MpqiM,2108
27
+ opencensus/common/monitored_resource/monitored_resource.py,sha256=vbxUPe6kFufY7IL46QMBBOxwSI2AVHcnwZSVxqUf2U8,2389
28
+ opencensus/common/resource/__init__.py,sha256=9c0T-2H2oHI1lXm11KvFdUjfSvsOMiowLQaGJqZ6NUY,6591
29
+ opencensus/common/resource/__pycache__/__init__.cpython-311.pyc,,
30
+ opencensus/common/schedule/__init__.py,sha256=6o-WqHxUC075eKEMnqVb-omWzwGY7cxbPcukxkACPUk,4738
31
+ opencensus/common/schedule/__pycache__/__init__.cpython-311.pyc,,
32
+ opencensus/common/transports/__init__.py,sha256=TcAW4NO62hdY2RC2-lLga_icFnrpYMrR9zcS2xcuz6U,596
33
+ opencensus/common/transports/__pycache__/__init__.cpython-311.pyc,,
34
+ opencensus/common/transports/__pycache__/async_.cpython-311.pyc,,
35
+ opencensus/common/transports/__pycache__/base.cpython-311.pyc,,
36
+ opencensus/common/transports/__pycache__/sync.cpython-311.pyc,,
37
+ opencensus/common/transports/async_.py,sha256=yVabLF1avTBMMewrndVZ5Uet7WPVpH6wncfDpF4hXIA,8063
38
+ opencensus/common/transports/base.py,sha256=MHpSA0DWEe3bd9S1Pqz85i3UOa5yjkP0I_b894VYW5A,1021
39
+ opencensus/common/transports/sync.py,sha256=5cxYNnC3ntynQmM59ccER1aGHKWRWp1yNUZD7DNZKCE,1058
40
+ opencensus/common/utils/__init__.py,sha256=JRpGoXBroCFQTlwm7a3sP2AihrPpfFL8Ots8Yp11eYU,3738
41
+ opencensus/common/utils/__pycache__/__init__.cpython-311.pyc,,
42
+ opencensus/common/version/__init__.py,sha256=3A51Klomvw2AvHVZ7lV3G1qmldc3cfIPvHNl8C-N55s,622
43
+ opencensus/common/version/__pycache__/__init__.cpython-311.pyc,,
44
+ opencensus/log/__init__.py,sha256=U3ULx0SpGhjuUlbaItEQv0bQLUKSlQEBjEgplNEyJcI,4159
45
+ opencensus/log/__pycache__/__init__.cpython-311.pyc,,
46
+ opencensus/metrics/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
47
+ opencensus/metrics/__pycache__/__init__.cpython-311.pyc,,
48
+ opencensus/metrics/__pycache__/label_key.cpython-311.pyc,,
49
+ opencensus/metrics/__pycache__/label_value.cpython-311.pyc,,
50
+ opencensus/metrics/__pycache__/transport.cpython-311.pyc,,
51
+ opencensus/metrics/export/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
52
+ opencensus/metrics/export/__pycache__/__init__.cpython-311.pyc,,
53
+ opencensus/metrics/export/__pycache__/cumulative.cpython-311.pyc,,
54
+ opencensus/metrics/export/__pycache__/gauge.cpython-311.pyc,,
55
+ opencensus/metrics/export/__pycache__/metric.cpython-311.pyc,,
56
+ opencensus/metrics/export/__pycache__/metric_descriptor.cpython-311.pyc,,
57
+ opencensus/metrics/export/__pycache__/metric_producer.cpython-311.pyc,,
58
+ opencensus/metrics/export/__pycache__/point.cpython-311.pyc,,
59
+ opencensus/metrics/export/__pycache__/summary.cpython-311.pyc,,
60
+ opencensus/metrics/export/__pycache__/time_series.cpython-311.pyc,,
61
+ opencensus/metrics/export/__pycache__/value.cpython-311.pyc,,
62
+ opencensus/metrics/export/cumulative.py,sha256=pXuvoBu8dk97HlEH_kH5iAXVzMGVnkZB49HQREOPcmc,2958
63
+ opencensus/metrics/export/gauge.py,sha256=XOLPsP2IEmSRaINvIwPcMjJI-07LdscYRqo2oxVeEmI,17770
64
+ opencensus/metrics/export/metric.py,sha256=Uo6rJKw6D7-UcRwSpsqkGfhli8XU4KrvufmjQ1r38mc,3224
65
+ opencensus/metrics/export/metric_descriptor.py,sha256=NhFPIeQn_G42GU0mxkPG5cRD6pH07y4bxXTGq4BUVrg,6174
66
+ opencensus/metrics/export/metric_producer.py,sha256=8XSFwJpAiliqaIR7TTuaYp26sJ1GZhep4H9Ul_5DSjk,2764
67
+ opencensus/metrics/export/point.py,sha256=S5P0dnPFMT5Waj-nIPlT3V0TBeTyCAiQ4Fx0gWw2QyI,1571
68
+ opencensus/metrics/export/summary.py,sha256=n30_7EsVZy5CEeNSfGaxVUe-l7bzvjbLTasRJJH48H8,4449
69
+ opencensus/metrics/export/time_series.py,sha256=K-Va6EKfdS5b1ifO68r8d4rOVJRYhcDPp_w_hW18h7A,3278
70
+ opencensus/metrics/export/value.py,sha256=uX9IrsAZirUspDJrxPWPqE4V2Y0REOoRjidPCsMGRUQ,9082
71
+ opencensus/metrics/label_key.py,sha256=ZS0RSVW4VzQzLESLJho9yLRnV4Oi78Hum93mfMSf8CA,1630
72
+ opencensus/metrics/label_value.py,sha256=lv1HbhzOyi4xOIvLFazzYqi8YPoAz1Saci4Se_T26F8,1289
73
+ opencensus/metrics/transport.py,sha256=rVaKjRwPhdSy1Ik9ks1u6Z7CMwf9hbbBRDpBFV5YTts,4534
74
+ opencensus/stats/__init__.py,sha256=TcAW4NO62hdY2RC2-lLga_icFnrpYMrR9zcS2xcuz6U,596
75
+ opencensus/stats/__pycache__/__init__.cpython-311.pyc,,
76
+ opencensus/stats/__pycache__/aggregation.cpython-311.pyc,,
77
+ opencensus/stats/__pycache__/aggregation_data.cpython-311.pyc,,
78
+ opencensus/stats/__pycache__/base_exporter.cpython-311.pyc,,
79
+ opencensus/stats/__pycache__/bucket_boundaries.cpython-311.pyc,,
80
+ opencensus/stats/__pycache__/execution_context.cpython-311.pyc,,
81
+ opencensus/stats/__pycache__/measure.cpython-311.pyc,,
82
+ opencensus/stats/__pycache__/measure_to_view_map.cpython-311.pyc,,
83
+ opencensus/stats/__pycache__/measurement.cpython-311.pyc,,
84
+ opencensus/stats/__pycache__/measurement_map.cpython-311.pyc,,
85
+ opencensus/stats/__pycache__/metric_utils.cpython-311.pyc,,
86
+ opencensus/stats/__pycache__/stats.cpython-311.pyc,,
87
+ opencensus/stats/__pycache__/stats_recorder.cpython-311.pyc,,
88
+ opencensus/stats/__pycache__/view.cpython-311.pyc,,
89
+ opencensus/stats/__pycache__/view_data.cpython-311.pyc,,
90
+ opencensus/stats/__pycache__/view_manager.cpython-311.pyc,,
91
+ opencensus/stats/aggregation.py,sha256=M8PAxMf0RXMhezuItnOhcPvadmmRvxeX9xoMCWQexFY,5180
92
+ opencensus/stats/aggregation_data.py,sha256=Q4_xZm5hqpVRZH0qW0hhByXAIBQjL3daVcMQBKruMnA,13885
93
+ opencensus/stats/base_exporter.py,sha256=LlCB_0lLw2XMXChIUU4KOoPfvMWirX8U7-i7p7Fmg9w,1569
94
+ opencensus/stats/bucket_boundaries.py,sha256=2e6lj0dbuRwQPELehsaAhnAO6hSThz8QerJ5tWTzswU,1407
95
+ opencensus/stats/execution_context.py,sha256=vSMam4Q_CgCfuJ4lVCPBtbBLTK4PCFCyWOhG6zVUtLc,1067
96
+ opencensus/stats/measure.py,sha256=dCxrmRi88TqMgULZYz4amooB7hvMc91xanUB4aMGp0M,1928
97
+ opencensus/stats/measure_to_view_map.py,sha256=mvEDs_p2NLuNnV6qKL0QB2k5AzxIO9eVpCqZzJJCu3w,6271
98
+ opencensus/stats/measurement.py,sha256=DXSUTsSkabsF8y7kjZjuUN0n76weaJi86z1TRPwETw0,1649
99
+ opencensus/stats/measurement_map.py,sha256=8nr-W2OzvKXpNDKXE0hCDNWWVuH3JykKHQ7269tn1KY,4821
100
+ opencensus/stats/metric_utils.py,sha256=JHJWHFm8ztm4Yy1e7pP5qxeRh6_Jk4CmgbrPwmt7DQA,2858
101
+ opencensus/stats/stats.py,sha256=4nBU5aIQXbuCYNfpdwcmH19tS37_VUSJktoxMArq08c,1545
102
+ opencensus/stats/stats_recorder.py,sha256=xoYyGI__Ml-NiKinYH4q9wyT9Z7jXPbsPH5UH5wYAAw,1378
103
+ opencensus/stats/view.py,sha256=-7wzEVJEw3OnC67pi3FVzr2JM-aqhJTXaKnaAA2gaIc,3638
104
+ opencensus/stats/view_data.py,sha256=mlj-tzvvCHnQjgUF_2Rpyfh4BGUDq8uwdX6TgE5BytA,3273
105
+ opencensus/stats/view_manager.py,sha256=ePgy60hsjWf-eR6FNp1biQYbD8umxYtnpMK8FIjDUV0,2216
106
+ opencensus/tags/__init__.py,sha256=TqjMu0ZDj4wMrhcDfIPuswSw4F2PTpxPwlO4ZGzzjys,966
107
+ opencensus/tags/__pycache__/__init__.cpython-311.pyc,,
108
+ opencensus/tags/__pycache__/tag.cpython-311.pyc,,
109
+ opencensus/tags/__pycache__/tag_key.cpython-311.pyc,,
110
+ opencensus/tags/__pycache__/tag_map.cpython-311.pyc,,
111
+ opencensus/tags/__pycache__/tag_value.cpython-311.pyc,,
112
+ opencensus/tags/__pycache__/validation.cpython-311.pyc,,
113
+ opencensus/tags/propagation/__init__.py,sha256=TcAW4NO62hdY2RC2-lLga_icFnrpYMrR9zcS2xcuz6U,596
114
+ opencensus/tags/propagation/__pycache__/__init__.cpython-311.pyc,,
115
+ opencensus/tags/propagation/__pycache__/binary_serializer.cpython-311.pyc,,
116
+ opencensus/tags/propagation/binary_serializer.py,sha256=eWygMfCWytI66OVO4lRpEQxAb9sKER5wi0IlRjKbGRY,3891
117
+ opencensus/tags/tag.py,sha256=myzlZNWW2TdF4nvL4BsiqIFnsqsmalHAvUYsqWPQnc4,1145
118
+ opencensus/tags/tag_key.py,sha256=obev7j_EvSTK56MD6OOS8yhlzu0-xIMHiVevdt0_C0o,1227
119
+ opencensus/tags/tag_map.py,sha256=hNuiRO1qZJf3l_gesb3extDUnwCMfl-mW-8TH1tfKTE,3339
120
+ opencensus/tags/tag_value.py,sha256=5opAVWqI50CovpSh_Z6mqnaC2T4ZDpurLB423OyI5lE,1255
121
+ opencensus/tags/validation.py,sha256=OV8gJ9b3UyLz9tYr6IbAT65vHr6jp3gB9Bo8UmpeA8I,1255
122
+ opencensus/trace/__init__.py,sha256=3vmAI0NGA5urfT7sweDlIpvWmGUf5LG2luZsd_Ra4f4,660
123
+ opencensus/trace/__pycache__/__init__.cpython-311.pyc,,
124
+ opencensus/trace/__pycache__/attributes.cpython-311.pyc,,
125
+ opencensus/trace/__pycache__/attributes_helper.cpython-311.pyc,,
126
+ opencensus/trace/__pycache__/base_exporter.cpython-311.pyc,,
127
+ opencensus/trace/__pycache__/base_span.cpython-311.pyc,,
128
+ opencensus/trace/__pycache__/blank_span.cpython-311.pyc,,
129
+ opencensus/trace/__pycache__/config_integration.cpython-311.pyc,,
130
+ opencensus/trace/__pycache__/exceptions_status.cpython-311.pyc,,
131
+ opencensus/trace/__pycache__/execution_context.cpython-311.pyc,,
132
+ opencensus/trace/__pycache__/file_exporter.cpython-311.pyc,,
133
+ opencensus/trace/__pycache__/integrations.cpython-311.pyc,,
134
+ opencensus/trace/__pycache__/link.cpython-311.pyc,,
135
+ opencensus/trace/__pycache__/logging_exporter.cpython-311.pyc,,
136
+ opencensus/trace/__pycache__/print_exporter.cpython-311.pyc,,
137
+ opencensus/trace/__pycache__/span.cpython-311.pyc,,
138
+ opencensus/trace/__pycache__/span_context.cpython-311.pyc,,
139
+ opencensus/trace/__pycache__/span_data.cpython-311.pyc,,
140
+ opencensus/trace/__pycache__/stack_trace.cpython-311.pyc,,
141
+ opencensus/trace/__pycache__/status.cpython-311.pyc,,
142
+ opencensus/trace/__pycache__/time_event.cpython-311.pyc,,
143
+ opencensus/trace/__pycache__/trace_options.cpython-311.pyc,,
144
+ opencensus/trace/__pycache__/tracer.cpython-311.pyc,,
145
+ opencensus/trace/__pycache__/tracestate.cpython-311.pyc,,
146
+ opencensus/trace/__pycache__/utils.cpython-311.pyc,,
147
+ opencensus/trace/attributes.py,sha256=YAyn_Fp2V969gOwB_QhQN0iwnlskKV3f4E9rt_xMjOA,2457
148
+ opencensus/trace/attributes_helper.py,sha256=o67kxxUOGwbjUY_Zeorr4AkCsqj0z8FYYvNizgx8z_Y,1522
149
+ opencensus/trace/base_exporter.py,sha256=R2B4vWebq5BxEdxTJ6sXxnRfBTzJI7NBSpsvJC9eaRk,1585
150
+ opencensus/trace/base_span.py,sha256=gMYlnYrsYwBbhmUxT_lC0kcFqAoJTSIU7q35RSAvOlE,3429
151
+ opencensus/trace/blank_span.py,sha256=tcSFY4WtINQy9ygclZncSg5qq1gPW8J2PCIXx9L7Akc,5440
152
+ opencensus/trace/config_integration.py,sha256=DuUsSoIG4Mg0Z3eNjp13bZ0mygabe8GFpnSNj7F2GNs,1330
153
+ opencensus/trace/exceptions_status.py,sha256=YYyGIYnsVNevsg6bdloxzEObxO4WmMmBTyTKPWjCbZQ,940
154
+ opencensus/trace/execution_context.py,sha256=OYQGwRUXSx9Tn9p_wxghrI3MFV-1ME9Wp7pYXyuaaCw,2476
155
+ opencensus/trace/file_exporter.py,sha256=eXYNXEx5dHU5wPFb61ASUDyUZepxJMhKbmcQ9Mt8mJo,2669
156
+ opencensus/trace/integrations.py,sha256=OeyzYjBpA501-96KC1fEccfasTb03njsswbZV0Er230,1448
157
+ opencensus/trace/link.py,sha256=e-Wwrsnk3-zlK9JMaWQ1t80jSmdkPQa-dBM667DnRTQ,2622
158
+ opencensus/trace/logging_exporter.py,sha256=zSby20MJ1o6M4nIB9hCeiA1zhNdZgZcos3_2IZf_83I,3217
159
+ opencensus/trace/print_exporter.py,sha256=T0SvkDcbwIiA5V-UriZLSMDDetuDesLXQ6Ru-xveCYw,1932
160
+ opencensus/trace/propagation/__init__.py,sha256=3cwrGSSn3JMa-Au3B1875PPwj3Ndg-lxgslMZRNsW3s,596
161
+ opencensus/trace/propagation/__pycache__/__init__.cpython-311.pyc,,
162
+ opencensus/trace/propagation/__pycache__/b3_format.cpython-311.pyc,,
163
+ opencensus/trace/propagation/__pycache__/binary_format.cpython-311.pyc,,
164
+ opencensus/trace/propagation/__pycache__/google_cloud_format.cpython-311.pyc,,
165
+ opencensus/trace/propagation/__pycache__/text_format.cpython-311.pyc,,
166
+ opencensus/trace/propagation/__pycache__/trace_context_http_header_format.cpython-311.pyc,,
167
+ opencensus/trace/propagation/__pycache__/tracestate_string_format.cpython-311.pyc,,
168
+ opencensus/trace/propagation/b3_format.py,sha256=7C2dZQgsu2NVTdwsnE8qnU-WfdHgpBfb5g4oSItgVuU,4053
169
+ opencensus/trace/propagation/binary_format.py,sha256=aahWtjg1AUJDkMiE3FUrSQrRZyb-H5KeULsEYaM_QjA,6168
170
+ opencensus/trace/propagation/google_cloud_format.py,sha256=GDkpPWW5r3bubkp86CDSQwKDYeiY6X9i-m57-Ysdwgs,4605
171
+ opencensus/trace/propagation/text_format.py,sha256=nBUZq4_jf5WhdSth1sl30rhrmIaaIabJiIQ1fwiY0GY,3174
172
+ opencensus/trace/propagation/trace_context_http_header_format.py,sha256=y77aO_Hx9WI-pji-7HakeKK_DRnalZg0XAyYiTQ5UGw,3946
173
+ opencensus/trace/propagation/tracestate_string_format.py,sha256=jw_mhR2_NvuAvjrF4B4_bilLiEqw-ocTJBDXGBE1Ryo,1604
174
+ opencensus/trace/samplers/__init__.py,sha256=MQtXRIokFq2UCXR5Xj7K8_Nk_Ga1DCIF7iwAFBpYZ-g,2895
175
+ opencensus/trace/samplers/__pycache__/__init__.cpython-311.pyc,,
176
+ opencensus/trace/span.py,sha256=F6JaUIiyOlOp9soyu8VoKHkx3SbrizKRsLYD2p_TEd4,15598
177
+ opencensus/trace/span_context.py,sha256=KN8newixFHvWZ-JXP_UBFyHtVUSVFm5mRFA5upV2gBE,5548
178
+ opencensus/trace/span_data.py,sha256=aCJm2UwEIcJ2pDI3UXHtNsyVadAUOIvC05eqybIN_yA,6514
179
+ opencensus/trace/stack_trace.py,sha256=rm1kCy2U_sXq9ndW0DVcHfZO98X8jIJo8Fdb2nAr0bQ,6998
180
+ opencensus/trace/status.py,sha256=yLcQgs8gvL0Yb4lqJ3ZFsWFBdU5vkeQtOfSWUyVBqlk,2624
181
+ opencensus/trace/time_event.py,sha256=bEZ8_6zVheAn8V-3OI5lL76mZxJ-9tJUOCJBMAZSXS0,4301
182
+ opencensus/trace/trace_options.py,sha256=I-SOm3_iaPyjjZR69D4XmFcQaEqTu19A-Btk6iaR72o,2651
183
+ opencensus/trace/tracer.py,sha256=bORAnkVg1y__ZJt3keuxqms2dB4NpW6zclKNDYaUK0I,5166
184
+ opencensus/trace/tracers/__init__.py,sha256=3cwrGSSn3JMa-Au3B1875PPwj3Ndg-lxgslMZRNsW3s,596
185
+ opencensus/trace/tracers/__pycache__/__init__.cpython-311.pyc,,
186
+ opencensus/trace/tracers/__pycache__/base.cpython-311.pyc,,
187
+ opencensus/trace/tracers/__pycache__/context_tracer.cpython-311.pyc,,
188
+ opencensus/trace/tracers/__pycache__/noop_tracer.cpython-311.pyc,,
189
+ opencensus/trace/tracers/base.py,sha256=KRi_n-OAvpFHZnhLl8ZTlWofTmvoZEeaa18OMaG9Kr4,2622
190
+ opencensus/trace/tracers/context_tracer.py,sha256=RGTy0IzYEsEgxj5u6eBG7PrmYfs7Pq-0ZCyDSHBJN-Q,5991
191
+ opencensus/trace/tracers/noop_tracer.py,sha256=CotRxATBNii4YSNi--7_fl7-iIFj1z6rQmCzzk2f07k,2686
192
+ opencensus/trace/tracestate.py,sha256=mKFckhglUTxHdwNnoGdHoaQeJcdOvYPCbCX7mIr0MUI,2516
193
+ opencensus/trace/utils.py,sha256=khhyGNpkjOL8Sognf_zdZn_WSiHHKhrgSf31mDAtsBU,4059
.venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/WHEEL ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ Wheel-Version: 1.0
2
+ Generator: bdist_wheel (0.36.2)
3
+ Root-Is-Purelib: true
4
+ Tag: py2-none-any
5
+ Tag: py3-none-any
6
+
.venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/namespace_packages.txt ADDED
@@ -0,0 +1 @@
 
 
1
+
.venv/lib/python3.11/site-packages/opencensus-0.11.4.dist-info/top_level.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ opencensus
.venv/lib/python3.11/site-packages/pybind11/__init__.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import sys
4
+
5
+ if sys.version_info < (3, 7): # noqa: UP036
6
+ msg = "pybind11 does not support Python < 3.7. v2.12 was the last release supporting Python 3.6."
7
+ raise ImportError(msg)
8
+
9
+
10
+ from ._version import __version__, version_info
11
+ from .commands import get_cmake_dir, get_include, get_pkgconfig_dir
12
+
13
+ __all__ = (
14
+ "version_info",
15
+ "__version__",
16
+ "get_include",
17
+ "get_cmake_dir",
18
+ "get_pkgconfig_dir",
19
+ )
.venv/lib/python3.11/site-packages/pybind11/__main__.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # pylint: disable=missing-function-docstring
2
+ from __future__ import annotations
3
+
4
+ import argparse
5
+ import re
6
+ import sys
7
+ import sysconfig
8
+
9
+ from ._version import __version__
10
+ from .commands import get_cmake_dir, get_include, get_pkgconfig_dir
11
+
12
+ # This is the conditional used for os.path being posixpath
13
+ if "posix" in sys.builtin_module_names:
14
+ from shlex import quote
15
+ elif "nt" in sys.builtin_module_names:
16
+ # See https://github.com/mesonbuild/meson/blob/db22551ed9d2dd7889abea01cc1c7bba02bf1c75/mesonbuild/utils/universal.py#L1092-L1121
17
+ # and the original documents:
18
+ # https://docs.microsoft.com/en-us/cpp/c-language/parsing-c-command-line-arguments and
19
+ # https://blogs.msdn.microsoft.com/twistylittlepassagesallalike/2011/04/23/everyone-quotes-command-line-arguments-the-wrong-way/
20
+ UNSAFE = re.compile("[ \t\n\r]")
21
+
22
+ def quote(s: str) -> str:
23
+ if s and not UNSAFE.search(s):
24
+ return s
25
+
26
+ # Paths cannot contain a '"' on Windows, so we don't need to worry
27
+ # about nuanced counting here.
28
+ return f'"{s}\\"' if s.endswith("\\") else f'"{s}"'
29
+ else:
30
+
31
+ def quote(s: str) -> str:
32
+ return s
33
+
34
+
35
+ def print_includes() -> None:
36
+ dirs = [
37
+ sysconfig.get_path("include"),
38
+ sysconfig.get_path("platinclude"),
39
+ get_include(),
40
+ ]
41
+
42
+ # Make unique but preserve order
43
+ unique_dirs = []
44
+ for d in dirs:
45
+ if d and d not in unique_dirs:
46
+ unique_dirs.append(d)
47
+
48
+ print(" ".join(quote(f"-I{d}") for d in unique_dirs))
49
+
50
+
51
+ def main() -> None:
52
+ parser = argparse.ArgumentParser()
53
+ parser.add_argument(
54
+ "--version",
55
+ action="version",
56
+ version=__version__,
57
+ help="Print the version and exit.",
58
+ )
59
+ parser.add_argument(
60
+ "--includes",
61
+ action="store_true",
62
+ help="Include flags for both pybind11 and Python headers.",
63
+ )
64
+ parser.add_argument(
65
+ "--cmakedir",
66
+ action="store_true",
67
+ help="Print the CMake module directory, ideal for setting -Dpybind11_ROOT in CMake.",
68
+ )
69
+ parser.add_argument(
70
+ "--pkgconfigdir",
71
+ action="store_true",
72
+ help="Print the pkgconfig directory, ideal for setting $PKG_CONFIG_PATH.",
73
+ )
74
+ args = parser.parse_args()
75
+ if not sys.argv[1:]:
76
+ parser.print_help()
77
+ if args.includes:
78
+ print_includes()
79
+ if args.cmakedir:
80
+ print(quote(get_cmake_dir()))
81
+ if args.pkgconfigdir:
82
+ print(quote(get_pkgconfig_dir()))
83
+
84
+
85
+ if __name__ == "__main__":
86
+ main()
.venv/lib/python3.11/site-packages/pybind11/_version.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+
4
+ def _to_int(s: str) -> int | str:
5
+ try:
6
+ return int(s)
7
+ except ValueError:
8
+ return s
9
+
10
+
11
+ __version__ = "2.13.6"
12
+ version_info = tuple(_to_int(s) for s in __version__.split("."))
.venv/lib/python3.11/site-packages/pybind11/commands.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+
3
+ import os
4
+
5
+ DIR = os.path.abspath(os.path.dirname(__file__))
6
+
7
+
8
+ def get_include(user: bool = False) -> str: # noqa: ARG001
9
+ """
10
+ Return the path to the pybind11 include directory. The historical "user"
11
+ argument is unused, and may be removed.
12
+ """
13
+ installed_path = os.path.join(DIR, "include")
14
+ source_path = os.path.join(os.path.dirname(DIR), "include")
15
+ return installed_path if os.path.exists(installed_path) else source_path
16
+
17
+
18
+ def get_cmake_dir() -> str:
19
+ """
20
+ Return the path to the pybind11 CMake module directory.
21
+ """
22
+ cmake_installed_path = os.path.join(DIR, "share", "cmake", "pybind11")
23
+ if os.path.exists(cmake_installed_path):
24
+ return cmake_installed_path
25
+
26
+ msg = "pybind11 not installed, installation required to access the CMake files"
27
+ raise ImportError(msg)
28
+
29
+
30
+ def get_pkgconfig_dir() -> str:
31
+ """
32
+ Return the path to the pybind11 pkgconfig directory.
33
+ """
34
+ pkgconfig_installed_path = os.path.join(DIR, "share", "pkgconfig")
35
+ if os.path.exists(pkgconfig_installed_path):
36
+ return pkgconfig_installed_path
37
+
38
+ msg = "pybind11 not installed, installation required to access the pkgconfig files"
39
+ raise ImportError(msg)
.venv/lib/python3.11/site-packages/pybind11/include/pybind11/attr.h ADDED
@@ -0,0 +1,690 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ pybind11/attr.h: Infrastructure for processing custom
3
+ type and function attributes
4
+
5
+ Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
6
+
7
+ All rights reserved. Use of this source code is governed by a
8
+ BSD-style license that can be found in the LICENSE file.
9
+ */
10
+
11
+ #pragma once
12
+
13
+ #include "detail/common.h"
14
+ #include "cast.h"
15
+
16
+ #include <functional>
17
+
18
+ PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
19
+
20
+ /// \addtogroup annotations
21
+ /// @{
22
+
23
+ /// Annotation for methods
24
+ struct is_method {
25
+ handle class_;
26
+ explicit is_method(const handle &c) : class_(c) {}
27
+ };
28
+
29
+ /// Annotation for setters
30
+ struct is_setter {};
31
+
32
+ /// Annotation for operators
33
+ struct is_operator {};
34
+
35
+ /// Annotation for classes that cannot be subclassed
36
+ struct is_final {};
37
+
38
+ /// Annotation for parent scope
39
+ struct scope {
40
+ handle value;
41
+ explicit scope(const handle &s) : value(s) {}
42
+ };
43
+
44
+ /// Annotation for documentation
45
+ struct doc {
46
+ const char *value;
47
+ explicit doc(const char *value) : value(value) {}
48
+ };
49
+
50
+ /// Annotation for function names
51
+ struct name {
52
+ const char *value;
53
+ explicit name(const char *value) : value(value) {}
54
+ };
55
+
56
+ /// Annotation indicating that a function is an overload associated with a given "sibling"
57
+ struct sibling {
58
+ handle value;
59
+ explicit sibling(const handle &value) : value(value.ptr()) {}
60
+ };
61
+
62
+ /// Annotation indicating that a class derives from another given type
63
+ template <typename T>
64
+ struct base {
65
+
66
+ PYBIND11_DEPRECATED(
67
+ "base<T>() was deprecated in favor of specifying 'T' as a template argument to class_")
68
+ base() = default;
69
+ };
70
+
71
+ /// Keep patient alive while nurse lives
72
+ template <size_t Nurse, size_t Patient>
73
+ struct keep_alive {};
74
+
75
+ /// Annotation indicating that a class is involved in a multiple inheritance relationship
76
+ struct multiple_inheritance {};
77
+
78
+ /// Annotation which enables dynamic attributes, i.e. adds `__dict__` to a class
79
+ struct dynamic_attr {};
80
+
81
+ /// Annotation which enables the buffer protocol for a type
82
+ struct buffer_protocol {};
83
+
84
+ /// Annotation which requests that a special metaclass is created for a type
85
+ struct metaclass {
86
+ handle value;
87
+
88
+ PYBIND11_DEPRECATED("py::metaclass() is no longer required. It's turned on by default now.")
89
+ metaclass() = default;
90
+
91
+ /// Override pybind11's default metaclass
92
+ explicit metaclass(handle value) : value(value) {}
93
+ };
94
+
95
+ /// Specifies a custom callback with signature `void (PyHeapTypeObject*)` that
96
+ /// may be used to customize the Python type.
97
+ ///
98
+ /// The callback is invoked immediately before `PyType_Ready`.
99
+ ///
100
+ /// Note: This is an advanced interface, and uses of it may require changes to
101
+ /// work with later versions of pybind11. You may wish to consult the
102
+ /// implementation of `make_new_python_type` in `detail/classes.h` to understand
103
+ /// the context in which the callback will be run.
104
+ struct custom_type_setup {
105
+ using callback = std::function<void(PyHeapTypeObject *heap_type)>;
106
+
107
+ explicit custom_type_setup(callback value) : value(std::move(value)) {}
108
+
109
+ callback value;
110
+ };
111
+
112
+ /// Annotation that marks a class as local to the module:
113
+ struct module_local {
114
+ const bool value;
115
+ constexpr explicit module_local(bool v = true) : value(v) {}
116
+ };
117
+
118
+ /// Annotation to mark enums as an arithmetic type
119
+ struct arithmetic {};
120
+
121
+ /// Mark a function for addition at the beginning of the existing overload chain instead of the end
122
+ struct prepend {};
123
+
124
+ /** \rst
125
+ A call policy which places one or more guard variables (``Ts...``) around the function call.
126
+
127
+ For example, this definition:
128
+
129
+ .. code-block:: cpp
130
+
131
+ m.def("foo", foo, py::call_guard<T>());
132
+
133
+ is equivalent to the following pseudocode:
134
+
135
+ .. code-block:: cpp
136
+
137
+ m.def("foo", [](args...) {
138
+ T scope_guard;
139
+ return foo(args...); // forwarded arguments
140
+ });
141
+ \endrst */
142
+ template <typename... Ts>
143
+ struct call_guard;
144
+
145
+ template <>
146
+ struct call_guard<> {
147
+ using type = detail::void_type;
148
+ };
149
+
150
+ template <typename T>
151
+ struct call_guard<T> {
152
+ static_assert(std::is_default_constructible<T>::value,
153
+ "The guard type must be default constructible");
154
+
155
+ using type = T;
156
+ };
157
+
158
+ template <typename T, typename... Ts>
159
+ struct call_guard<T, Ts...> {
160
+ struct type {
161
+ T guard{}; // Compose multiple guard types with left-to-right default-constructor order
162
+ typename call_guard<Ts...>::type next{};
163
+ };
164
+ };
165
+
166
+ /// @} annotations
167
+
168
+ PYBIND11_NAMESPACE_BEGIN(detail)
169
+ /* Forward declarations */
170
+ enum op_id : int;
171
+ enum op_type : int;
172
+ struct undefined_t;
173
+ template <op_id id, op_type ot, typename L = undefined_t, typename R = undefined_t>
174
+ struct op_;
175
+ void keep_alive_impl(size_t Nurse, size_t Patient, function_call &call, handle ret);
176
+
177
+ /// Internal data structure which holds metadata about a keyword argument
178
+ struct argument_record {
179
+ const char *name; ///< Argument name
180
+ const char *descr; ///< Human-readable version of the argument value
181
+ handle value; ///< Associated Python object
182
+ bool convert : 1; ///< True if the argument is allowed to convert when loading
183
+ bool none : 1; ///< True if None is allowed when loading
184
+
185
+ argument_record(const char *name, const char *descr, handle value, bool convert, bool none)
186
+ : name(name), descr(descr), value(value), convert(convert), none(none) {}
187
+ };
188
+
189
+ /// Internal data structure which holds metadata about a bound function (signature, overloads,
190
+ /// etc.)
191
+ struct function_record {
192
+ function_record()
193
+ : is_constructor(false), is_new_style_constructor(false), is_stateless(false),
194
+ is_operator(false), is_method(false), is_setter(false), has_args(false),
195
+ has_kwargs(false), prepend(false) {}
196
+
197
+ /// Function name
198
+ char *name = nullptr; /* why no C++ strings? They generate heavier code.. */
199
+
200
+ // User-specified documentation string
201
+ char *doc = nullptr;
202
+
203
+ /// Human-readable version of the function signature
204
+ char *signature = nullptr;
205
+
206
+ /// List of registered keyword arguments
207
+ std::vector<argument_record> args;
208
+
209
+ /// Pointer to lambda function which converts arguments and performs the actual call
210
+ handle (*impl)(function_call &) = nullptr;
211
+
212
+ /// Storage for the wrapped function pointer and captured data, if any
213
+ void *data[3] = {};
214
+
215
+ /// Pointer to custom destructor for 'data' (if needed)
216
+ void (*free_data)(function_record *ptr) = nullptr;
217
+
218
+ /// Return value policy associated with this function
219
+ return_value_policy policy = return_value_policy::automatic;
220
+
221
+ /// True if name == '__init__'
222
+ bool is_constructor : 1;
223
+
224
+ /// True if this is a new-style `__init__` defined in `detail/init.h`
225
+ bool is_new_style_constructor : 1;
226
+
227
+ /// True if this is a stateless function pointer
228
+ bool is_stateless : 1;
229
+
230
+ /// True if this is an operator (__add__), etc.
231
+ bool is_operator : 1;
232
+
233
+ /// True if this is a method
234
+ bool is_method : 1;
235
+
236
+ /// True if this is a setter
237
+ bool is_setter : 1;
238
+
239
+ /// True if the function has a '*args' argument
240
+ bool has_args : 1;
241
+
242
+ /// True if the function has a '**kwargs' argument
243
+ bool has_kwargs : 1;
244
+
245
+ /// True if this function is to be inserted at the beginning of the overload resolution chain
246
+ bool prepend : 1;
247
+
248
+ /// Number of arguments (including py::args and/or py::kwargs, if present)
249
+ std::uint16_t nargs;
250
+
251
+ /// Number of leading positional arguments, which are terminated by a py::args or py::kwargs
252
+ /// argument or by a py::kw_only annotation.
253
+ std::uint16_t nargs_pos = 0;
254
+
255
+ /// Number of leading arguments (counted in `nargs`) that are positional-only
256
+ std::uint16_t nargs_pos_only = 0;
257
+
258
+ /// Python method object
259
+ PyMethodDef *def = nullptr;
260
+
261
+ /// Python handle to the parent scope (a class or a module)
262
+ handle scope;
263
+
264
+ /// Python handle to the sibling function representing an overload chain
265
+ handle sibling;
266
+
267
+ /// Pointer to next overload
268
+ function_record *next = nullptr;
269
+ };
270
+
271
+ /// Special data structure which (temporarily) holds metadata about a bound class
272
+ struct type_record {
273
+ PYBIND11_NOINLINE type_record()
274
+ : multiple_inheritance(false), dynamic_attr(false), buffer_protocol(false),
275
+ default_holder(true), module_local(false), is_final(false) {}
276
+
277
+ /// Handle to the parent scope
278
+ handle scope;
279
+
280
+ /// Name of the class
281
+ const char *name = nullptr;
282
+
283
+ // Pointer to RTTI type_info data structure
284
+ const std::type_info *type = nullptr;
285
+
286
+ /// How large is the underlying C++ type?
287
+ size_t type_size = 0;
288
+
289
+ /// What is the alignment of the underlying C++ type?
290
+ size_t type_align = 0;
291
+
292
+ /// How large is the type's holder?
293
+ size_t holder_size = 0;
294
+
295
+ /// The global operator new can be overridden with a class-specific variant
296
+ void *(*operator_new)(size_t) = nullptr;
297
+
298
+ /// Function pointer to class_<..>::init_instance
299
+ void (*init_instance)(instance *, const void *) = nullptr;
300
+
301
+ /// Function pointer to class_<..>::dealloc
302
+ void (*dealloc)(detail::value_and_holder &) = nullptr;
303
+
304
+ /// List of base classes of the newly created type
305
+ list bases;
306
+
307
+ /// Optional docstring
308
+ const char *doc = nullptr;
309
+
310
+ /// Custom metaclass (optional)
311
+ handle metaclass;
312
+
313
+ /// Custom type setup.
314
+ custom_type_setup::callback custom_type_setup_callback;
315
+
316
+ /// Multiple inheritance marker
317
+ bool multiple_inheritance : 1;
318
+
319
+ /// Does the class manage a __dict__?
320
+ bool dynamic_attr : 1;
321
+
322
+ /// Does the class implement the buffer protocol?
323
+ bool buffer_protocol : 1;
324
+
325
+ /// Is the default (unique_ptr) holder type used?
326
+ bool default_holder : 1;
327
+
328
+ /// Is the class definition local to the module shared object?
329
+ bool module_local : 1;
330
+
331
+ /// Is the class inheritable from python classes?
332
+ bool is_final : 1;
333
+
334
+ PYBIND11_NOINLINE void add_base(const std::type_info &base, void *(*caster)(void *) ) {
335
+ auto *base_info = detail::get_type_info(base, false);
336
+ if (!base_info) {
337
+ std::string tname(base.name());
338
+ detail::clean_type_id(tname);
339
+ pybind11_fail("generic_type: type \"" + std::string(name)
340
+ + "\" referenced unknown base type \"" + tname + "\"");
341
+ }
342
+
343
+ if (default_holder != base_info->default_holder) {
344
+ std::string tname(base.name());
345
+ detail::clean_type_id(tname);
346
+ pybind11_fail("generic_type: type \"" + std::string(name) + "\" "
347
+ + (default_holder ? "does not have" : "has")
348
+ + " a non-default holder type while its base \"" + tname + "\" "
349
+ + (base_info->default_holder ? "does not" : "does"));
350
+ }
351
+
352
+ bases.append((PyObject *) base_info->type);
353
+
354
+ #if PY_VERSION_HEX < 0x030B0000
355
+ dynamic_attr |= base_info->type->tp_dictoffset != 0;
356
+ #else
357
+ dynamic_attr |= (base_info->type->tp_flags & Py_TPFLAGS_MANAGED_DICT) != 0;
358
+ #endif
359
+
360
+ if (caster) {
361
+ base_info->implicit_casts.emplace_back(type, caster);
362
+ }
363
+ }
364
+ };
365
+
366
+ inline function_call::function_call(const function_record &f, handle p) : func(f), parent(p) {
367
+ args.reserve(f.nargs);
368
+ args_convert.reserve(f.nargs);
369
+ }
370
+
371
+ /// Tag for a new-style `__init__` defined in `detail/init.h`
372
+ struct is_new_style_constructor {};
373
+
374
+ /**
375
+ * Partial template specializations to process custom attributes provided to
376
+ * cpp_function_ and class_. These are either used to initialize the respective
377
+ * fields in the type_record and function_record data structures or executed at
378
+ * runtime to deal with custom call policies (e.g. keep_alive).
379
+ */
380
+ template <typename T, typename SFINAE = void>
381
+ struct process_attribute;
382
+
383
+ template <typename T>
384
+ struct process_attribute_default {
385
+ /// Default implementation: do nothing
386
+ static void init(const T &, function_record *) {}
387
+ static void init(const T &, type_record *) {}
388
+ static void precall(function_call &) {}
389
+ static void postcall(function_call &, handle) {}
390
+ };
391
+
392
+ /// Process an attribute specifying the function's name
393
+ template <>
394
+ struct process_attribute<name> : process_attribute_default<name> {
395
+ static void init(const name &n, function_record *r) { r->name = const_cast<char *>(n.value); }
396
+ };
397
+
398
+ /// Process an attribute specifying the function's docstring
399
+ template <>
400
+ struct process_attribute<doc> : process_attribute_default<doc> {
401
+ static void init(const doc &n, function_record *r) { r->doc = const_cast<char *>(n.value); }
402
+ };
403
+
404
+ /// Process an attribute specifying the function's docstring (provided as a C-style string)
405
+ template <>
406
+ struct process_attribute<const char *> : process_attribute_default<const char *> {
407
+ static void init(const char *d, function_record *r) { r->doc = const_cast<char *>(d); }
408
+ static void init(const char *d, type_record *r) { r->doc = d; }
409
+ };
410
+ template <>
411
+ struct process_attribute<char *> : process_attribute<const char *> {};
412
+
413
+ /// Process an attribute indicating the function's return value policy
414
+ template <>
415
+ struct process_attribute<return_value_policy> : process_attribute_default<return_value_policy> {
416
+ static void init(const return_value_policy &p, function_record *r) { r->policy = p; }
417
+ };
418
+
419
+ /// Process an attribute which indicates that this is an overloaded function associated with a
420
+ /// given sibling
421
+ template <>
422
+ struct process_attribute<sibling> : process_attribute_default<sibling> {
423
+ static void init(const sibling &s, function_record *r) { r->sibling = s.value; }
424
+ };
425
+
426
+ /// Process an attribute which indicates that this function is a method
427
+ template <>
428
+ struct process_attribute<is_method> : process_attribute_default<is_method> {
429
+ static void init(const is_method &s, function_record *r) {
430
+ r->is_method = true;
431
+ r->scope = s.class_;
432
+ }
433
+ };
434
+
435
+ /// Process an attribute which indicates that this function is a setter
436
+ template <>
437
+ struct process_attribute<is_setter> : process_attribute_default<is_setter> {
438
+ static void init(const is_setter &, function_record *r) { r->is_setter = true; }
439
+ };
440
+
441
+ /// Process an attribute which indicates the parent scope of a method
442
+ template <>
443
+ struct process_attribute<scope> : process_attribute_default<scope> {
444
+ static void init(const scope &s, function_record *r) { r->scope = s.value; }
445
+ };
446
+
447
+ /// Process an attribute which indicates that this function is an operator
448
+ template <>
449
+ struct process_attribute<is_operator> : process_attribute_default<is_operator> {
450
+ static void init(const is_operator &, function_record *r) { r->is_operator = true; }
451
+ };
452
+
453
+ template <>
454
+ struct process_attribute<is_new_style_constructor>
455
+ : process_attribute_default<is_new_style_constructor> {
456
+ static void init(const is_new_style_constructor &, function_record *r) {
457
+ r->is_new_style_constructor = true;
458
+ }
459
+ };
460
+
461
+ inline void check_kw_only_arg(const arg &a, function_record *r) {
462
+ if (r->args.size() > r->nargs_pos && (!a.name || a.name[0] == '\0')) {
463
+ pybind11_fail("arg(): cannot specify an unnamed argument after a kw_only() annotation or "
464
+ "args() argument");
465
+ }
466
+ }
467
+
468
+ inline void append_self_arg_if_needed(function_record *r) {
469
+ if (r->is_method && r->args.empty()) {
470
+ r->args.emplace_back("self", nullptr, handle(), /*convert=*/true, /*none=*/false);
471
+ }
472
+ }
473
+
474
+ /// Process a keyword argument attribute (*without* a default value)
475
+ template <>
476
+ struct process_attribute<arg> : process_attribute_default<arg> {
477
+ static void init(const arg &a, function_record *r) {
478
+ append_self_arg_if_needed(r);
479
+ r->args.emplace_back(a.name, nullptr, handle(), !a.flag_noconvert, a.flag_none);
480
+
481
+ check_kw_only_arg(a, r);
482
+ }
483
+ };
484
+
485
+ /// Process a keyword argument attribute (*with* a default value)
486
+ template <>
487
+ struct process_attribute<arg_v> : process_attribute_default<arg_v> {
488
+ static void init(const arg_v &a, function_record *r) {
489
+ if (r->is_method && r->args.empty()) {
490
+ r->args.emplace_back(
491
+ "self", /*descr=*/nullptr, /*parent=*/handle(), /*convert=*/true, /*none=*/false);
492
+ }
493
+
494
+ if (!a.value) {
495
+ #if defined(PYBIND11_DETAILED_ERROR_MESSAGES)
496
+ std::string descr("'");
497
+ if (a.name) {
498
+ descr += std::string(a.name) + ": ";
499
+ }
500
+ descr += a.type + "'";
501
+ if (r->is_method) {
502
+ if (r->name) {
503
+ descr += " in method '" + (std::string) str(r->scope) + "."
504
+ + (std::string) r->name + "'";
505
+ } else {
506
+ descr += " in method of '" + (std::string) str(r->scope) + "'";
507
+ }
508
+ } else if (r->name) {
509
+ descr += " in function '" + (std::string) r->name + "'";
510
+ }
511
+ pybind11_fail("arg(): could not convert default argument " + descr
512
+ + " into a Python object (type not registered yet?)");
513
+ #else
514
+ pybind11_fail("arg(): could not convert default argument "
515
+ "into a Python object (type not registered yet?). "
516
+ "#define PYBIND11_DETAILED_ERROR_MESSAGES or compile in debug mode for "
517
+ "more information.");
518
+ #endif
519
+ }
520
+ r->args.emplace_back(a.name, a.descr, a.value.inc_ref(), !a.flag_noconvert, a.flag_none);
521
+
522
+ check_kw_only_arg(a, r);
523
+ }
524
+ };
525
+
526
+ /// Process a keyword-only-arguments-follow pseudo argument
527
+ template <>
528
+ struct process_attribute<kw_only> : process_attribute_default<kw_only> {
529
+ static void init(const kw_only &, function_record *r) {
530
+ append_self_arg_if_needed(r);
531
+ if (r->has_args && r->nargs_pos != static_cast<std::uint16_t>(r->args.size())) {
532
+ pybind11_fail("Mismatched args() and kw_only(): they must occur at the same relative "
533
+ "argument location (or omit kw_only() entirely)");
534
+ }
535
+ r->nargs_pos = static_cast<std::uint16_t>(r->args.size());
536
+ }
537
+ };
538
+
539
+ /// Process a positional-only-argument maker
540
+ template <>
541
+ struct process_attribute<pos_only> : process_attribute_default<pos_only> {
542
+ static void init(const pos_only &, function_record *r) {
543
+ append_self_arg_if_needed(r);
544
+ r->nargs_pos_only = static_cast<std::uint16_t>(r->args.size());
545
+ if (r->nargs_pos_only > r->nargs_pos) {
546
+ pybind11_fail("pos_only(): cannot follow a py::args() argument");
547
+ }
548
+ // It also can't follow a kw_only, but a static_assert in pybind11.h checks that
549
+ }
550
+ };
551
+
552
+ /// Process a parent class attribute. Single inheritance only (class_ itself already guarantees
553
+ /// that)
554
+ template <typename T>
555
+ struct process_attribute<T, enable_if_t<is_pyobject<T>::value>>
556
+ : process_attribute_default<handle> {
557
+ static void init(const handle &h, type_record *r) { r->bases.append(h); }
558
+ };
559
+
560
+ /// Process a parent class attribute (deprecated, does not support multiple inheritance)
561
+ template <typename T>
562
+ struct process_attribute<base<T>> : process_attribute_default<base<T>> {
563
+ static void init(const base<T> &, type_record *r) { r->add_base(typeid(T), nullptr); }
564
+ };
565
+
566
+ /// Process a multiple inheritance attribute
567
+ template <>
568
+ struct process_attribute<multiple_inheritance> : process_attribute_default<multiple_inheritance> {
569
+ static void init(const multiple_inheritance &, type_record *r) {
570
+ r->multiple_inheritance = true;
571
+ }
572
+ };
573
+
574
+ template <>
575
+ struct process_attribute<dynamic_attr> : process_attribute_default<dynamic_attr> {
576
+ static void init(const dynamic_attr &, type_record *r) { r->dynamic_attr = true; }
577
+ };
578
+
579
+ template <>
580
+ struct process_attribute<custom_type_setup> {
581
+ static void init(const custom_type_setup &value, type_record *r) {
582
+ r->custom_type_setup_callback = value.value;
583
+ }
584
+ };
585
+
586
+ template <>
587
+ struct process_attribute<is_final> : process_attribute_default<is_final> {
588
+ static void init(const is_final &, type_record *r) { r->is_final = true; }
589
+ };
590
+
591
+ template <>
592
+ struct process_attribute<buffer_protocol> : process_attribute_default<buffer_protocol> {
593
+ static void init(const buffer_protocol &, type_record *r) { r->buffer_protocol = true; }
594
+ };
595
+
596
+ template <>
597
+ struct process_attribute<metaclass> : process_attribute_default<metaclass> {
598
+ static void init(const metaclass &m, type_record *r) { r->metaclass = m.value; }
599
+ };
600
+
601
+ template <>
602
+ struct process_attribute<module_local> : process_attribute_default<module_local> {
603
+ static void init(const module_local &l, type_record *r) { r->module_local = l.value; }
604
+ };
605
+
606
+ /// Process a 'prepend' attribute, putting this at the beginning of the overload chain
607
+ template <>
608
+ struct process_attribute<prepend> : process_attribute_default<prepend> {
609
+ static void init(const prepend &, function_record *r) { r->prepend = true; }
610
+ };
611
+
612
+ /// Process an 'arithmetic' attribute for enums (does nothing here)
613
+ template <>
614
+ struct process_attribute<arithmetic> : process_attribute_default<arithmetic> {};
615
+
616
+ template <typename... Ts>
617
+ struct process_attribute<call_guard<Ts...>> : process_attribute_default<call_guard<Ts...>> {};
618
+
619
+ /**
620
+ * Process a keep_alive call policy -- invokes keep_alive_impl during the
621
+ * pre-call handler if both Nurse, Patient != 0 and use the post-call handler
622
+ * otherwise
623
+ */
624
+ template <size_t Nurse, size_t Patient>
625
+ struct process_attribute<keep_alive<Nurse, Patient>>
626
+ : public process_attribute_default<keep_alive<Nurse, Patient>> {
627
+ template <size_t N = Nurse, size_t P = Patient, enable_if_t<N != 0 && P != 0, int> = 0>
628
+ static void precall(function_call &call) {
629
+ keep_alive_impl(Nurse, Patient, call, handle());
630
+ }
631
+ template <size_t N = Nurse, size_t P = Patient, enable_if_t<N != 0 && P != 0, int> = 0>
632
+ static void postcall(function_call &, handle) {}
633
+ template <size_t N = Nurse, size_t P = Patient, enable_if_t<N == 0 || P == 0, int> = 0>
634
+ static void precall(function_call &) {}
635
+ template <size_t N = Nurse, size_t P = Patient, enable_if_t<N == 0 || P == 0, int> = 0>
636
+ static void postcall(function_call &call, handle ret) {
637
+ keep_alive_impl(Nurse, Patient, call, ret);
638
+ }
639
+ };
640
+
641
+ /// Recursively iterate over variadic template arguments
642
+ template <typename... Args>
643
+ struct process_attributes {
644
+ static void init(const Args &...args, function_record *r) {
645
+ PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(r);
646
+ PYBIND11_WORKAROUND_INCORRECT_GCC_UNUSED_BUT_SET_PARAMETER(r);
647
+ using expander = int[];
648
+ (void) expander{
649
+ 0, ((void) process_attribute<typename std::decay<Args>::type>::init(args, r), 0)...};
650
+ }
651
+ static void init(const Args &...args, type_record *r) {
652
+ PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(r);
653
+ PYBIND11_WORKAROUND_INCORRECT_GCC_UNUSED_BUT_SET_PARAMETER(r);
654
+ using expander = int[];
655
+ (void) expander{0,
656
+ (process_attribute<typename std::decay<Args>::type>::init(args, r), 0)...};
657
+ }
658
+ static void precall(function_call &call) {
659
+ PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(call);
660
+ using expander = int[];
661
+ (void) expander{0,
662
+ (process_attribute<typename std::decay<Args>::type>::precall(call), 0)...};
663
+ }
664
+ static void postcall(function_call &call, handle fn_ret) {
665
+ PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(call, fn_ret);
666
+ PYBIND11_WORKAROUND_INCORRECT_GCC_UNUSED_BUT_SET_PARAMETER(fn_ret);
667
+ using expander = int[];
668
+ (void) expander{
669
+ 0, (process_attribute<typename std::decay<Args>::type>::postcall(call, fn_ret), 0)...};
670
+ }
671
+ };
672
+
673
+ template <typename T>
674
+ using is_call_guard = is_instantiation<call_guard, T>;
675
+
676
+ /// Extract the ``type`` from the first `call_guard` in `Extras...` (or `void_type` if none found)
677
+ template <typename... Extra>
678
+ using extract_guard_t = typename exactly_one_t<is_call_guard, call_guard<>, Extra...>::type;
679
+
680
+ /// Check the number of named arguments at compile time
681
+ template <typename... Extra,
682
+ size_t named = constexpr_sum(std::is_base_of<arg, Extra>::value...),
683
+ size_t self = constexpr_sum(std::is_same<is_method, Extra>::value...)>
684
+ constexpr bool expected_num_args(size_t nargs, bool has_args, bool has_kwargs) {
685
+ PYBIND11_WORKAROUND_INCORRECT_MSVC_C4100(nargs, has_args, has_kwargs);
686
+ return named == 0 || (self + named + size_t(has_args) + size_t(has_kwargs)) == nargs;
687
+ }
688
+
689
+ PYBIND11_NAMESPACE_END(detail)
690
+ PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)
.venv/lib/python3.11/site-packages/pybind11/include/pybind11/buffer_info.h ADDED
@@ -0,0 +1,208 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ /*
2
+ pybind11/buffer_info.h: Python buffer object interface
3
+
4
+ Copyright (c) 2016 Wenzel Jakob <wenzel.jakob@epfl.ch>
5
+
6
+ All rights reserved. Use of this source code is governed by a
7
+ BSD-style license that can be found in the LICENSE file.
8
+ */
9
+
10
+ #pragma once
11
+
12
+ #include "detail/common.h"
13
+
14
+ PYBIND11_NAMESPACE_BEGIN(PYBIND11_NAMESPACE)
15
+
16
+ PYBIND11_NAMESPACE_BEGIN(detail)
17
+
18
+ // Default, C-style strides
19
+ inline std::vector<ssize_t> c_strides(const std::vector<ssize_t> &shape, ssize_t itemsize) {
20
+ auto ndim = shape.size();
21
+ std::vector<ssize_t> strides(ndim, itemsize);
22
+ if (ndim > 0) {
23
+ for (size_t i = ndim - 1; i > 0; --i) {
24
+ strides[i - 1] = strides[i] * shape[i];
25
+ }
26
+ }
27
+ return strides;
28
+ }
29
+
30
+ // F-style strides; default when constructing an array_t with `ExtraFlags & f_style`
31
+ inline std::vector<ssize_t> f_strides(const std::vector<ssize_t> &shape, ssize_t itemsize) {
32
+ auto ndim = shape.size();
33
+ std::vector<ssize_t> strides(ndim, itemsize);
34
+ for (size_t i = 1; i < ndim; ++i) {
35
+ strides[i] = strides[i - 1] * shape[i - 1];
36
+ }
37
+ return strides;
38
+ }
39
+
40
+ template <typename T, typename SFINAE = void>
41
+ struct compare_buffer_info;
42
+
43
+ PYBIND11_NAMESPACE_END(detail)
44
+
45
+ /// Information record describing a Python buffer object
46
+ struct buffer_info {
47
+ void *ptr = nullptr; // Pointer to the underlying storage
48
+ ssize_t itemsize = 0; // Size of individual items in bytes
49
+ ssize_t size = 0; // Total number of entries
50
+ std::string format; // For homogeneous buffers, this should be set to
51
+ // format_descriptor<T>::format()
52
+ ssize_t ndim = 0; // Number of dimensions
53
+ std::vector<ssize_t> shape; // Shape of the tensor (1 entry per dimension)
54
+ std::vector<ssize_t> strides; // Number of bytes between adjacent entries
55
+ // (for each per dimension)
56
+ bool readonly = false; // flag to indicate if the underlying storage may be written to
57
+
58
+ buffer_info() = default;
59
+
60
+ buffer_info(void *ptr,
61
+ ssize_t itemsize,
62
+ const std::string &format,
63
+ ssize_t ndim,
64
+ detail::any_container<ssize_t> shape_in,
65
+ detail::any_container<ssize_t> strides_in,
66
+ bool readonly = false)
67
+ : ptr(ptr), itemsize(itemsize), size(1), format(format), ndim(ndim),
68
+ shape(std::move(shape_in)), strides(std::move(strides_in)), readonly(readonly) {
69
+ if (ndim != (ssize_t) shape.size() || ndim != (ssize_t) strides.size()) {
70
+ pybind11_fail("buffer_info: ndim doesn't match shape and/or strides length");
71
+ }
72
+ for (size_t i = 0; i < (size_t) ndim; ++i) {
73
+ size *= shape[i];
74
+ }
75
+ }
76
+
77
+ template <typename T>
78
+ buffer_info(T *ptr,
79
+ detail::any_container<ssize_t> shape_in,
80
+ detail::any_container<ssize_t> strides_in,
81
+ bool readonly = false)
82
+ : buffer_info(private_ctr_tag(),
83
+ ptr,
84
+ sizeof(T),
85
+ format_descriptor<T>::format(),
86
+ static_cast<ssize_t>(shape_in->size()),
87
+ std::move(shape_in),
88
+ std::move(strides_in),
89
+ readonly) {}
90
+
91
+ buffer_info(void *ptr,
92
+ ssize_t itemsize,
93
+ const std::string &format,
94
+ ssize_t size,
95
+ bool readonly = false)
96
+ : buffer_info(ptr, itemsize, format, 1, {size}, {itemsize}, readonly) {}
97
+
98
+ template <typename T>
99
+ buffer_info(T *ptr, ssize_t size, bool readonly = false)
100
+ : buffer_info(ptr, sizeof(T), format_descriptor<T>::format(), size, readonly) {}
101
+
102
+ template <typename T>
103
+ buffer_info(const T *ptr, ssize_t size, bool readonly = true)
104
+ : buffer_info(
105
+ const_cast<T *>(ptr), sizeof(T), format_descriptor<T>::format(), size, readonly) {}
106
+
107
+ explicit buffer_info(Py_buffer *view, bool ownview = true)
108
+ : buffer_info(
109
+ view->buf,
110
+ view->itemsize,
111
+ view->format,
112
+ view->ndim,
113
+ {view->shape, view->shape + view->ndim},
114
+ /* Though buffer::request() requests PyBUF_STRIDES, ctypes objects
115
+ * ignore this flag and return a view with NULL strides.
116
+ * When strides are NULL, build them manually. */
117
+ view->strides
118
+ ? std::vector<ssize_t>(view->strides, view->strides + view->ndim)
119
+ : detail::c_strides({view->shape, view->shape + view->ndim}, view->itemsize),
120
+ (view->readonly != 0)) {
121
+ // NOLINTNEXTLINE(cppcoreguidelines-prefer-member-initializer)
122
+ this->m_view = view;
123
+ // NOLINTNEXTLINE(cppcoreguidelines-prefer-member-initializer)
124
+ this->ownview = ownview;
125
+ }
126
+
127
+ buffer_info(const buffer_info &) = delete;
128
+ buffer_info &operator=(const buffer_info &) = delete;
129
+
130
+ buffer_info(buffer_info &&other) noexcept { (*this) = std::move(other); }
131
+
132
+ buffer_info &operator=(buffer_info &&rhs) noexcept {
133
+ ptr = rhs.ptr;
134
+ itemsize = rhs.itemsize;
135
+ size = rhs.size;
136
+ format = std::move(rhs.format);
137
+ ndim = rhs.ndim;
138
+ shape = std::move(rhs.shape);
139
+ strides = std::move(rhs.strides);
140
+ std::swap(m_view, rhs.m_view);
141
+ std::swap(ownview, rhs.ownview);
142
+ readonly = rhs.readonly;
143
+ return *this;
144
+ }
145
+
146
+ ~buffer_info() {
147
+ if (m_view && ownview) {
148
+ PyBuffer_Release(m_view);
149
+ delete m_view;
150
+ }
151
+ }
152
+
153
+ Py_buffer *view() const { return m_view; }
154
+ Py_buffer *&view() { return m_view; }
155
+
156
+ /* True if the buffer item type is equivalent to `T`. */
157
+ // To define "equivalent" by example:
158
+ // `buffer_info::item_type_is_equivalent_to<int>(b)` and
159
+ // `buffer_info::item_type_is_equivalent_to<long>(b)` may both be true
160
+ // on some platforms, but `int` and `unsigned` will never be equivalent.
161
+ // For the ground truth, please inspect `detail::compare_buffer_info<>`.
162
+ template <typename T>
163
+ bool item_type_is_equivalent_to() const {
164
+ return detail::compare_buffer_info<T>::compare(*this);
165
+ }
166
+
167
+ private:
168
+ struct private_ctr_tag {};
169
+
170
+ buffer_info(private_ctr_tag,
171
+ void *ptr,
172
+ ssize_t itemsize,
173
+ const std::string &format,
174
+ ssize_t ndim,
175
+ detail::any_container<ssize_t> &&shape_in,
176
+ detail::any_container<ssize_t> &&strides_in,
177
+ bool readonly)
178
+ : buffer_info(
179
+ ptr, itemsize, format, ndim, std::move(shape_in), std::move(strides_in), readonly) {}
180
+
181
+ Py_buffer *m_view = nullptr;
182
+ bool ownview = false;
183
+ };
184
+
185
+ PYBIND11_NAMESPACE_BEGIN(detail)
186
+
187
+ template <typename T, typename SFINAE>
188
+ struct compare_buffer_info {
189
+ static bool compare(const buffer_info &b) {
190
+ // NOLINTNEXTLINE(bugprone-sizeof-expression) Needed for `PyObject *`
191
+ return b.format == format_descriptor<T>::format() && b.itemsize == (ssize_t) sizeof(T);
192
+ }
193
+ };
194
+
195
+ template <typename T>
196
+ struct compare_buffer_info<T, detail::enable_if_t<std::is_integral<T>::value>> {
197
+ static bool compare(const buffer_info &b) {
198
+ return (size_t) b.itemsize == sizeof(T)
199
+ && (b.format == format_descriptor<T>::value
200
+ || ((sizeof(T) == sizeof(long))
201
+ && b.format == (std::is_unsigned<T>::value ? "L" : "l"))
202
+ || ((sizeof(T) == sizeof(size_t))
203
+ && b.format == (std::is_unsigned<T>::value ? "N" : "n")));
204
+ }
205
+ };
206
+
207
+ PYBIND11_NAMESPACE_END(detail)
208
+ PYBIND11_NAMESPACE_END(PYBIND11_NAMESPACE)