ZTWHHH commited on
Commit
10f9506
·
verified ·
1 Parent(s): 36a70b1

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. llava_next/lib/python3.10/site-packages/numpy/ma/API_CHANGES.txt +135 -0
  2. llava_next/lib/python3.10/site-packages/numpy/ma/LICENSE +24 -0
  3. llava_next/lib/python3.10/site-packages/numpy/ma/README.rst +236 -0
  4. llava_next/lib/python3.10/site-packages/numpy/ma/__init__.py +54 -0
  5. llava_next/lib/python3.10/site-packages/numpy/ma/__init__.pyi +234 -0
  6. llava_next/lib/python3.10/site-packages/numpy/ma/core.py +0 -0
  7. llava_next/lib/python3.10/site-packages/numpy/ma/core.pyi +471 -0
  8. llava_next/lib/python3.10/site-packages/numpy/ma/extras.py +2133 -0
  9. llava_next/lib/python3.10/site-packages/numpy/ma/extras.pyi +85 -0
  10. llava_next/lib/python3.10/site-packages/numpy/ma/mrecords.py +783 -0
  11. llava_next/lib/python3.10/site-packages/numpy/ma/mrecords.pyi +90 -0
  12. llava_next/lib/python3.10/site-packages/numpy/ma/setup.py +12 -0
  13. llava_next/lib/python3.10/site-packages/numpy/ma/tests/test_old_ma.py +874 -0
  14. llava_next/lib/python3.10/site-packages/numpy/ma/testutils.py +288 -0
  15. llava_next/lib/python3.10/site-packages/numpy/ma/timer_comparison.py +443 -0
  16. parrot/lib/python3.10/site-packages/sympy/codegen/__pycache__/abstract_nodes.cpython-310.pyc +0 -0
  17. parrot/lib/python3.10/site-packages/sympy/codegen/__pycache__/approximations.cpython-310.pyc +0 -0
  18. parrot/lib/python3.10/site-packages/sympy/codegen/__pycache__/cfunctions.cpython-310.pyc +0 -0
  19. parrot/lib/python3.10/site-packages/sympy/codegen/__pycache__/cnodes.cpython-310.pyc +0 -0
  20. parrot/lib/python3.10/site-packages/sympy/codegen/__pycache__/cutils.cpython-310.pyc +0 -0
  21. parrot/lib/python3.10/site-packages/sympy/codegen/__pycache__/fnodes.cpython-310.pyc +0 -0
  22. parrot/lib/python3.10/site-packages/sympy/codegen/__pycache__/matrix_nodes.cpython-310.pyc +0 -0
  23. parrot/lib/python3.10/site-packages/sympy/codegen/cutils.py +8 -0
  24. parrot/lib/python3.10/site-packages/sympy/core/operations.py +718 -0
  25. parrot/lib/python3.10/site-packages/sympy/core/random.py +227 -0
  26. parrot/lib/python3.10/site-packages/sympy/core/sorting.py +312 -0
  27. parrot/lib/python3.10/site-packages/sympy/holonomic/__init__.py +18 -0
  28. parrot/lib/python3.10/site-packages/sympy/holonomic/holonomic.py +2790 -0
  29. parrot/lib/python3.10/site-packages/sympy/holonomic/holonomicerrors.py +49 -0
  30. parrot/lib/python3.10/site-packages/sympy/holonomic/numerical.py +109 -0
  31. parrot/lib/python3.10/site-packages/sympy/holonomic/recurrence.py +365 -0
  32. parrot/lib/python3.10/site-packages/sympy/holonomic/tests/__init__.py +0 -0
  33. parrot/lib/python3.10/site-packages/sympy/holonomic/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  34. parrot/lib/python3.10/site-packages/sympy/holonomic/tests/__pycache__/test_holonomic.cpython-310.pyc +0 -0
  35. parrot/lib/python3.10/site-packages/sympy/holonomic/tests/test_recurrence.py +41 -0
  36. parrot/lib/python3.10/site-packages/sympy/stats/__init__.py +202 -0
  37. parrot/lib/python3.10/site-packages/sympy/stats/__pycache__/crv.cpython-310.pyc +0 -0
  38. parrot/lib/python3.10/site-packages/sympy/stats/__pycache__/frv_types.cpython-310.pyc +0 -0
  39. parrot/lib/python3.10/site-packages/sympy/stats/__pycache__/joint_rv.cpython-310.pyc +0 -0
  40. parrot/lib/python3.10/site-packages/sympy/stats/__pycache__/random_matrix_models.cpython-310.pyc +0 -0
  41. parrot/lib/python3.10/site-packages/sympy/stats/__pycache__/rv_interface.cpython-310.pyc +0 -0
  42. parrot/lib/python3.10/site-packages/sympy/stats/__pycache__/stochastic_process.cpython-310.pyc +0 -0
  43. parrot/lib/python3.10/site-packages/sympy/stats/__pycache__/symbolic_probability.cpython-310.pyc +0 -0
  44. parrot/lib/python3.10/site-packages/sympy/stats/crv.py +570 -0
  45. parrot/lib/python3.10/site-packages/sympy/stats/drv_types.py +835 -0
  46. parrot/lib/python3.10/site-packages/sympy/stats/joint_rv.py +426 -0
  47. parrot/lib/python3.10/site-packages/sympy/stats/joint_rv_types.py +945 -0
  48. parrot/lib/python3.10/site-packages/sympy/stats/matrix_distributions.py +610 -0
  49. parrot/lib/python3.10/site-packages/sympy/stats/random_matrix.py +30 -0
  50. parrot/lib/python3.10/site-packages/sympy/stats/random_matrix_models.py +457 -0
llava_next/lib/python3.10/site-packages/numpy/ma/API_CHANGES.txt ADDED
@@ -0,0 +1,135 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ .. -*- rest -*-
2
+
3
+ ==================================================
4
+ API changes in the new masked array implementation
5
+ ==================================================
6
+
7
+ Masked arrays are subclasses of ndarray
8
+ ---------------------------------------
9
+
10
+ Contrary to the original implementation, masked arrays are now regular
11
+ ndarrays::
12
+
13
+ >>> x = masked_array([1,2,3],mask=[0,0,1])
14
+ >>> print isinstance(x, numpy.ndarray)
15
+ True
16
+
17
+
18
+ ``_data`` returns a view of the masked array
19
+ --------------------------------------------
20
+
21
+ Masked arrays are composed of a ``_data`` part and a ``_mask``. Accessing the
22
+ ``_data`` part will return a regular ndarray or any of its subclass, depending
23
+ on the initial data::
24
+
25
+ >>> x = masked_array(numpy.matrix([[1,2],[3,4]]),mask=[[0,0],[0,1]])
26
+ >>> print x._data
27
+ [[1 2]
28
+ [3 4]]
29
+ >>> print type(x._data)
30
+ <class 'numpy.matrixlib.defmatrix.matrix'>
31
+
32
+
33
+ In practice, ``_data`` is implemented as a property, not as an attribute.
34
+ Therefore, you cannot access it directly, and some simple tests such as the
35
+ following one will fail::
36
+
37
+ >>>x._data is x._data
38
+ False
39
+
40
+
41
+ ``filled(x)`` can return a subclass of ndarray
42
+ ----------------------------------------------
43
+ The function ``filled(a)`` returns an array of the same type as ``a._data``::
44
+
45
+ >>> x = masked_array(numpy.matrix([[1,2],[3,4]]),mask=[[0,0],[0,1]])
46
+ >>> y = filled(x)
47
+ >>> print type(y)
48
+ <class 'numpy.matrixlib.defmatrix.matrix'>
49
+ >>> print y
50
+ matrix([[ 1, 2],
51
+ [ 3, 999999]])
52
+
53
+
54
+ ``put``, ``putmask`` behave like their ndarray counterparts
55
+ -----------------------------------------------------------
56
+
57
+ Previously, ``putmask`` was used like this::
58
+
59
+ mask = [False,True,True]
60
+ x = array([1,4,7],mask=mask)
61
+ putmask(x,mask,[3])
62
+
63
+ which translated to::
64
+
65
+ x[~mask] = [3]
66
+
67
+ (Note that a ``True``-value in a mask suppresses a value.)
68
+
69
+ In other words, the mask had the same length as ``x``, whereas
70
+ ``values`` had ``sum(~mask)`` elements.
71
+
72
+ Now, the behaviour is similar to that of ``ndarray.putmask``, where
73
+ the mask and the values are both the same length as ``x``, i.e.
74
+
75
+ ::
76
+
77
+ putmask(x,mask,[3,0,0])
78
+
79
+
80
+ ``fill_value`` is a property
81
+ ----------------------------
82
+
83
+ ``fill_value`` is no longer a method, but a property::
84
+
85
+ >>> print x.fill_value
86
+ 999999
87
+
88
+ ``cumsum`` and ``cumprod`` ignore missing values
89
+ ------------------------------------------------
90
+
91
+ Missing values are assumed to be the identity element, i.e. 0 for
92
+ ``cumsum`` and 1 for ``cumprod``::
93
+
94
+ >>> x = N.ma.array([1,2,3,4],mask=[False,True,False,False])
95
+ >>> print x
96
+ [1 -- 3 4]
97
+ >>> print x.cumsum()
98
+ [1 -- 4 8]
99
+ >> print x.cumprod()
100
+ [1 -- 3 12]
101
+
102
+ ``bool(x)`` raises a ValueError
103
+ -------------------------------
104
+
105
+ Masked arrays now behave like regular ``ndarrays``, in that they cannot be
106
+ converted to booleans:
107
+
108
+ ::
109
+
110
+ >>> x = N.ma.array([1,2,3])
111
+ >>> bool(x)
112
+ Traceback (most recent call last):
113
+ File "<stdin>", line 1, in <module>
114
+ ValueError: The truth value of an array with more than one element is ambiguous. Use a.any() or a.all()
115
+
116
+
117
+ ==================================
118
+ New features (non exhaustive list)
119
+ ==================================
120
+
121
+ ``mr_``
122
+ -------
123
+
124
+ ``mr_`` mimics the behavior of ``r_`` for masked arrays::
125
+
126
+ >>> np.ma.mr_[3,4,5]
127
+ masked_array(data = [3 4 5],
128
+ mask = False,
129
+ fill_value=999999)
130
+
131
+
132
+ ``anom``
133
+ --------
134
+
135
+ The ``anom`` method returns the deviations from the average (anomalies).
llava_next/lib/python3.10/site-packages/numpy/ma/LICENSE ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ * Copyright (c) 2006, University of Georgia and Pierre G.F. Gerard-Marchant
2
+ * All rights reserved.
3
+ * Redistribution and use in source and binary forms, with or without
4
+ * modification, are permitted provided that the following conditions are met:
5
+ *
6
+ * * Redistributions of source code must retain the above copyright
7
+ * notice, this list of conditions and the following disclaimer.
8
+ * * Redistributions in binary form must reproduce the above copyright
9
+ * notice, this list of conditions and the following disclaimer in the
10
+ * documentation and/or other materials provided with the distribution.
11
+ * * Neither the name of the University of Georgia nor the
12
+ * names of its contributors may be used to endorse or promote products
13
+ * derived from this software without specific prior written permission.
14
+ *
15
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND ANY
16
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
17
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
18
+ * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE FOR ANY
19
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
20
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
21
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
22
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
24
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
llava_next/lib/python3.10/site-packages/numpy/ma/README.rst ADDED
@@ -0,0 +1,236 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ==================================
2
+ A Guide to Masked Arrays in NumPy
3
+ ==================================
4
+
5
+ .. Contents::
6
+
7
+ See http://www.scipy.org/scipy/numpy/wiki/MaskedArray (dead link)
8
+ for updates of this document.
9
+
10
+
11
+ History
12
+ -------
13
+
14
+ As a regular user of MaskedArray, I (Pierre G.F. Gerard-Marchant) became
15
+ increasingly frustrated with the subclassing of masked arrays (even if
16
+ I can only blame my inexperience). I needed to develop a class of arrays
17
+ that could store some additional information along with numerical values,
18
+ while keeping the possibility for missing data (picture storing a series
19
+ of dates along with measurements, what would later become the `TimeSeries
20
+ Scikit <http://projects.scipy.org/scipy/scikits/wiki/TimeSeries>`__
21
+ (dead link).
22
+
23
+ I started to implement such a class, but then quickly realized that
24
+ any additional information disappeared when processing these subarrays
25
+ (for example, adding a constant value to a subarray would erase its
26
+ dates). I ended up writing the equivalent of *numpy.core.ma* for my
27
+ particular class, ufuncs included. Everything went fine until I needed to
28
+ subclass my new class, when more problems showed up: some attributes of
29
+ the new subclass were lost during processing. I identified the culprit as
30
+ MaskedArray, which returns masked ndarrays when I expected masked
31
+ arrays of my class. I was preparing myself to rewrite *numpy.core.ma*
32
+ when I forced myself to learn how to subclass ndarrays. As I became more
33
+ familiar with the *__new__* and *__array_finalize__* methods,
34
+ I started to wonder why masked arrays were objects, and not ndarrays,
35
+ and whether it wouldn't be more convenient for subclassing if they did
36
+ behave like regular ndarrays.
37
+
38
+ The new *maskedarray* is what I eventually come up with. The
39
+ main differences with the initial *numpy.core.ma* package are
40
+ that MaskedArray is now a subclass of *ndarray* and that the
41
+ *_data* section can now be any subclass of *ndarray*. Apart from a
42
+ couple of issues listed below, the behavior of the new MaskedArray
43
+ class reproduces the old one. Initially the *maskedarray*
44
+ implementation was marginally slower than *numpy.ma* in some areas,
45
+ but work is underway to speed it up; the expectation is that it can be
46
+ made substantially faster than the present *numpy.ma*.
47
+
48
+
49
+ Note that if the subclass has some special methods and
50
+ attributes, they are not propagated to the masked version:
51
+ this would require a modification of the *__getattribute__*
52
+ method (first trying *ndarray.__getattribute__*, then trying
53
+ *self._data.__getattribute__* if an exception is raised in the first
54
+ place), which really slows things down.
55
+
56
+ Main differences
57
+ ----------------
58
+
59
+ * The *_data* part of the masked array can be any subclass of ndarray (but not recarray, cf below).
60
+ * *fill_value* is now a property, not a function.
61
+ * in the majority of cases, the mask is forced to *nomask* when no value is actually masked. A notable exception is when a masked array (with no masked values) has just been unpickled.
62
+ * I got rid of the *share_mask* flag, I never understood its purpose.
63
+ * *put*, *putmask* and *take* now mimic the ndarray methods, to avoid unpleasant surprises. Moreover, *put* and *putmask* both update the mask when needed. * if *a* is a masked array, *bool(a)* raises a *ValueError*, as it does with ndarrays.
64
+ * in the same way, the comparison of two masked arrays is a masked array, not a boolean
65
+ * *filled(a)* returns an array of the same subclass as *a._data*, and no test is performed on whether it is contiguous or not.
66
+ * the mask is always printed, even if it's *nomask*, which makes things easy (for me at least) to remember that a masked array is used.
67
+ * *cumsum* works as if the *_data* array was filled with 0. The mask is preserved, but not updated.
68
+ * *cumprod* works as if the *_data* array was filled with 1. The mask is preserved, but not updated.
69
+
70
+ New features
71
+ ------------
72
+
73
+ This list is non-exhaustive...
74
+
75
+ * the *mr_* function mimics *r_* for masked arrays.
76
+ * the *anom* method returns the anomalies (deviations from the average)
77
+
78
+ Using the new package with numpy.core.ma
79
+ ----------------------------------------
80
+
81
+ I tried to make sure that the new package can understand old masked
82
+ arrays. Unfortunately, there's no upward compatibility.
83
+
84
+ For example:
85
+
86
+ >>> import numpy.core.ma as old_ma
87
+ >>> import maskedarray as new_ma
88
+ >>> x = old_ma.array([1,2,3,4,5], mask=[0,0,1,0,0])
89
+ >>> x
90
+ array(data =
91
+ [ 1 2 999999 4 5],
92
+ mask =
93
+ [False False True False False],
94
+ fill_value=999999)
95
+ >>> y = new_ma.array([1,2,3,4,5], mask=[0,0,1,0,0])
96
+ >>> y
97
+ array(data = [1 2 -- 4 5],
98
+ mask = [False False True False False],
99
+ fill_value=999999)
100
+ >>> x==y
101
+ array(data =
102
+ [True True True True True],
103
+ mask =
104
+ [False False True False False],
105
+ fill_value=?)
106
+ >>> old_ma.getmask(x) == new_ma.getmask(x)
107
+ array([True, True, True, True, True])
108
+ >>> old_ma.getmask(y) == new_ma.getmask(y)
109
+ array([True, True, False, True, True])
110
+ >>> old_ma.getmask(y)
111
+ False
112
+
113
+
114
+ Using maskedarray with matplotlib
115
+ ---------------------------------
116
+
117
+ Starting with matplotlib 0.91.2, the masked array importing will work with
118
+ the maskedarray branch) as well as with earlier versions.
119
+
120
+ By default matplotlib still uses numpy.ma, but there is an rcParams setting
121
+ that you can use to select maskedarray instead. In the matplotlibrc file
122
+ you will find::
123
+
124
+ #maskedarray : False # True to use external maskedarray module
125
+ # instead of numpy.ma; this is a temporary #
126
+ setting for testing maskedarray.
127
+
128
+
129
+ Uncomment and set to True to select maskedarray everywhere.
130
+ Alternatively, you can test a script with maskedarray by using a
131
+ command-line option, e.g.::
132
+
133
+ python simple_plot.py --maskedarray
134
+
135
+
136
+ Masked records
137
+ --------------
138
+
139
+ Like *numpy.core.ma*, the *ndarray*-based implementation
140
+ of MaskedArray is limited when working with records: you can
141
+ mask any record of the array, but not a field in a record. If you
142
+ need this feature, you may want to give the *mrecords* package
143
+ a try (available in the *maskedarray* directory in the scipy
144
+ sandbox). This module defines a new class, *MaskedRecord*. An
145
+ instance of this class accepts a *recarray* as data, and uses two
146
+ masks: the *fieldmask* has as many entries as records in the array,
147
+ each entry with the same fields as a record, but of boolean types:
148
+ they indicate whether the field is masked or not; a record entry
149
+ is flagged as masked in the *mask* array if all the fields are
150
+ masked. A few examples in the file should give you an idea of what
151
+ can be done. Note that *mrecords* is still experimental...
152
+
153
+ Optimizing maskedarray
154
+ ----------------------
155
+
156
+ Should masked arrays be filled before processing or not?
157
+ --------------------------------------------------------
158
+
159
+ In the current implementation, most operations on masked arrays involve
160
+ the following steps:
161
+
162
+ * the input arrays are filled
163
+ * the operation is performed on the filled arrays
164
+ * the mask is set for the results, from the combination of the input masks and the mask corresponding to the domain of the operation.
165
+
166
+ For example, consider the division of two masked arrays::
167
+
168
+ import numpy
169
+ import maskedarray as ma
170
+ x = ma.array([1,2,3,4],mask=[1,0,0,0], dtype=numpy.float_)
171
+ y = ma.array([-1,0,1,2], mask=[0,0,0,1], dtype=numpy.float_)
172
+
173
+ The division of x by y is then computed as::
174
+
175
+ d1 = x.filled(0) # d1 = array([0., 2., 3., 4.])
176
+ d2 = y.filled(1) # array([-1., 0., 1., 1.])
177
+ m = ma.mask_or(ma.getmask(x), ma.getmask(y)) # m =
178
+ array([True,False,False,True])
179
+ dm = ma.divide.domain(d1,d2) # array([False, True, False, False])
180
+ result = (d1/d2).view(MaskedArray) # masked_array([-0. inf, 3., 4.])
181
+ result._mask = logical_or(m, dm)
182
+
183
+ Note that a division by zero takes place. To avoid it, we can consider
184
+ to fill the input arrays, taking the domain mask into account, so that::
185
+
186
+ d1 = x._data.copy() # d1 = array([1., 2., 3., 4.])
187
+ d2 = y._data.copy() # array([-1., 0., 1., 2.])
188
+ dm = ma.divide.domain(d1,d2) # array([False, True, False, False])
189
+ numpy.putmask(d2, dm, 1) # d2 = array([-1., 1., 1., 2.])
190
+ m = ma.mask_or(ma.getmask(x), ma.getmask(y)) # m =
191
+ array([True,False,False,True])
192
+ result = (d1/d2).view(MaskedArray) # masked_array([-1. 0., 3., 2.])
193
+ result._mask = logical_or(m, dm)
194
+
195
+ Note that the *.copy()* is required to avoid updating the inputs with
196
+ *putmask*. The *.filled()* method also involves a *.copy()*.
197
+
198
+ A third possibility consists in avoid filling the arrays::
199
+
200
+ d1 = x._data # d1 = array([1., 2., 3., 4.])
201
+ d2 = y._data # array([-1., 0., 1., 2.])
202
+ dm = ma.divide.domain(d1,d2) # array([False, True, False, False])
203
+ m = ma.mask_or(ma.getmask(x), ma.getmask(y)) # m =
204
+ array([True,False,False,True])
205
+ result = (d1/d2).view(MaskedArray) # masked_array([-1. inf, 3., 2.])
206
+ result._mask = logical_or(m, dm)
207
+
208
+ Note that here again the division by zero takes place.
209
+
210
+ A quick benchmark gives the following results:
211
+
212
+ * *numpy.ma.divide* : 2.69 ms per loop
213
+ * classical division : 2.21 ms per loop
214
+ * division w/ prefilling : 2.34 ms per loop
215
+ * division w/o filling : 1.55 ms per loop
216
+
217
+ So, is it worth filling the arrays beforehand ? Yes, if we are interested
218
+ in avoiding floating-point exceptions that may fill the result with infs
219
+ and nans. No, if we are only interested into speed...
220
+
221
+
222
+ Thanks
223
+ ------
224
+
225
+ I'd like to thank Paul Dubois, Travis Oliphant and Sasha for the
226
+ original masked array package: without you, I would never have started
227
+ that (it might be argued that I shouldn't have anyway, but that's
228
+ another story...). I also wish to extend these thanks to Reggie Dugard
229
+ and Eric Firing for their suggestions and numerous improvements.
230
+
231
+
232
+ Revision notes
233
+ --------------
234
+
235
+ * 08/25/2007 : Creation of this page
236
+ * 01/23/2007 : The package has been moved to the SciPy sandbox, and is regularly updated: please check out your SVN version!
llava_next/lib/python3.10/site-packages/numpy/ma/__init__.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ =============
3
+ Masked Arrays
4
+ =============
5
+
6
+ Arrays sometimes contain invalid or missing data. When doing operations
7
+ on such arrays, we wish to suppress invalid values, which is the purpose masked
8
+ arrays fulfill (an example of typical use is given below).
9
+
10
+ For example, examine the following array:
11
+
12
+ >>> x = np.array([2, 1, 3, np.nan, 5, 2, 3, np.nan])
13
+
14
+ When we try to calculate the mean of the data, the result is undetermined:
15
+
16
+ >>> np.mean(x)
17
+ nan
18
+
19
+ The mean is calculated using roughly ``np.sum(x)/len(x)``, but since
20
+ any number added to ``NaN`` [1]_ produces ``NaN``, this doesn't work. Enter
21
+ masked arrays:
22
+
23
+ >>> m = np.ma.masked_array(x, np.isnan(x))
24
+ >>> m
25
+ masked_array(data = [2.0 1.0 3.0 -- 5.0 2.0 3.0 --],
26
+ mask = [False False False True False False False True],
27
+ fill_value=1e+20)
28
+
29
+ Here, we construct a masked array that suppress all ``NaN`` values. We
30
+ may now proceed to calculate the mean of the other values:
31
+
32
+ >>> np.mean(m)
33
+ 2.6666666666666665
34
+
35
+ .. [1] Not-a-Number, a floating point value that is the result of an
36
+ invalid operation.
37
+
38
+ .. moduleauthor:: Pierre Gerard-Marchant
39
+ .. moduleauthor:: Jarrod Millman
40
+
41
+ """
42
+ from . import core
43
+ from .core import *
44
+
45
+ from . import extras
46
+ from .extras import *
47
+
48
+ __all__ = ['core', 'extras']
49
+ __all__ += core.__all__
50
+ __all__ += extras.__all__
51
+
52
+ from numpy._pytesttester import PytestTester
53
+ test = PytestTester(__name__)
54
+ del PytestTester
llava_next/lib/python3.10/site-packages/numpy/ma/__init__.pyi ADDED
@@ -0,0 +1,234 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from numpy._pytesttester import PytestTester
2
+
3
+ from numpy.ma import extras as extras
4
+
5
+ from numpy.ma.core import (
6
+ MAError as MAError,
7
+ MaskError as MaskError,
8
+ MaskType as MaskType,
9
+ MaskedArray as MaskedArray,
10
+ abs as abs,
11
+ absolute as absolute,
12
+ add as add,
13
+ all as all,
14
+ allclose as allclose,
15
+ allequal as allequal,
16
+ alltrue as alltrue,
17
+ amax as amax,
18
+ amin as amin,
19
+ angle as angle,
20
+ anom as anom,
21
+ anomalies as anomalies,
22
+ any as any,
23
+ append as append,
24
+ arange as arange,
25
+ arccos as arccos,
26
+ arccosh as arccosh,
27
+ arcsin as arcsin,
28
+ arcsinh as arcsinh,
29
+ arctan as arctan,
30
+ arctan2 as arctan2,
31
+ arctanh as arctanh,
32
+ argmax as argmax,
33
+ argmin as argmin,
34
+ argsort as argsort,
35
+ around as around,
36
+ array as array,
37
+ asanyarray as asanyarray,
38
+ asarray as asarray,
39
+ bitwise_and as bitwise_and,
40
+ bitwise_or as bitwise_or,
41
+ bitwise_xor as bitwise_xor,
42
+ bool_ as bool_,
43
+ ceil as ceil,
44
+ choose as choose,
45
+ clip as clip,
46
+ common_fill_value as common_fill_value,
47
+ compress as compress,
48
+ compressed as compressed,
49
+ concatenate as concatenate,
50
+ conjugate as conjugate,
51
+ convolve as convolve,
52
+ copy as copy,
53
+ correlate as correlate,
54
+ cos as cos,
55
+ cosh as cosh,
56
+ count as count,
57
+ cumprod as cumprod,
58
+ cumsum as cumsum,
59
+ default_fill_value as default_fill_value,
60
+ diag as diag,
61
+ diagonal as diagonal,
62
+ diff as diff,
63
+ divide as divide,
64
+ empty as empty,
65
+ empty_like as empty_like,
66
+ equal as equal,
67
+ exp as exp,
68
+ expand_dims as expand_dims,
69
+ fabs as fabs,
70
+ filled as filled,
71
+ fix_invalid as fix_invalid,
72
+ flatten_mask as flatten_mask,
73
+ flatten_structured_array as flatten_structured_array,
74
+ floor as floor,
75
+ floor_divide as floor_divide,
76
+ fmod as fmod,
77
+ frombuffer as frombuffer,
78
+ fromflex as fromflex,
79
+ fromfunction as fromfunction,
80
+ getdata as getdata,
81
+ getmask as getmask,
82
+ getmaskarray as getmaskarray,
83
+ greater as greater,
84
+ greater_equal as greater_equal,
85
+ harden_mask as harden_mask,
86
+ hypot as hypot,
87
+ identity as identity,
88
+ ids as ids,
89
+ indices as indices,
90
+ inner as inner,
91
+ innerproduct as innerproduct,
92
+ isMA as isMA,
93
+ isMaskedArray as isMaskedArray,
94
+ is_mask as is_mask,
95
+ is_masked as is_masked,
96
+ isarray as isarray,
97
+ left_shift as left_shift,
98
+ less as less,
99
+ less_equal as less_equal,
100
+ log as log,
101
+ log10 as log10,
102
+ log2 as log2,
103
+ logical_and as logical_and,
104
+ logical_not as logical_not,
105
+ logical_or as logical_or,
106
+ logical_xor as logical_xor,
107
+ make_mask as make_mask,
108
+ make_mask_descr as make_mask_descr,
109
+ make_mask_none as make_mask_none,
110
+ mask_or as mask_or,
111
+ masked as masked,
112
+ masked_array as masked_array,
113
+ masked_equal as masked_equal,
114
+ masked_greater as masked_greater,
115
+ masked_greater_equal as masked_greater_equal,
116
+ masked_inside as masked_inside,
117
+ masked_invalid as masked_invalid,
118
+ masked_less as masked_less,
119
+ masked_less_equal as masked_less_equal,
120
+ masked_not_equal as masked_not_equal,
121
+ masked_object as masked_object,
122
+ masked_outside as masked_outside,
123
+ masked_print_option as masked_print_option,
124
+ masked_singleton as masked_singleton,
125
+ masked_values as masked_values,
126
+ masked_where as masked_where,
127
+ max as max,
128
+ maximum as maximum,
129
+ maximum_fill_value as maximum_fill_value,
130
+ mean as mean,
131
+ min as min,
132
+ minimum as minimum,
133
+ minimum_fill_value as minimum_fill_value,
134
+ mod as mod,
135
+ multiply as multiply,
136
+ mvoid as mvoid,
137
+ ndim as ndim,
138
+ negative as negative,
139
+ nomask as nomask,
140
+ nonzero as nonzero,
141
+ not_equal as not_equal,
142
+ ones as ones,
143
+ outer as outer,
144
+ outerproduct as outerproduct,
145
+ power as power,
146
+ prod as prod,
147
+ product as product,
148
+ ptp as ptp,
149
+ put as put,
150
+ putmask as putmask,
151
+ ravel as ravel,
152
+ remainder as remainder,
153
+ repeat as repeat,
154
+ reshape as reshape,
155
+ resize as resize,
156
+ right_shift as right_shift,
157
+ round as round,
158
+ set_fill_value as set_fill_value,
159
+ shape as shape,
160
+ sin as sin,
161
+ sinh as sinh,
162
+ size as size,
163
+ soften_mask as soften_mask,
164
+ sometrue as sometrue,
165
+ sort as sort,
166
+ sqrt as sqrt,
167
+ squeeze as squeeze,
168
+ std as std,
169
+ subtract as subtract,
170
+ sum as sum,
171
+ swapaxes as swapaxes,
172
+ take as take,
173
+ tan as tan,
174
+ tanh as tanh,
175
+ trace as trace,
176
+ transpose as transpose,
177
+ true_divide as true_divide,
178
+ var as var,
179
+ where as where,
180
+ zeros as zeros,
181
+ )
182
+
183
+ from numpy.ma.extras import (
184
+ apply_along_axis as apply_along_axis,
185
+ apply_over_axes as apply_over_axes,
186
+ atleast_1d as atleast_1d,
187
+ atleast_2d as atleast_2d,
188
+ atleast_3d as atleast_3d,
189
+ average as average,
190
+ clump_masked as clump_masked,
191
+ clump_unmasked as clump_unmasked,
192
+ column_stack as column_stack,
193
+ compress_cols as compress_cols,
194
+ compress_nd as compress_nd,
195
+ compress_rowcols as compress_rowcols,
196
+ compress_rows as compress_rows,
197
+ count_masked as count_masked,
198
+ corrcoef as corrcoef,
199
+ cov as cov,
200
+ diagflat as diagflat,
201
+ dot as dot,
202
+ dstack as dstack,
203
+ ediff1d as ediff1d,
204
+ flatnotmasked_contiguous as flatnotmasked_contiguous,
205
+ flatnotmasked_edges as flatnotmasked_edges,
206
+ hsplit as hsplit,
207
+ hstack as hstack,
208
+ isin as isin,
209
+ in1d as in1d,
210
+ intersect1d as intersect1d,
211
+ mask_cols as mask_cols,
212
+ mask_rowcols as mask_rowcols,
213
+ mask_rows as mask_rows,
214
+ masked_all as masked_all,
215
+ masked_all_like as masked_all_like,
216
+ median as median,
217
+ mr_ as mr_,
218
+ ndenumerate as ndenumerate,
219
+ notmasked_contiguous as notmasked_contiguous,
220
+ notmasked_edges as notmasked_edges,
221
+ polyfit as polyfit,
222
+ row_stack as row_stack,
223
+ setdiff1d as setdiff1d,
224
+ setxor1d as setxor1d,
225
+ stack as stack,
226
+ unique as unique,
227
+ union1d as union1d,
228
+ vander as vander,
229
+ vstack as vstack,
230
+ )
231
+
232
+ __all__: list[str]
233
+ __path__: list[str]
234
+ test: PytestTester
llava_next/lib/python3.10/site-packages/numpy/ma/core.py ADDED
The diff for this file is too large to render. See raw diff
 
llava_next/lib/python3.10/site-packages/numpy/ma/core.pyi ADDED
@@ -0,0 +1,471 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from collections.abc import Callable
2
+ from typing import Any, TypeVar
3
+ from numpy import ndarray, dtype, float64
4
+
5
+ from numpy import (
6
+ amax as amax,
7
+ amin as amin,
8
+ bool_ as bool_,
9
+ expand_dims as expand_dims,
10
+ clip as clip,
11
+ indices as indices,
12
+ ones_like as ones_like,
13
+ squeeze as squeeze,
14
+ zeros_like as zeros_like,
15
+ )
16
+
17
+ from numpy.lib.function_base import (
18
+ angle as angle,
19
+ )
20
+
21
# TODO: Set the `bound` to something more suitable once we
# have proper shape support
_ShapeType = TypeVar("_ShapeType", bound=Any)
_DType_co = TypeVar("_DType_co", bound=dtype[Any], covariant=True)

__all__: list[str]

# The mask of a MaskedArray is an array of booleans; `nomask` is the
# singleton used when no element is masked.
MaskType = bool_
nomask: bool_

# Exception hierarchy for numpy.ma; MaskError derives from MAError.
class MaskedArrayFutureWarning(FutureWarning): ...
class MAError(Exception): ...
class MaskError(MAError): ...

# Fill-value helpers: stubs for the functions that pick or set the value
# used in place of masked entries.
def default_fill_value(obj): ...
def minimum_fill_value(obj): ...
def maximum_fill_value(obj): ...
def set_fill_value(a, fill_value): ...
def common_fill_value(a, b): ...
def filled(a, fill_value=...): ...
def getdata(a, subok=...): ...
get_data = getdata  # alias kept for backward compatibility

def fix_invalid(a, mask=..., copy=..., fill_value=...): ...
45
+
46
class _MaskedUFunc:
    """Base wrapper holding a ufunc plus its name/doc for masked variants."""
    f: Any          # the wrapped ufunc
    __doc__: Any
    __name__: Any
    def __init__(self, ufunc): ...
51
+
52
class _MaskedUnaryOperation(_MaskedUFunc):
    """Unary ufunc wrapper with a fill value and an optional validity domain."""
    fill: Any       # value substituted for masked entries before the call
    domain: Any     # optional domain; inputs outside it become masked
    def __init__(self, mufunc, fill=..., domain=...): ...
    def __call__(self, a, *args, **kwargs): ...
57
+
58
class _MaskedBinaryOperation(_MaskedUFunc):
    """Binary ufunc wrapper with per-operand fill values; also exposes the
    ufunc-style reduce/outer/accumulate methods."""
    fillx: Any      # fill value for the first operand
    filly: Any      # fill value for the second operand
    def __init__(self, mbfunc, fillx=..., filly=...): ...
    def __call__(self, a, b, *args, **kwargs): ...
    def reduce(self, target, axis=..., dtype=...): ...
    def outer(self, a, b): ...
    def accumulate(self, target, axis=...): ...
66
+
67
class _DomainedBinaryOperation(_MaskedUFunc):
    """Binary ufunc wrapper whose result is masked where the inputs fall
    outside `domain` (e.g. division by zero)."""
    domain: Any
    fillx: Any
    filly: Any
    def __init__(self, dbfunc, domain, fillx=..., filly=...): ...
    def __call__(self, a, b, *args, **kwargs): ...
73
+
74
# Masked versions of the elementwise unary ufuncs.
exp: _MaskedUnaryOperation
conjugate: _MaskedUnaryOperation
sin: _MaskedUnaryOperation
cos: _MaskedUnaryOperation
arctan: _MaskedUnaryOperation
arcsinh: _MaskedUnaryOperation
sinh: _MaskedUnaryOperation
cosh: _MaskedUnaryOperation
tanh: _MaskedUnaryOperation
abs: _MaskedUnaryOperation
absolute: _MaskedUnaryOperation
fabs: _MaskedUnaryOperation
negative: _MaskedUnaryOperation
floor: _MaskedUnaryOperation
ceil: _MaskedUnaryOperation
around: _MaskedUnaryOperation
logical_not: _MaskedUnaryOperation
sqrt: _MaskedUnaryOperation
log: _MaskedUnaryOperation
log2: _MaskedUnaryOperation
log10: _MaskedUnaryOperation
tan: _MaskedUnaryOperation
arcsin: _MaskedUnaryOperation
arccos: _MaskedUnaryOperation
arccosh: _MaskedUnaryOperation
arctanh: _MaskedUnaryOperation

# Masked versions of the elementwise binary ufuncs.
add: _MaskedBinaryOperation
subtract: _MaskedBinaryOperation
multiply: _MaskedBinaryOperation
arctan2: _MaskedBinaryOperation
equal: _MaskedBinaryOperation
not_equal: _MaskedBinaryOperation
less_equal: _MaskedBinaryOperation
greater_equal: _MaskedBinaryOperation
less: _MaskedBinaryOperation
greater: _MaskedBinaryOperation
logical_and: _MaskedBinaryOperation
alltrue: _MaskedBinaryOperation
logical_or: _MaskedBinaryOperation
sometrue: Callable[..., Any]
logical_xor: _MaskedBinaryOperation
bitwise_and: _MaskedBinaryOperation
bitwise_or: _MaskedBinaryOperation
bitwise_xor: _MaskedBinaryOperation
hypot: _MaskedBinaryOperation
# Division-like operators are domained (masked where the divisor is 0).
divide: _MaskedBinaryOperation
true_divide: _MaskedBinaryOperation
floor_divide: _MaskedBinaryOperation
remainder: _MaskedBinaryOperation
fmod: _MaskedBinaryOperation
mod: _MaskedBinaryOperation

# Mask construction and mask-based selection helpers.
def make_mask_descr(ndtype): ...
def getmask(a): ...
get_mask = getmask  # backward-compatible alias

def getmaskarray(arr): ...
def is_mask(m): ...
def make_mask(m, copy=..., shrink=..., dtype=...): ...
def make_mask_none(newshape, dtype=...): ...
def mask_or(m1, m2, copy=..., shrink=...): ...
def flatten_mask(mask): ...
def masked_where(condition, a, copy=...): ...
def masked_greater(x, value, copy=...): ...
def masked_greater_equal(x, value, copy=...): ...
def masked_less(x, value, copy=...): ...
def masked_less_equal(x, value, copy=...): ...
def masked_not_equal(x, value, copy=...): ...
def masked_equal(x, value, copy=...): ...
def masked_inside(x, v1, v2, copy=...): ...
def masked_outside(x, v1, v2, copy=...): ...
def masked_object(x, value, copy=..., shrink=...): ...
def masked_values(x, value, rtol=..., atol=..., copy=..., shrink=...): ...
def masked_invalid(a, copy=...): ...
149
+
150
class _MaskedPrintOption:
    """Controls the string printed for masked entries (e.g. ``--``)."""
    def __init__(self, display): ...
    def display(self): ...
    def set_display(self, s): ...
    def enabled(self): ...
    def enable(self, shrink=...): ...
156
+
157
# Module-level singleton used when formatting masked arrays.
masked_print_option: _MaskedPrintOption

def flatten_structured_array(a): ...
160
+
161
class MaskedIterator:
    """Flat iterator over a masked array, iterating data and mask in step."""
    ma: Any         # the MaskedArray being iterated
    dataiter: Any   # flat iterator over the data
    maskiter: Any   # flat iterator over the mask
    def __init__(self, ma): ...
    def __iter__(self): ...
    def __getitem__(self, indx): ...
    def __setitem__(self, index, value): ...
    def __next__(self): ...
170
+
171
class MaskedArray(ndarray[_ShapeType, _DType_co]):
    """Stub for `numpy.ma.MaskedArray`: an ndarray with an associated
    boolean mask and a fill value. All members are declared untyped
    (`Any` / `...`) pending proper shape/dtype support."""
    __array_priority__: Any
    def __new__(cls, data=..., mask=..., dtype=..., copy=..., subok=..., ndmin=..., fill_value=..., keep_mask=..., hard_mask=..., shrink=..., order=...): ...
    def __array_finalize__(self, obj): ...
    def __array_wrap__(self, obj, context=...): ...
    def view(self, dtype=..., type=..., fill_value=...): ...
    def __getitem__(self, indx): ...
    def __setitem__(self, indx, value): ...
    # dtype/shape are redeclared as settable properties (they update the
    # mask alongside the data at runtime).
    @property
    def dtype(self): ...
    @dtype.setter
    def dtype(self, dtype): ...
    @property
    def shape(self): ...
    @shape.setter
    def shape(self, shape): ...
    def __setmask__(self, mask, copy=...): ...
    @property
    def mask(self): ...
    @mask.setter
    def mask(self, value): ...
    @property
    def recordmask(self): ...
    @recordmask.setter
    def recordmask(self, mask): ...
    # Hard/soft mask management.
    def harden_mask(self): ...
    def soften_mask(self): ...
    @property
    def hardmask(self): ...
    def unshare_mask(self): ...
    @property
    def sharedmask(self): ...
    def shrink_mask(self): ...
    @property
    def baseclass(self): ...
    data: Any
    @property
    def flat(self): ...
    @flat.setter
    def flat(self, value): ...
    @property
    def fill_value(self): ...
    @fill_value.setter
    def fill_value(self, value=...): ...
    get_fill_value: Any
    set_fill_value: Any
    def filled(self, fill_value=...): ...
    def compressed(self): ...
    def compress(self, condition, axis=..., out=...): ...
    # Comparison and arithmetic operators (mask-aware at runtime).
    def __eq__(self, other): ...
    def __ne__(self, other): ...
    def __ge__(self, other): ...
    def __gt__(self, other): ...
    def __le__(self, other): ...
    def __lt__(self, other): ...
    def __add__(self, other): ...
    def __radd__(self, other): ...
    def __sub__(self, other): ...
    def __rsub__(self, other): ...
    def __mul__(self, other): ...
    def __rmul__(self, other): ...
    def __div__(self, other): ...
    def __truediv__(self, other): ...
    def __rtruediv__(self, other): ...
    def __floordiv__(self, other): ...
    def __rfloordiv__(self, other): ...
    def __pow__(self, other): ...
    def __rpow__(self, other): ...
    def __iadd__(self, other): ...
    def __isub__(self, other): ...
    def __imul__(self, other): ...
    def __idiv__(self, other): ...
    def __ifloordiv__(self, other): ...
    def __itruediv__(self, other): ...
    def __ipow__(self, other): ...
    def __float__(self): ...
    def __int__(self): ...
    @property  # type: ignore[misc]
    def imag(self): ...
    get_imag: Any
    @property  # type: ignore[misc]
    def real(self): ...
    get_real: Any
    # Reductions and shape-manipulating methods.
    def count(self, axis=..., keepdims=...): ...
    def ravel(self, order=...): ...
    def reshape(self, *s, **kwargs): ...
    def resize(self, newshape, refcheck=..., order=...): ...
    def put(self, indices, values, mode=...): ...
    def ids(self): ...
    def iscontiguous(self): ...
    def all(self, axis=..., out=..., keepdims=...): ...
    def any(self, axis=..., out=..., keepdims=...): ...
    def nonzero(self): ...
    def trace(self, offset=..., axis1=..., axis2=..., dtype=..., out=...): ...
    def dot(self, b, out=..., strict=...): ...
    def sum(self, axis=..., dtype=..., out=..., keepdims=...): ...
    def cumsum(self, axis=..., dtype=..., out=...): ...
    def prod(self, axis=..., dtype=..., out=..., keepdims=...): ...
    product: Any
    def cumprod(self, axis=..., dtype=..., out=...): ...
    def mean(self, axis=..., dtype=..., out=..., keepdims=...): ...
    def anom(self, axis=..., dtype=...): ...
    def var(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ...
    def std(self, axis=..., dtype=..., out=..., ddof=..., keepdims=...): ...
    def round(self, decimals=..., out=...): ...
    def argsort(self, axis=..., kind=..., order=..., endwith=..., fill_value=...): ...
    def argmin(self, axis=..., fill_value=..., out=..., *, keepdims=...): ...
    def argmax(self, axis=..., fill_value=..., out=..., *, keepdims=...): ...
    def sort(self, axis=..., kind=..., order=..., endwith=..., fill_value=...): ...
    def min(self, axis=..., out=..., fill_value=..., keepdims=...): ...
    # NOTE: deprecated
    # def tostring(self, fill_value=..., order=...): ...
    def max(self, axis=..., out=..., fill_value=..., keepdims=...): ...
    def ptp(self, axis=..., out=..., fill_value=..., keepdims=...): ...
    def partition(self, *args, **kwargs): ...
    def argpartition(self, *args, **kwargs): ...
    def take(self, indices, axis=..., out=..., mode=...): ...
    # Methods inherited/forwarded without a refined stub signature.
    copy: Any
    diagonal: Any
    flatten: Any
    repeat: Any
    squeeze: Any
    swapaxes: Any
    T: Any
    transpose: Any
    def tolist(self, fill_value=...): ...
    def tobytes(self, fill_value=..., order=...): ...
    def tofile(self, fid, sep=..., format=...): ...
    def toflex(self): ...
    torecords: Any
    def __reduce__(self): ...
    def __deepcopy__(self, memo=...): ...
303
+
304
class mvoid(MaskedArray[_ShapeType, _DType_co]):
    """Stub for the masked scalar type backing structured-array records."""
    def __new__(
        self,
        data,
        mask=...,
        dtype=...,
        fill_value=...,
        hardmask=...,
        copy=...,
        subok=...,
    ): ...
    def __getitem__(self, indx): ...
    def __setitem__(self, indx, value): ...
    def __iter__(self): ...
    def __len__(self): ...
    def filled(self, fill_value=...): ...
    def tolist(self): ...
321
+
322
def isMaskedArray(x): ...
# Backward-compatible aliases.
isarray = isMaskedArray
isMA = isMaskedArray
325
+
326
# 0D float64 array
class MaskedConstant(MaskedArray[Any, dtype[float64]]):
    """Stub for the singleton `numpy.ma.masked` constant."""
    def __new__(cls): ...
    __class__: Any
    def __array_finalize__(self, obj): ...
    def __array_prepare__(self, obj, context=...): ...
    def __array_wrap__(self, obj, context=...): ...
    def __format__(self, format_spec): ...
    def __reduce__(self): ...
    # In-place operators all share one implementation.
    def __iop__(self, other): ...
    __iadd__: Any
    __isub__: Any
    __imul__: Any
    __ifloordiv__: Any
    __itruediv__: Any
    __ipow__: Any
    def copy(self, *args, **kwargs): ...
    def __copy__(self): ...
    def __deepcopy__(self, memo): ...
    def __setattr__(self, attr, value): ...
346
+
347
# The masked singleton and its aliases.
masked: MaskedConstant
masked_singleton: MaskedConstant
masked_array = MaskedArray

def array(
    data,
    dtype=...,
    copy=...,
    order=...,
    mask=...,
    fill_value=...,
    keep_mask=...,
    hard_mask=...,
    shrink=...,
    subok=...,
    ndmin=...,
): ...
def is_masked(x): ...
365
+
366
class _extrema_operation(_MaskedUFunc):
    """Wrapper implementing masked minimum/maximum with reduce/outer."""
    compare: Any          # comparison ufunc used to pick the extremum
    fill_value_func: Any  # callable producing the fill value for reductions
    def __init__(self, ufunc, compare, fill_value): ...
    # NOTE: in practice `b` has a default value, but users should
    # explicitly provide a value here as the default is deprecated
    def __call__(self, a, b): ...
    def reduce(self, target, axis=...): ...
    def outer(self, a, b): ...
375
+
376
# Module-level min/max/ptp mirroring the MaskedArray methods.
def min(obj, axis=..., out=..., fill_value=..., keepdims=...): ...
def max(obj, axis=..., out=..., fill_value=..., keepdims=...): ...
def ptp(obj, axis=..., out=..., fill_value=..., keepdims=...): ...
379
+
380
class _frommethod:
    """Turns a MaskedArray method into a module-level function by name."""
    __name__: Any
    __doc__: Any
    reversed: Any   # whether the first two call arguments are swapped
    def __init__(self, methodname, reversed=...): ...
    def getdoc(self): ...
    def __call__(self, a, *args, **params): ...
387
+
388
# Module-level functions generated from MaskedArray methods.
all: _frommethod
anomalies: _frommethod
anom: _frommethod
any: _frommethod
compress: _frommethod
cumprod: _frommethod
cumsum: _frommethod
copy: _frommethod
diagonal: _frommethod
harden_mask: _frommethod
ids: _frommethod
mean: _frommethod
nonzero: _frommethod
prod: _frommethod
product: _frommethod
ravel: _frommethod
repeat: _frommethod
soften_mask: _frommethod
std: _frommethod
sum: _frommethod
swapaxes: _frommethod
trace: _frommethod
var: _frommethod
count: _frommethod
argmin: _frommethod
argmax: _frommethod

minimum: _extrema_operation
maximum: _extrema_operation
417
+
418
# Free-function stubs for the remaining numpy.ma public API.
def take(a, indices, axis=..., out=..., mode=...): ...
def power(a, b, third=...): ...
def argsort(a, axis=..., kind=..., order=..., endwith=..., fill_value=...): ...
def sort(a, axis=..., kind=..., order=..., endwith=..., fill_value=...): ...
def compressed(x): ...
def concatenate(arrays, axis=...): ...
def diag(v, k=...): ...
def left_shift(a, n): ...
def right_shift(a, n): ...
def put(a, indices, values, mode=...): ...
def putmask(a, mask, values): ...
def transpose(a, axes=...): ...
def reshape(a, new_shape, order=...): ...
def resize(x, new_shape): ...
def ndim(obj): ...
def shape(obj): ...
def size(obj, axis=...): ...
def diff(a, /, n=..., axis=..., prepend=..., append=...): ...
def where(condition, x=..., y=...): ...
def choose(indices, choices, out=..., mode=...): ...
def round(a, decimals=..., out=...): ...

def inner(a, b): ...
innerproduct = inner  # backward-compatible alias

def outer(a, b): ...
outerproduct = outer  # backward-compatible alias

def correlate(a, v, mode=..., propagate_mask=...): ...
def convolve(a, v, mode=..., propagate_mask=...): ...
def allequal(a, b, fill_value=...): ...
def allclose(a, b, masked_equal=..., rtol=..., atol=...): ...
def asarray(a, dtype=..., order=...): ...
def asanyarray(a, dtype=...): ...
def fromflex(fxarray): ...
453
+
454
class _convert2ma:
    """Wraps a plain-ndarray factory so it returns a MaskedArray."""
    __doc__: Any
    def __init__(self, funcname, params=...): ...
    def getdoc(self): ...
    def __call__(self, *args, **params): ...
459
+
460
# Array factories converted to produce masked arrays.
arange: _convert2ma
empty: _convert2ma
empty_like: _convert2ma
frombuffer: _convert2ma
fromfunction: _convert2ma
identity: _convert2ma
ones: _convert2ma
zeros: _convert2ma

def append(a, b, axis=...): ...
def dot(a, b, strict=..., out=...): ...
def mask_rowcols(a, axis=...): ...
llava_next/lib/python3.10/site-packages/numpy/ma/extras.py ADDED
@@ -0,0 +1,2133 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Masked arrays add-ons.
3
+
4
+ A collection of utilities for `numpy.ma`.
5
+
6
+ :author: Pierre Gerard-Marchant
7
+ :contact: pierregm_at_uga_dot_edu
8
+ :version: $Id: extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $
9
+
10
+ """
11
+ __all__ = [
12
+ 'apply_along_axis', 'apply_over_axes', 'atleast_1d', 'atleast_2d',
13
+ 'atleast_3d', 'average', 'clump_masked', 'clump_unmasked', 'column_stack',
14
+ 'compress_cols', 'compress_nd', 'compress_rowcols', 'compress_rows',
15
+ 'count_masked', 'corrcoef', 'cov', 'diagflat', 'dot', 'dstack', 'ediff1d',
16
+ 'flatnotmasked_contiguous', 'flatnotmasked_edges', 'hsplit', 'hstack',
17
+ 'isin', 'in1d', 'intersect1d', 'mask_cols', 'mask_rowcols', 'mask_rows',
18
+ 'masked_all', 'masked_all_like', 'median', 'mr_', 'ndenumerate',
19
+ 'notmasked_contiguous', 'notmasked_edges', 'polyfit', 'row_stack',
20
+ 'setdiff1d', 'setxor1d', 'stack', 'unique', 'union1d', 'vander', 'vstack',
21
+ ]
22
+
23
+ import itertools
24
+ import warnings
25
+
26
+ from . import core as ma
27
+ from .core import (
28
+ MaskedArray, MAError, add, array, asarray, concatenate, filled, count,
29
+ getmask, getmaskarray, make_mask_descr, masked, masked_array, mask_or,
30
+ nomask, ones, sort, zeros, getdata, get_masked_subclass, dot
31
+ )
32
+
33
+ import numpy as np
34
+ from numpy import ndarray, array as nxarray
35
+ from numpy.core.multiarray import normalize_axis_index
36
+ from numpy.core.numeric import normalize_axis_tuple
37
+ from numpy.lib.function_base import _ureduce
38
+ from numpy.lib.index_tricks import AxisConcatenator
39
+
40
+
41
def issequence(seq):
    """Return True when `seq` is a sequence type (ndarray, list or tuple)."""
    return isinstance(seq, (ndarray, list, tuple))
47
+
48
+
49
def count_masked(arr, axis=None):
    """
    Count the number of masked elements along the given axis.

    Parameters
    ----------
    arr : array_like
        An array with (possibly) masked elements.
    axis : int, optional
        Axis along which to count. If None (default), a flattened
        version of the array is used.

    Returns
    -------
    count : int, ndarray
        The total number of masked elements (axis=None) or the number
        of masked elements along each slice of the given axis.

    See Also
    --------
    MaskedArray.count : Count non-masked elements.
    """
    # The mask is a boolean array, so summing it counts True (masked) cells.
    mask = getmaskarray(arr)
    return mask.sum(axis)
101
+
102
+
103
def masked_all(shape, dtype=float):
    """
    Empty masked array with all elements masked.

    Return an empty masked array of the given shape and dtype, where all
    the data are masked.

    Parameters
    ----------
    shape : int or tuple of ints
        Shape of the required MaskedArray, e.g., ``(2, 3)`` or ``2``.
    dtype : dtype, optional
        Data type of the output.

    Returns
    -------
    a : MaskedArray
        A masked array with all data masked.

    See Also
    --------
    masked_all_like : Empty masked array modelled on an existing array.
    """
    # Uninitialized data is fine: every element is masked from the start.
    data = np.empty(shape, dtype)
    # make_mask_descr handles structured dtypes (one bool per field).
    full_mask = np.ones(shape, make_mask_descr(dtype))
    return masked_array(data, mask=full_mask)
153
+
154
+
155
def masked_all_like(arr):
    """
    Empty masked array with the properties of an existing array.

    Return an empty masked array of the same shape and dtype as
    the array `arr`, where all the data are masked.

    Parameters
    ----------
    arr : ndarray
        An array describing the shape and dtype of the required MaskedArray.

    Returns
    -------
    a : MaskedArray
        A masked array with all data masked.

    Raises
    ------
    AttributeError
        If `arr` doesn't have a shape attribute (i.e. not an ndarray)

    See Also
    --------
    masked_all : Empty masked array with all elements masked.
    """
    # View an uninitialized copy as a MaskedArray, then mask everything.
    result = np.empty_like(arr).view(MaskedArray)
    result._mask = np.ones(result.shape, dtype=make_mask_descr(result.dtype))
    return result
208
+
209
+
210
+ #####--------------------------------------------------------------------------
211
+ #---- --- Standard functions ---
212
+ #####--------------------------------------------------------------------------
213
+ class _fromnxfunction:
214
+ """
215
+ Defines a wrapper to adapt NumPy functions to masked arrays.
216
+
217
+
218
+ An instance of `_fromnxfunction` can be called with the same parameters
219
+ as the wrapped NumPy function. The docstring of `newfunc` is adapted from
220
+ the wrapped function as well, see `getdoc`.
221
+
222
+ This class should not be used directly. Instead, one of its extensions that
223
+ provides support for a specific type of input should be used.
224
+
225
+ Parameters
226
+ ----------
227
+ funcname : str
228
+ The name of the function to be adapted. The function should be
229
+ in the NumPy namespace (i.e. ``np.funcname``).
230
+
231
+ """
232
+
233
+ def __init__(self, funcname):
234
+ self.__name__ = funcname
235
+ self.__doc__ = self.getdoc()
236
+
237
+ def getdoc(self):
238
+ """
239
+ Retrieve the docstring and signature from the function.
240
+
241
+ The ``__doc__`` attribute of the function is used as the docstring for
242
+ the new masked array version of the function. A note on application
243
+ of the function to the mask is appended.
244
+
245
+ Parameters
246
+ ----------
247
+ None
248
+
249
+ """
250
+ npfunc = getattr(np, self.__name__, None)
251
+ doc = getattr(npfunc, '__doc__', None)
252
+ if doc:
253
+ sig = self.__name__ + ma.get_object_signature(npfunc)
254
+ doc = ma.doc_note(doc, "The function is applied to both the _data "
255
+ "and the _mask, if any.")
256
+ return '\n\n'.join((sig, doc))
257
+ return
258
+
259
+ def __call__(self, *args, **params):
260
+ pass
261
+
262
+
263
+ class _fromnxfunction_single(_fromnxfunction):
264
+ """
265
+ A version of `_fromnxfunction` that is called with a single array
266
+ argument followed by auxiliary args that are passed verbatim for
267
+ both the data and mask calls.
268
+ """
269
+ def __call__(self, x, *args, **params):
270
+ func = getattr(np, self.__name__)
271
+ if isinstance(x, ndarray):
272
+ _d = func(x.__array__(), *args, **params)
273
+ _m = func(getmaskarray(x), *args, **params)
274
+ return masked_array(_d, mask=_m)
275
+ else:
276
+ _d = func(np.asarray(x), *args, **params)
277
+ _m = func(getmaskarray(x), *args, **params)
278
+ return masked_array(_d, mask=_m)
279
+
280
+
281
+ class _fromnxfunction_seq(_fromnxfunction):
282
+ """
283
+ A version of `_fromnxfunction` that is called with a single sequence
284
+ of arrays followed by auxiliary args that are passed verbatim for
285
+ both the data and mask calls.
286
+ """
287
+ def __call__(self, x, *args, **params):
288
+ func = getattr(np, self.__name__)
289
+ _d = func(tuple([np.asarray(a) for a in x]), *args, **params)
290
+ _m = func(tuple([getmaskarray(a) for a in x]), *args, **params)
291
+ return masked_array(_d, mask=_m)
292
+
293
+
294
+ class _fromnxfunction_args(_fromnxfunction):
295
+ """
296
+ A version of `_fromnxfunction` that is called with multiple array
297
+ arguments. The first non-array-like input marks the beginning of the
298
+ arguments that are passed verbatim for both the data and mask calls.
299
+ Array arguments are processed independently and the results are
300
+ returned in a list. If only one array is found, the return value is
301
+ just the processed array instead of a list.
302
+ """
303
+ def __call__(self, *args, **params):
304
+ func = getattr(np, self.__name__)
305
+ arrays = []
306
+ args = list(args)
307
+ while len(args) > 0 and issequence(args[0]):
308
+ arrays.append(args.pop(0))
309
+ res = []
310
+ for x in arrays:
311
+ _d = func(np.asarray(x), *args, **params)
312
+ _m = func(getmaskarray(x), *args, **params)
313
+ res.append(masked_array(_d, mask=_m))
314
+ if len(arrays) == 1:
315
+ return res[0]
316
+ return res
317
+
318
+
319
+ class _fromnxfunction_allargs(_fromnxfunction):
320
+ """
321
+ A version of `_fromnxfunction` that is called with multiple array
322
+ arguments. Similar to `_fromnxfunction_args` except that all args
323
+ are converted to arrays even if they are not so already. This makes
324
+ it possible to process scalars as 1-D arrays. Only keyword arguments
325
+ are passed through verbatim for the data and mask calls. Arrays
326
+ arguments are processed independently and the results are returned
327
+ in a list. If only one arg is present, the return value is just the
328
+ processed array instead of a list.
329
+ """
330
+ def __call__(self, *args, **params):
331
+ func = getattr(np, self.__name__)
332
+ res = []
333
+ for x in args:
334
+ _d = func(np.asarray(x), **params)
335
+ _m = func(getmaskarray(x), **params)
336
+ res.append(masked_array(_d, mask=_m))
337
+ if len(args) == 1:
338
+ return res[0]
339
+ return res
340
+
341
+
342
# Masked versions of the identically-named numpy functions, built from the
# wrapper classes above: each applies the numpy function to the data and to
# the mask and recombines the results into a masked array.
atleast_1d = _fromnxfunction_allargs('atleast_1d')
atleast_2d = _fromnxfunction_allargs('atleast_2d')
atleast_3d = _fromnxfunction_allargs('atleast_3d')

vstack = row_stack = _fromnxfunction_seq('vstack')
hstack = _fromnxfunction_seq('hstack')
column_stack = _fromnxfunction_seq('column_stack')
dstack = _fromnxfunction_seq('dstack')
stack = _fromnxfunction_seq('stack')

hsplit = _fromnxfunction_single('hsplit')

diagflat = _fromnxfunction_single('diagflat')
355
+
356
+
357
+ #####--------------------------------------------------------------------------
358
+ #----
359
+ #####--------------------------------------------------------------------------
360
def flatten_inplace(seq):
    """Flatten a sequence in place."""
    # Walk the sequence left to right; whenever the current slot holds an
    # iterable, splice its items in at the same position and re-examine the
    # slot until it contains a non-iterable element.
    pos = 0
    while pos != len(seq):
        while hasattr(seq[pos], '__iter__'):
            seq[pos:pos + 1] = seq[pos]
        pos += 1
    return seq
368
+
369
+
370
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
    """
    (This docstring should be overwritten)
    """
    arr = array(arr, copy=False, subok=True)
    nd = arr.ndim
    axis = normalize_axis_index(axis, nd)
    # `ind` enumerates positions over all axes except `axis`; `i` is an
    # object index whose `axis` slot stays a full slice to extract 1-D lines.
    ind = [0] * (nd - 1)
    i = np.zeros(nd, 'O')
    indlist = list(range(nd))
    indlist.remove(axis)
    i[axis] = slice(None, None)
    outshape = np.asarray(arr.shape).take(indlist)
    i.put(indlist, ind)
    # Evaluate func1d on the first 1-D slice to discover the output shape.
    res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
    # if res is a number, then we have a smaller output array
    asscalar = np.isscalar(res)
    if not asscalar:
        try:
            len(res)
        except TypeError:
            asscalar = True
    # Note: we shouldn't set the dtype of the output from the first result
    # so we force the type to object, and build a list of dtypes. We'll
    # just take the largest, to avoid some downcasting
    dtypes = []
    if asscalar:
        # Scalar results: output drops the reduced axis entirely.
        dtypes.append(np.asarray(res).dtype)
        outarr = zeros(outshape, object)
        outarr[tuple(ind)] = res
        Ntot = np.prod(outshape)
        k = 1
        while k < Ntot:
            # increment the index (odometer-style, rightmost digit first)
            ind[-1] += 1
            n = -1
            while (ind[n] >= outshape[n]) and (n > (1 - nd)):
                ind[n - 1] += 1
                ind[n] = 0
                n -= 1
            i.put(indlist, ind)
            res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
            outarr[tuple(ind)] = res
            dtypes.append(asarray(res).dtype)
            k += 1
    else:
        # Array results: the reduced axis is replaced by the result's shape.
        res = array(res, copy=False, subok=True)
        j = i.copy()
        j[axis] = ([slice(None, None)] * res.ndim)
        j.put(indlist, ind)
        Ntot = np.prod(outshape)
        holdshape = outshape
        outshape = list(arr.shape)
        outshape[axis] = res.shape
        dtypes.append(asarray(res).dtype)
        outshape = flatten_inplace(outshape)
        outarr = zeros(outshape, object)
        outarr[tuple(flatten_inplace(j.tolist()))] = res
        k = 1
        while k < Ntot:
            # increment the index (same odometer scheme as the scalar branch)
            ind[-1] += 1
            n = -1
            while (ind[n] >= holdshape[n]) and (n > (1 - nd)):
                ind[n - 1] += 1
                ind[n] = 0
                n -= 1
            i.put(indlist, ind)
            j.put(indlist, ind)
            res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
            outarr[tuple(flatten_inplace(j.tolist()))] = res
            dtypes.append(asarray(res).dtype)
            k += 1
    # Cast the object array to the widest dtype that appeared among results.
    max_dtypes = np.dtype(np.asarray(dtypes).max())
    if not hasattr(arr, '_mask'):
        result = np.asarray(outarr, dtype=max_dtypes)
    else:
        result = asarray(outarr, dtype=max_dtypes)
        result.fill_value = ma.default_fill_value(result)
    return result
apply_along_axis.__doc__ = np.apply_along_axis.__doc__
451
+
452
+
453
def apply_over_axes(func, a, axes):
    """
    (This docstring will be overwritten)
    """
    val = asarray(a)
    ndim = a.ndim
    # A scalar axis is treated as a 1-element sequence of axes.
    if array(axes).ndim == 0:
        axes = (axes,)
    for ax in axes:
        if ax < 0:
            ax += ndim
        res = func(val, ax)
        if res.ndim != val.ndim:
            # The reduction dropped a dimension: re-insert it so the
            # running result keeps the original number of dimensions.
            res = ma.expand_dims(res, ax)
            if res.ndim != val.ndim:
                raise ValueError("function is not returning "
                                 "an array of the correct shape")
        val = res
    return val
476
+
477
+
478
# Graft numpy's apply_over_axes docstring (up to its Notes section) onto the
# masked version, then append masked-array specific examples.
if apply_over_axes.__doc__ is not None:
    apply_over_axes.__doc__ = np.apply_over_axes.__doc__[
        :np.apply_over_axes.__doc__.find('Notes')].rstrip() + \
    """

    Examples
    --------
    >>> a = np.ma.arange(24).reshape(2,3,4)
    >>> a[:,0,1] = np.ma.masked
    >>> a[:,1,:] = np.ma.masked
    >>> a
    masked_array(
      data=[[[0, --, 2, 3],
             [--, --, --, --],
             [8, 9, 10, 11]],
            [[12, --, 14, 15],
             [--, --, --, --],
             [20, 21, 22, 23]]],
      mask=[[[False,  True, False, False],
             [ True,  True,  True,  True],
             [False, False, False, False]],
            [[False,  True, False, False],
             [ True,  True,  True,  True],
             [False, False, False, False]]],
      fill_value=999999)
    >>> np.ma.apply_over_axes(np.ma.sum, a, [0,2])
    masked_array(
      data=[[[46],
             [--],
             [124]]],
      mask=[[[False],
             [ True],
             [False]]],
      fill_value=999999)

    Tuple axis arguments to ufuncs are equivalent:

    >>> np.ma.sum(a, axis=(0,2)).reshape((1,-1,1))
    masked_array(
      data=[[[46],
             [--],
             [124]]],
      mask=[[[False],
             [ True],
             [False]]],
      fill_value=999999)
    """
525
+
526
+
527
def average(a, axis=None, weights=None, returned=False, *,
            keepdims=np._NoValue):
    """
    Return the weighted average of array over the given axis.

    Parameters
    ----------
    a : array_like
        Data to be averaged.
        Masked entries are not taken into account in the computation.
    axis : int, optional
        Axis along which to average `a`. If None, averaging is done over
        the flattened array.
    weights : array_like, optional
        The importance that each element has in the computation of the average.
        The weights array can either be 1-D (in which case its length must be
        the size of `a` along the given axis) or of the same shape as `a`.
        If ``weights=None``, then all data in `a` are assumed to have a
        weight equal to one. The 1-D calculation is::

            avg = sum(a * weights) / sum(weights)

        The only constraint on `weights` is that `sum(weights)` must not be 0.
    returned : bool, optional
        Flag indicating whether a tuple ``(result, sum of weights)``
        should be returned as output (True), or just the result (False).
        Default is False.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left
        in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the original `a`.
        *Note:* `keepdims` will not work with instances of `numpy.matrix`
        or other classes whose methods do not support `keepdims`.

        .. versionadded:: 1.23.0

    Returns
    -------
    average, [sum_of_weights] : (tuple of) scalar or MaskedArray
        The average along the specified axis. When returned is `True`,
        return a tuple with the average as the first element and the sum
        of the weights as the second element. The return type is `np.float64`
        if `a` is of integer type and floats smaller than `float64`, or the
        input data-type, otherwise. If returned, `sum_of_weights` is always
        `float64`.

    Examples
    --------
    >>> a = np.ma.array([1., 2., 3., 4.], mask=[False, False, True, True])
    >>> np.ma.average(a, weights=[3, 1, 0, 0])
    1.25

    >>> x = np.ma.arange(6.).reshape(3, 2)
    >>> x
    masked_array(
      data=[[0., 1.],
            [2., 3.],
            [4., 5.]],
      mask=False,
      fill_value=1e+20)
    >>> avg, sumweights = np.ma.average(x, axis=0, weights=[1, 2, 3],
    ...                                 returned=True)
    >>> avg
    masked_array(data=[2.6666666666666665, 3.6666666666666665],
                 mask=[False, False],
           fill_value=1e+20)

    With ``keepdims=True``, the following result has shape (3, 1).

    >>> np.ma.average(x, axis=1, keepdims=True)
    masked_array(
      data=[[0.5],
            [2.5],
            [4.5]],
      mask=False,
      fill_value=1e+20)
    """
    a = asarray(a)
    m = getmask(a)

    # inspired by 'average' in numpy/lib/function_base.py

    if keepdims is np._NoValue:
        # Don't pass on the keepdims argument if one wasn't given.
        keepdims_kw = {}
    else:
        keepdims_kw = {'keepdims': keepdims}

    if weights is None:
        # Unweighted: the masked mean already ignores masked entries; the
        # "sum of weights" is then just the count of unmasked entries.
        avg = a.mean(axis, **keepdims_kw)
        scl = avg.dtype.type(a.count(axis))
    else:
        wgt = asarray(weights)

        # Integer/bool inputs promote to at least float64, matching np.average.
        if issubclass(a.dtype.type, (np.integer, np.bool_)):
            result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8')
        else:
            result_dtype = np.result_type(a.dtype, wgt.dtype)

        # Sanity checks
        if a.shape != wgt.shape:
            if axis is None:
                raise TypeError(
                    "Axis must be specified when shapes of a and weights "
                    "differ.")
            if wgt.ndim != 1:
                raise TypeError(
                    "1D weights expected when shapes of a and weights differ.")
            if wgt.shape[0] != a.shape[axis]:
                raise ValueError(
                    "Length of weights not compatible with specified axis.")

            # setup wgt to broadcast along axis
            wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape, subok=True)
            wgt = wgt.swapaxes(-1, axis)

        if m is not nomask:
            # Zero out (and mask) the weights of masked data entries so
            # they contribute nothing to either sum.
            wgt = wgt*(~a.mask)
            wgt.mask |= a.mask

        scl = wgt.sum(axis=axis, dtype=result_dtype, **keepdims_kw)
        avg = np.multiply(a, wgt,
                          dtype=result_dtype).sum(axis, **keepdims_kw) / scl

    if returned:
        if scl.shape != avg.shape:
            scl = np.broadcast_to(scl, avg.shape).copy()
        return avg, scl
    else:
        return avg
657
+
658
+
659
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
    """
    Compute the median along the specified axis.

    Returns the median of the array elements.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    axis : int, optional
        Axis along which the medians are computed. The default (None) is
        to compute the median along a flattened version of the array.
    out : ndarray, optional
        Alternative output array in which to place the result. It must
        have the same shape and buffer length as the expected output
        but the type will be cast if necessary.
    overwrite_input : bool, optional
        If True, then allow use of memory of input array (a) for
        calculations. The input array will be modified by the call to
        median. This will save memory when you do not need to preserve
        the contents of the input array. Treat the input as undefined,
        but it will probably be fully or partially sorted. Default is
        False. Note that, if `overwrite_input` is True, and the input
        is not already an `ndarray`, an error will be raised.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left
        in the result as dimensions with size one. With this option,
        the result will broadcast correctly against the input array.

        .. versionadded:: 1.10.0

    Returns
    -------
    median : ndarray
        A new array holding the result is returned unless out is
        specified, in which case a reference to out is returned.
        Return data-type is `float64` for integers and floats smaller than
        `float64`, or the input data-type, otherwise.

    See Also
    --------
    mean

    Notes
    -----
    Given a vector ``V`` with ``N`` non masked values, the median of ``V``
    is the middle value of a sorted copy of ``V`` (``Vs``) - i.e.
    ``Vs[(N-1)/2]``, when ``N`` is odd, or ``{Vs[N/2 - 1] + Vs[N/2]}/2``
    when ``N`` is even.

    Examples
    --------
    >>> x = np.ma.array(np.arange(8), mask=[0]*4 + [1]*4)
    >>> np.ma.median(x)
    1.5

    >>> x = np.ma.array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4)
    >>> np.ma.median(x)
    2.5
    >>> np.ma.median(x, axis=-1, overwrite_input=True)
    masked_array(data=[2.0, 5.0],
                 mask=[False, False],
           fill_value=1e+20)

    """
    # Fast path: inputs without a mask attribute are delegated to np.median.
    if not hasattr(a, 'mask'):
        m = np.median(getdata(a, subok=True), axis=axis,
                      out=out, overwrite_input=overwrite_input,
                      keepdims=keepdims)
        # Wrap array results so the return type is consistently masked.
        if isinstance(m, np.ndarray) and 1 <= m.ndim:
            return masked_array(m, copy=False)
        else:
            return m

    # _ureduce handles the axis/keepdims bookkeeping around _median.
    return _ureduce(a, func=_median, keepdims=keepdims, axis=axis, out=out,
                    overwrite_input=overwrite_input)
736
+
737
+
738
def _median(a, axis=None, out=None, overwrite_input=False):
    """Masked-aware median core; assumes `a` is a masked array."""
    # when an unmasked NaN is present return it, so we need to sort the NaN
    # values behind the mask
    if np.issubdtype(a.dtype, np.inexact):
        fill_value = np.inf
    else:
        fill_value = None
    # Obtain a sorted view/copy with masked entries pushed to the end.
    if overwrite_input:
        if axis is None:
            asorted = a.ravel()
            asorted.sort(fill_value=fill_value)
        else:
            a.sort(axis=axis, fill_value=fill_value)
            asorted = a
    else:
        asorted = sort(a, axis=axis, fill_value=fill_value)

    if axis is None:
        axis = 0
    else:
        axis = normalize_axis_index(axis, asorted.ndim)

    if asorted.shape[axis] == 0:
        # for empty axis integer indices fail so use slicing to get same result
        # as median (which is mean of empty slice = nan)
        indexer = [slice(None)] * asorted.ndim
        indexer[axis] = slice(0, 0)
        indexer = tuple(indexer)
        return np.ma.mean(asorted[indexer], axis=axis, out=out)

    if asorted.ndim == 1:
        # 1-D case: pick the middle one or two unmasked values directly.
        idx, odd = divmod(count(asorted), 2)
        mid = asorted[idx + odd - 1:idx + 1]
        if np.issubdtype(asorted.dtype, np.inexact) and asorted.size > 0:
            # avoid inf / x = masked
            s = mid.sum(out=out)
            if not odd:
                s = np.true_divide(s, 2., casting='safe', out=out)
            s = np.lib.utils._median_nancheck(asorted, s, axis)
        else:
            s = mid.mean(out=out)

        # if result is masked either the input contained enough
        # minimum_fill_value so that it would be the median or all values
        # masked
        if np.ma.is_masked(s) and not np.all(asorted.mask):
            return np.ma.minimum_fill_value(asorted)
        return s

    # N-D case: per-lane counts of unmasked values determine the positions
    # of the low and high median within each sorted lane.
    counts = count(asorted, axis=axis, keepdims=True)
    h = counts // 2

    # duplicate high if odd number of elements so mean does nothing
    odd = counts % 2 == 1
    l = np.where(odd, h, h-1)

    lh = np.concatenate([l,h], axis=axis)

    # get low and high median
    low_high = np.take_along_axis(asorted, lh, axis=axis)

    def replace_masked(s):
        # Replace masked entries with minimum_full_value unless it all values
        # are masked. This is required as the sort order of values equal or
        # larger than the fill value is undefined and a valid value placed
        # elsewhere, e.g. [4, --, inf].
        if np.ma.is_masked(s):
            rep = (~np.all(asorted.mask, axis=axis, keepdims=True)) & s.mask
            s.data[rep] = np.ma.minimum_fill_value(asorted)
            s.mask[rep] = False

    replace_masked(low_high)

    if np.issubdtype(asorted.dtype, np.inexact):
        # avoid inf / x = masked
        s = np.ma.sum(low_high, axis=axis, out=out)
        np.true_divide(s.data, 2., casting='unsafe', out=s.data)

        s = np.lib.utils._median_nancheck(asorted, s, axis)
    else:
        s = np.ma.mean(low_high, axis=axis, out=out)

    return s
821
+
822
+
823
def compress_nd(x, axis=None):
    """Suppress slices from multiple dimensions which contain masked values.

    Parameters
    ----------
    x : array_like, MaskedArray
        The array to operate on. If not a MaskedArray instance (or if no
        array elements are masked), `x` is interpreted as a MaskedArray
        with `mask` set to `nomask`.
    axis : tuple of ints or int, optional
        Which dimensions to suppress slices from. A tuple selects several
        axes, an int selects a single axis, and None (the default) selects
        every axis of `x`.

    Returns
    -------
    compress_array : ndarray
        The compressed array.
    """
    x = asarray(x)
    mask = getmask(x)
    # Normalize `axis` to a tuple of non-negative ints.
    axis = (tuple(range(x.ndim)) if axis is None
            else normalize_axis_tuple(axis, x.ndim))

    # Fast paths: nothing masked -> raw data; everything masked -> empty.
    if mask is nomask or not mask.any():
        return x._data
    if mask.all():
        return nxarray([])

    # Along each requested axis, keep only the slices that contain no
    # masked value anywhere in the remaining dimensions.
    data = x._data
    for ax in axis:
        other_axes = tuple(a for a in range(x.ndim) if a != ax)
        keep = ~mask.any(axis=other_axes)
        data = data[(slice(None),) * ax + (keep,)]
    return data
864
+
865
+
866
def compress_rowcols(x, axis=None):
    """
    Suppress the rows and/or columns of a 2-D array that contain
    masked values.

    The suppression behavior is selected with the `axis` parameter:
    None suppresses both rows and columns, 0 only rows, and 1 (or -1)
    only columns.

    Parameters
    ----------
    x : array_like, MaskedArray
        The array to operate on. If not a MaskedArray instance (or if no
        array elements are masked), `x` is interpreted as a MaskedArray
        with `mask` set to `nomask`. Must be a 2D array.
    axis : int, optional
        Axis along which to perform the operation. Default is None.

    Returns
    -------
    compressed_array : ndarray
        The compressed array.

    Examples
    --------
    >>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0],
    ...                                                   [1, 0, 0],
    ...                                                   [0, 0, 0]])
    >>> np.ma.compress_rowcols(x)
    array([[7, 8]])
    >>> np.ma.compress_rowcols(x, 0)
    array([[6, 7, 8]])
    >>> np.ma.compress_rowcols(x, 1)
    array([[1, 2],
           [4, 5],
           [7, 8]])

    """
    # compress_nd does all the work; this wrapper only enforces 2-D input.
    if asarray(x).ndim != 2:
        raise NotImplementedError("compress_rowcols works for 2D arrays only.")
    return compress_nd(x, axis=axis)
919
+
920
+
921
def compress_rows(a):
    """
    Suppress whole rows of a 2-D array that contain masked values.

    This is equivalent to ``np.ma.compress_rowcols(a, 0)``, see
    `compress_rowcols` for details.

    See Also
    --------
    compress_rowcols

    """
    arr = asarray(a)
    # Reject non-2D input here so the error names this function.
    if arr.ndim != 2:
        raise NotImplementedError("compress_rows works for 2D arrays only.")
    return compress_rowcols(arr, 0)
937
+
938
+
939
def compress_cols(a):
    """
    Suppress whole columns of a 2-D array that contain masked values.

    This is equivalent to ``np.ma.compress_rowcols(a, 1)``, see
    `compress_rowcols` for details.

    See Also
    --------
    compress_rowcols

    """
    arr = asarray(a)
    # Reject non-2D input here so the error names this function.
    if arr.ndim != 2:
        raise NotImplementedError("compress_cols works for 2D arrays only.")
    return compress_rowcols(arr, 1)
955
+
956
+
957
def mask_rowcols(a, axis=None):
    """
    Mask rows and/or columns of a 2D array that contain masked values.

    Mask whole rows and/or columns of a 2D array that contain
    masked values. The masking behavior is selected using the
    `axis` parameter.

    - If `axis` is None, rows *and* columns are masked.
    - If `axis` is 0, only rows are masked.
    - If `axis` is 1 or -1, only columns are masked.

    Parameters
    ----------
    a : array_like, MaskedArray
        The array to mask. If not a MaskedArray instance (or if no array
        elements are masked), the result is a MaskedArray with `mask` set
        to `nomask` (False). Must be a 2D array.
    axis : int, optional
        Axis along which to perform the operation. If None, applies to a
        flattened version of the array.

    Returns
    -------
    a : MaskedArray
        A modified version of the input array, masked depending on the value
        of the `axis` parameter.

    Raises
    ------
    NotImplementedError
        If input array `a` is not 2D.

    See Also
    --------
    mask_rows : Mask rows of a 2D array that contain masked values.
    mask_cols : Mask cols of a 2D array that contain masked values.
    masked_where : Mask where a condition is met.

    Notes
    -----
    The input array's mask is modified by this function.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = np.zeros((3, 3), dtype=int)
    >>> a[1, 1] = 1
    >>> a
    array([[0, 0, 0],
           [0, 1, 0],
           [0, 0, 0]])
    >>> a = ma.masked_equal(a, 1)
    >>> a
    masked_array(
      data=[[0, 0, 0],
            [0, --, 0],
            [0, 0, 0]],
      mask=[[False, False, False],
            [False,  True, False],
            [False, False, False]],
      fill_value=1)
    >>> ma.mask_rowcols(a)
    masked_array(
      data=[[0, --, 0],
            [--, --, --],
            [0, --, 0]],
      mask=[[False,  True, False],
            [ True,  True,  True],
            [False,  True, False]],
      fill_value=1)

    """
    a = array(a, subok=False)
    if a.ndim != 2:
        raise NotImplementedError("mask_rowcols works for 2D arrays only.")
    m = getmask(a)
    # Nothing is masked: return a
    if m is nomask or not m.any():
        return a
    # Row/column indices of every masked entry.
    maskedval = m.nonzero()
    a._mask = a._mask.copy()
    # NOTE: `not axis` is True for both axis=None and axis=0, so rows are
    # masked in both cases; columns are handled by the check below.
    if not axis:
        a[np.unique(maskedval[0])] = masked
    if axis in [None, 1, -1]:
        a[:, np.unique(maskedval[1])] = masked
    return a
1044
+
1045
+
1046
def mask_rows(a, axis=np._NoValue):
    """
    Mask rows of a 2D array that contain masked values.

    This function is a shortcut to ``mask_rowcols`` with `axis` equal to 0.

    See Also
    --------
    mask_rowcols : Mask rows and/or columns of a 2D array.
    masked_where : Mask where a condition is met.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = ma.masked_equal(np.array([[0, 0, 0],
    ...                               [0, 1, 0],
    ...                               [0, 0, 0]]), 1)
    >>> ma.mask_rows(a)
    masked_array(
      data=[[0, 0, 0],
            [--, --, --],
            [0, 0, 0]],
      mask=[[False, False, False],
            [ True,  True,  True],
            [False, False, False]],
      fill_value=1)

    """
    # The axis parameter has never had an effect; warn until the
    # deprecation (NumPy 1.18.0, 2019-11-28) expires and it is removed.
    if axis is not np._NoValue:
        warnings.warn(
            "The axis argument has always been ignored, in future passing it "
            "will raise TypeError", DeprecationWarning, stacklevel=2)
    return mask_rowcols(a, 0)
1095
+
1096
+
1097
def mask_cols(a, axis=np._NoValue):
    """
    Mask columns of a 2D array that contain masked values.

    This function is a shortcut to ``mask_rowcols`` with `axis` equal to 1.

    See Also
    --------
    mask_rowcols : Mask rows and/or columns of a 2D array.
    masked_where : Mask where a condition is met.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> a = ma.masked_equal(np.array([[0, 0, 0],
    ...                               [0, 1, 0],
    ...                               [0, 0, 0]]), 1)
    >>> ma.mask_cols(a)
    masked_array(
      data=[[0, --, 0],
            [0, --, 0],
            [0, --, 0]],
      mask=[[False,  True, False],
            [False,  True, False],
            [False,  True, False]],
      fill_value=1)

    """
    # The axis parameter has never had an effect; warn until the
    # deprecation (NumPy 1.18.0, 2019-11-28) expires and it is removed.
    if axis is not np._NoValue:
        warnings.warn(
            "The axis argument has always been ignored, in future passing it "
            "will raise TypeError", DeprecationWarning, stacklevel=2)
    return mask_rowcols(a, 1)
1145
+
1146
+
1147
+ #####--------------------------------------------------------------------------
1148
+ #---- --- arraysetops ---
1149
+ #####--------------------------------------------------------------------------
1150
+
1151
def ediff1d(arr, to_end=None, to_begin=None):
    """
    Compute the differences between consecutive elements of an array.

    This function is the equivalent of `numpy.ediff1d` that takes masked
    values into account, see `numpy.ediff1d` for details.

    See Also
    --------
    numpy.ediff1d : Equivalent function for ndarrays.

    """
    flat = ma.asanyarray(arr).flat
    diffs = flat[1:] - flat[:-1]

    pieces = [diffs]
    if to_begin is not None:
        pieces.insert(0, to_begin)
    if to_end is not None:
        pieces.append(to_end)

    # Only stack when something was actually prepended or appended: this
    # spares a copy of a potentially large array in the common case.
    if len(pieces) > 1:
        return hstack(pieces)
    return diffs
1178
+
1179
+
1180
def unique(ar1, return_index=False, return_inverse=False):
    """
    Finds the unique elements of an array.

    Masked values are considered the same element (masked). The output array
    is always a masked array. See `numpy.unique` for more details.

    See Also
    --------
    numpy.unique : Equivalent function for ndarrays.

    Examples
    --------
    >>> import numpy.ma as ma
    >>> masked_a = ma.masked_array([1, 2, 1000, 2, 3], [0, 0, 1, 0, 0])
    >>> ma.unique(masked_a)
    masked_array(data=[1, 2, 3, --],
                 mask=[False, False, False,  True],
           fill_value=999999)
    """
    result = np.unique(ar1,
                       return_index=return_index,
                       return_inverse=return_inverse)
    # With index/inverse requested np.unique returns a tuple: re-wrap only
    # the unique-values array; the index arrays stay plain ndarrays.
    if isinstance(result, tuple):
        return (result[0].view(MaskedArray),) + tuple(result[1:])
    return result.view(MaskedArray)
1228
+
1229
+
1230
def intersect1d(ar1, ar2, assume_unique=False):
    """
    Returns the unique elements common to both arrays.

    Masked values are considered equal one to the other.
    The output is always a masked array.

    See `numpy.intersect1d` for more details.

    See Also
    --------
    numpy.intersect1d : Equivalent function for ndarrays.

    Examples
    --------
    >>> x = np.ma.array([1, 3, 3, 3], mask=[0, 0, 0, 1])
    >>> y = np.ma.array([3, 1, 1, 1], mask=[0, 0, 0, 1])
    >>> np.ma.intersect1d(x, y)
    masked_array(data=[1, 3, --],
                 mask=[False, False,  True],
           fill_value=999999)

    """
    if assume_unique:
        aux = ma.concatenate((ar1, ar2))
    else:
        # Might be faster than unique( intersect1d( ar1, ar2 ) )?
        aux = ma.concatenate((unique(ar1), unique(ar2)))
    aux.sort()
    # After deduplicating each input, a value appearing twice in the sorted
    # concatenation must be present in both inputs.
    return aux[:-1][aux[1:] == aux[:-1]]
1260
+
1261
+
1262
def setxor1d(ar1, ar2, assume_unique=False):
    """
    Set exclusive-or of 1-D arrays with unique elements.

    The output is always a masked array. See `numpy.setxor1d` for more details.

    See Also
    --------
    numpy.setxor1d : Equivalent function for ndarrays.

    """
    if not assume_unique:
        ar1 = unique(ar1)
        ar2 = unique(ar2)

    aux = ma.concatenate((ar1, ar2))
    if aux.size == 0:
        return aux
    aux.sort()
    # Compare on filled data so masked entries (sorted together) pair up.
    auxf = aux.filled()
    # flag = ediff1d( aux, to_end = 1, to_begin = 1 ) == 0
    flag = ma.concatenate(([True], (auxf[1:] != auxf[:-1]), [True]))
    # flag2 = ediff1d( flag ) == 0
    # Two consecutive "boundary" flags mean the value occurred exactly once,
    # i.e. it belongs to exactly one of the two inputs.
    flag2 = (flag[1:] == flag[:-1])
    return aux[flag2]
1287
+
1288
+
1289
def in1d(ar1, ar2, assume_unique=False, invert=False):
    """
    Test whether each element of an array is also present in a second
    array.

    The output is always a masked array. See `numpy.in1d` for more details.

    We recommend using :func:`isin` instead of `in1d` for new code.

    See Also
    --------
    isin : Version of this function that preserves the shape of ar1.
    numpy.in1d : Equivalent function for ndarrays.

    Notes
    -----
    .. versionadded:: 1.4.0

    """
    if not assume_unique:
        # Deduplicate and remember how to map results back to ar1's layout.
        ar1, rev_idx = unique(ar1, return_inverse=True)
        ar2 = unique(ar2)

    ar = ma.concatenate((ar1, ar2))
    # We need this to be a stable sort, so always use 'mergesort'
    # here. The values from the first array should always come before
    # the values from the second array.
    order = ar.argsort(kind='mergesort')
    sar = ar[order]
    # A value of ar1 is in ar2 iff it is directly followed by an equal
    # value in the stably sorted concatenation.
    if invert:
        bool_ar = (sar[1:] != sar[:-1])
    else:
        bool_ar = (sar[1:] == sar[:-1])
    flag = ma.concatenate((bool_ar, [invert]))
    # Undo the sort to recover the membership flags for ar1's elements.
    indx = order.argsort(kind='mergesort')[:len(ar1)]

    if assume_unique:
        return flag[indx]
    else:
        return flag[indx][rev_idx]
1329
+
1330
+
1331
def isin(element, test_elements, assume_unique=False, invert=False):
    """
    Calculates `element in test_elements`, broadcasting over
    `element` only.

    The output is always a masked array of the same shape as `element`.
    See `numpy.isin` for more details.

    See Also
    --------
    in1d : Flattened version of this function.
    numpy.isin : Equivalent function for ndarrays.

    Notes
    -----
    .. versionadded:: 1.13.0

    """
    element = ma.asarray(element)
    # Delegate to the flattened test, then restore element's shape.
    flat_result = in1d(element, test_elements,
                       assume_unique=assume_unique, invert=invert)
    return flat_result.reshape(element.shape)
1352
+
1353
+
1354
def union1d(ar1, ar2):
    """
    Union of two arrays.

    The output is always a masked array. See `numpy.union1d` for more details.

    See Also
    --------
    numpy.union1d : Equivalent function for ndarrays.

    """
    # axis=None flattens both inputs before concatenating; deduplication
    # (and sorting) is then done by the masked `unique`.
    joined = ma.concatenate((ar1, ar2), axis=None)
    return unique(joined)
1366
+
1367
+
1368
def setdiff1d(ar1, ar2, assume_unique=False):
    """
    Set difference of 1D arrays with unique elements.

    The output is always a masked array. See `numpy.setdiff1d` for more
    details.

    See Also
    --------
    numpy.setdiff1d : Equivalent function for ndarrays.

    Examples
    --------
    >>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1])
    >>> np.ma.setdiff1d(x, [1, 2])
    masked_array(data=[3, --],
                 mask=[False,  True],
           fill_value=999999)

    """
    if not assume_unique:
        # De-duplicate both sides first so the in1d call below may assume
        # unique inputs.
        ar1 = unique(ar1)
        ar2 = unique(ar2)
    else:
        ar1 = ma.asarray(ar1).ravel()
    keep = in1d(ar1, ar2, assume_unique=True, invert=True)
    return ar1[keep]
1394
+
1395
+
1396
+ ###############################################################################
1397
+ # Covariance #
1398
+ ###############################################################################
1399
+
1400
+
1401
def _covhelper(x, y=None, rowvar=True, allow_masked=True):
    """
    Private function for the computation of covariance and correlation
    coefficients.

    Returns a 3-tuple ``(x, xnotmask, rowvar)`` where `x` is the (possibly
    concatenated) demeaned float data, `xnotmask` is an int array with 1 for
    unmasked entries and 0 for masked ones, and `rowvar` is normalized to
    0 or 1.
    """
    # Float copy so the in-place demeaning at the end cannot modify the
    # caller's data; ndmin=2 promotes 1-D input to a single-row matrix.
    x = ma.array(x, ndmin=2, copy=True, dtype=float)
    xmask = ma.getmaskarray(x)
    # Quick exit if we can't process masked data
    if not allow_masked and xmask.any():
        raise ValueError("Cannot process masked data.")
    #
    # A single row can only be a single variable, observed column-wise.
    if x.shape[0] == 1:
        rowvar = True
    # Make sure that rowvar is either 0 or 1
    rowvar = int(bool(rowvar))
    axis = 1 - rowvar
    # `tup` re-broadcasts the per-variable means over the observation axis.
    if rowvar:
        tup = (slice(None), None)
    else:
        tup = (None, slice(None))
    #
    if y is None:
        xnotmask = np.logical_not(xmask).astype(int)
    else:
        y = array(y, copy=False, ndmin=2, dtype=float)
        ymask = ma.getmaskarray(y)
        if not allow_masked and ymask.any():
            raise ValueError("Cannot process masked data.")
        if xmask.any() or ymask.any():
            if y.shape == x.shape:
                # Define some common mask
                common_mask = np.logical_or(xmask, ymask)
                if common_mask is not nomask:
                    # Propagate the pair-wise mask to both arrays and stop
                    # sharing the mask with the original inputs.
                    xmask = x._mask = y._mask = ymask = common_mask
                    x._sharedmask = False
                    y._sharedmask = False
        # Stack y onto x along the observation axis so both are demeaned and
        # counted together below.
        x = ma.concatenate((x, y), axis)
        xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis)).astype(int)
    # Demean in place; masked entries are ignored by the masked mean.
    x -= x.mean(axis=rowvar)[tup]
    return (x, xnotmask, rowvar)
1442
+
1443
+
1444
def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None):
    """
    Estimate the covariance matrix.

    Except for the handling of missing data this function does the same as
    `numpy.cov`. For more details and examples, see `numpy.cov`.

    By default, masked values are recognized as such. If `x` and `y` have the
    same shape, a common mask is allocated: if ``x[i,j]`` is masked, then
    ``y[i,j]`` will also be masked.
    Setting `allow_masked` to False will raise an exception if values are
    missing in either of the input arrays.

    Parameters
    ----------
    x : array_like
        A 1-D or 2-D array containing multiple variables and observations.
        Each row of `x` represents a variable, and each column a single
        observation of all those variables. Also see `rowvar` below.
    y : array_like, optional
        An additional set of variables and observations. `y` has the same
        shape as `x`.
    rowvar : bool, optional
        If `rowvar` is True (default), then each row represents a
        variable, with observations in the columns. Otherwise, the
        relationship is transposed: each column represents a variable,
        while the rows contain observations.
    bias : bool, optional
        Default normalization (False) is by ``(N-1)``, where ``N`` is the
        number of observations given (unbiased estimate). If `bias` is True,
        then normalization is by ``N``. This keyword can be overridden by
        the keyword ``ddof`` in numpy versions >= 1.5.
    allow_masked : bool, optional
        If True, masked values are propagated pair-wise: if a value is masked
        in `x`, the corresponding value is masked in `y`.
        If False, raises a `ValueError` exception when some values are
        missing.
    ddof : {None, int}, optional
        If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
        the number of observations; this overrides the value implied by
        ``bias``. The default value is ``None``.

        .. versionadded:: 1.5

    Raises
    ------
    ValueError
        Raised if some values are missing and `allow_masked` is False.

    See Also
    --------
    numpy.cov

    """
    # Validate ddof, then derive it from `bias` when not given explicitly.
    if ddof is not None and ddof != int(ddof):
        raise ValueError("ddof must be an integer")
    if ddof is None:
        ddof = 0 if bias else 1

    (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked)
    # `fact` counts, per pair of variables, how many observations are
    # simultaneously unmasked, adjusted by ddof.
    if rowvar:
        fact = np.dot(xnotmask, xnotmask.T) * 1. - ddof
        result = (dot(x, x.T.conj(), strict=False) / fact).squeeze()
    else:
        fact = np.dot(xnotmask.T, xnotmask) * 1. - ddof
        result = (dot(x.T, x.conj(), strict=False) / fact).squeeze()
    return result
1515
+
1516
+
1517
def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True,
             ddof=np._NoValue):
    """
    Return Pearson product-moment correlation coefficients.

    Except for the handling of missing data this function does the same as
    `numpy.corrcoef`. For more details and examples, see `numpy.corrcoef`.

    Parameters
    ----------
    x : array_like
        A 1-D or 2-D array containing multiple variables and observations.
        Each row of `x` represents a variable, and each column a single
        observation of all those variables. Also see `rowvar` below.
    y : array_like, optional
        An additional set of variables and observations. `y` has the same
        shape as `x`.
    rowvar : bool, optional
        If `rowvar` is True (default), then each row represents a
        variable, with observations in the columns. Otherwise, the relationship
        is transposed: each column represents a variable, while the rows
        contain observations.
    bias : _NoValue, optional
        Has no effect, do not use.

        .. deprecated:: 1.10.0
    allow_masked : bool, optional
        If True, masked values are propagated pair-wise: if a value is masked
        in `x`, the corresponding value is masked in `y`.
        If False, raises an exception. Because `bias` is deprecated, this
        argument needs to be treated as keyword only to avoid a warning.
    ddof : _NoValue, optional
        Has no effect, do not use.

        .. deprecated:: 1.10.0

    See Also
    --------
    numpy.corrcoef : Equivalent function in top-level NumPy module.
    cov : Estimate the covariance matrix.

    Notes
    -----
    This function accepts but discards arguments `bias` and `ddof`. This is
    for backwards compatibility with previous versions of this function. These
    arguments had no effect on the return values of the function and can be
    safely ignored in this and previous versions of numpy.
    """
    msg = 'bias and ddof have no effect and are deprecated'
    # Warn only when the caller actually passed one of the dead parameters.
    if bias is not np._NoValue or ddof is not np._NoValue:
        # 2015-03-15, 1.10
        warnings.warn(msg, DeprecationWarning, stacklevel=2)
    # Get the data
    (x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked)
    # Compute the covariance matrix (normalization by the pair-wise count of
    # simultaneously unmasked observations, no ddof).
    if not rowvar:
        fact = np.dot(xnotmask.T, xnotmask) * 1.
        c = (dot(x.T, x.conj(), strict=False) / fact).squeeze()
    else:
        fact = np.dot(xnotmask, xnotmask.T) * 1.
        c = (dot(x, x.T.conj(), strict=False) / fact).squeeze()
    # Check whether we have a scalar
    try:
        diag = ma.diagonal(c)
    except ValueError:
        # 0-d covariance: the correlation of a variable with itself is 1.
        return 1
    #
    if xnotmask.all():
        # No masked entries: the denominator is simply sqrt(var_i * var_j).
        _denom = ma.sqrt(ma.multiply.outer(diag, diag))
    else:
        # Masked entries present: each off-diagonal denominator must use the
        # variances computed over only the observations that are unmasked in
        # BOTH variables (mask_cols propagates the pair-wise mask).
        _denom = diagflat(diag)
        _denom._sharedmask = False  # We know return is always a copy
        n = x.shape[1 - rowvar]
        if rowvar:
            for i in range(n - 1):
                for j in range(i + 1, n):
                    _x = mask_cols(vstack((x[i], x[j]))).var(axis=1)
                    _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x))
        else:
            for i in range(n - 1):
                for j in range(i + 1, n):
                    _x = mask_cols(
                        vstack((x[:, i], x[:, j]))).var(axis=1)
                    _denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x))
    return c / _denom
1602
+
1603
+ #####--------------------------------------------------------------------------
1604
+ #---- --- Concatenation helpers ---
1605
+ #####--------------------------------------------------------------------------
1606
+
1607
class MAxisConcatenator(AxisConcatenator):
    """
    Translate slice objects to concatenation along an axis.

    For documentation on usage, see `mr_class`.

    See Also
    --------
    mr_class

    """
    # Override the base-class concatenate with the masked-array version so
    # masks survive the concatenation.
    concatenate = staticmethod(concatenate)

    @classmethod
    def makemat(cls, arr):
        # There used to be a view as np.matrix here, but we may eventually
        # deprecate that class. In preparation, we use the unmasked version
        # to construct the matrix (with copy=False for backwards compatibility
        # with the .view)
        data = super().makemat(arr.data, copy=False)
        # Re-attach the original mask onto the matrix-shaped data.
        return array(data, mask=arr.mask)

    def __getitem__(self, key):
        # matrix builder syntax, like 'a, b; c, d'
        # String keys request np.matrix construction, which is not supported
        # for masked arrays.
        if isinstance(key, str):
            raise MAError("Unavailable for masked array.")

        return super().__getitem__(key)
1635
+
1636
+
1637
class mr_class(MAxisConcatenator):
    """
    Translate slice objects to concatenation along the first axis.

    This is the masked array version of `lib.index_tricks.RClass`.

    See Also
    --------
    lib.index_tricks.RClass

    Examples
    --------
    >>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])]
    masked_array(data=[1, 2, 3, ..., 4, 5, 6],
                 mask=False,
           fill_value=999999)

    """
    def __init__(self):
        # Always concatenate along axis 0, mirroring np.r_.
        super().__init__(0)

mr_ = mr_class()
1659
+
1660
+
1661
+ #####--------------------------------------------------------------------------
1662
+ #---- Find unmasked data ---
1663
+ #####--------------------------------------------------------------------------
1664
+
1665
def ndenumerate(a, compressed=True):
    """
    Multidimensional index iterator.

    Yield ``(index, value)`` pairs over `a`, skipping masked elements.
    With ``compressed=False`` masked positions are yielded as well, with
    `ma.masked` as the value. This behavior differs from that of
    `numpy.ndenumerate`, which yields the value of the underlying data
    array.

    Notes
    -----
    .. versionadded:: 1.23.0

    Parameters
    ----------
    a : array_like
        An array with (possibly) masked elements.
    compressed : bool, optional
        If True (default), masked elements are skipped.

    See Also
    --------
    numpy.ndenumerate : Equivalent function ignoring any mask.

    """
    # Walk the plain ndenumerate stream in lockstep with the flattened mask.
    flat_mask = getmaskarray(a).flat
    for pair, is_masked in zip(np.ndenumerate(a), flat_mask):
        if not is_masked:
            yield pair
        elif not compressed:
            # Keep the coordinate, but report the masked singleton.
            yield pair[0], masked
1731
+
1732
+
1733
def flatnotmasked_edges(a):
    """
    Find the indices of the first and last unmasked values.

    Expects a 1-D `MaskedArray`, returns None if all values are masked.

    Parameters
    ----------
    a : array_like
        Input 1-D `MaskedArray`

    Returns
    -------
    edges : ndarray or None
        The indices of first and last non-masked value in the array.
        Returns None if all values are masked.

    See Also
    --------
    flatnotmasked_contiguous, notmasked_contiguous, notmasked_edges
    clump_masked, clump_unmasked

    Notes
    -----
    Only accepts 1-D arrays.

    """
    m = getmask(a)
    # Nothing masked at all: the edges are simply the array bounds.
    if m is nomask or not np.any(m):
        return np.array([0, a.size - 1])
    unmasked_idx = np.flatnonzero(~m)
    if unmasked_idx.size == 0:
        # Everything is masked.
        return None
    return unmasked_idx[[0, -1]]
1786
+
1787
+
1788
def notmasked_edges(a, axis=None):
    """
    Find the indices of the first and last unmasked values along an axis.

    If all values are masked, return None. Otherwise, return a list
    of two tuples, corresponding to the indices of the first and last
    unmasked values respectively.

    Parameters
    ----------
    a : array_like
        The input array.
    axis : int, optional
        Axis along which to perform the operation.
        If None (default), applies to a flattened version of the array.

    Returns
    -------
    edges : ndarray or list
        An array of start and end indexes if there are any masked data in
        the array. If there are no masked data in the array, `edges` is a
        list of the first and last index.

    See Also
    --------
    flatnotmasked_contiguous, flatnotmasked_edges, notmasked_contiguous
    clump_masked, clump_unmasked

    """
    a = asarray(a)
    if axis is None or a.ndim == 1:
        return flatnotmasked_edges(a)
    m = getmaskarray(a)
    # Mask the coordinate grids wherever the data is masked, so min/max over
    # `axis` pick out the first/last unmasked coordinate of each line.
    idx = array(np.indices(a.shape), mask=np.asarray([m] * a.ndim))
    first = tuple(idx[i].min(axis).compressed() for i in range(a.ndim))
    last = tuple(idx[i].max(axis).compressed() for i in range(a.ndim))
    return [first, last]
1837
+
1838
+
1839
def flatnotmasked_contiguous(a):
    """
    Find contiguous unmasked data in a masked array.

    Parameters
    ----------
    a : array_like
        The input array.

    Returns
    -------
    slice_list : list
        A sorted sequence of `slice` objects (start index, end index).

        .. versionchanged:: 1.15.0
            Now returns an empty list instead of None for a fully masked array

    See Also
    --------
    flatnotmasked_edges, notmasked_contiguous, notmasked_edges
    clump_masked, clump_unmasked

    Notes
    -----
    Only accepts 2-D arrays at most.

    """
    m = getmask(a)
    if m is nomask:
        # No mask at all: the whole array is one unmasked run.
        return [slice(0, a.size)]
    slices = []
    start = 0
    # groupby yields maximal runs of identical mask values; keep the
    # unmasked (False) runs as slices.
    for masked_flag, run in itertools.groupby(m.ravel()):
        length = sum(1 for _ in run)
        if not masked_flag:
            slices.append(slice(start, start + length))
        start += length
    return slices
1894
+
1895
+
1896
def notmasked_contiguous(a, axis=None):
    """
    Find contiguous unmasked data in a masked array along the given axis.

    Parameters
    ----------
    a : array_like
        The input array.
    axis : int, optional
        Axis along which to perform the operation.
        If None (default), applies to a flattened version of the array, and
        this is the same as `flatnotmasked_contiguous`.

    Returns
    -------
    endpoints : list
        A list of slices (start and end indexes) of unmasked indexes
        in the array.

        If the input is 2d and axis is specified, the result is a list of
        lists.

    See Also
    --------
    flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges
    clump_masked, clump_unmasked

    Notes
    -----
    Only accepts 2-D arrays at most.

    """
    a = asarray(a)
    if a.ndim > 2:
        raise NotImplementedError("Currently limited to at most 2D array.")
    if axis is None or a.ndim == 1:
        return flatnotmasked_contiguous(a)

    # Walk the 1-D lines of `a` perpendicular to `axis`, collecting the
    # unmasked runs of each line.
    other = (axis + 1) % 2
    selector = [0, 0]
    selector[axis] = slice(None, None)
    runs = []
    for pos in range(a.shape[other]):
        selector[other] = pos
        runs.append(flatnotmasked_contiguous(a[tuple(selector)]))
    return runs
1971
+
1972
+
1973
+ def _ezclump(mask):
1974
+ """
1975
+ Finds the clumps (groups of data with the same values) for a 1D bool array.
1976
+
1977
+ Returns a series of slices.
1978
+ """
1979
+ if mask.ndim > 1:
1980
+ mask = mask.ravel()
1981
+ idx = (mask[1:] ^ mask[:-1]).nonzero()
1982
+ idx = idx[0] + 1
1983
+
1984
+ if mask[0]:
1985
+ if len(idx) == 0:
1986
+ return [slice(0, mask.size)]
1987
+
1988
+ r = [slice(0, idx[0])]
1989
+ r.extend((slice(left, right)
1990
+ for left, right in zip(idx[1:-1:2], idx[2::2])))
1991
+ else:
1992
+ if len(idx) == 0:
1993
+ return []
1994
+
1995
+ r = [slice(left, right) for left, right in zip(idx[:-1:2], idx[1::2])]
1996
+
1997
+ if mask[-1]:
1998
+ r.append(slice(idx[-1], mask.size))
1999
+ return r
2000
+
2001
+
2002
def clump_unmasked(a):
    """
    Return list of slices corresponding to the unmasked clumps of a 1-D array.
    (A "clump" is defined as a contiguous region of the array).

    Parameters
    ----------
    a : ndarray
        A one-dimensional masked array.

    Returns
    -------
    slices : list of slice
        The list of slices, one for each continuous region of unmasked
        elements in `a`.

    Notes
    -----
    .. versionadded:: 1.4.0

    See Also
    --------
    flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges
    notmasked_contiguous, clump_masked

    """
    mask = getattr(a, '_mask', nomask)
    if mask is nomask:
        # Nothing masked: the entire array is a single unmasked clump.
        return [slice(0, a.size)]
    # Invert the mask so that unmasked positions become the True clumps.
    return _ezclump(np.logical_not(mask))
2039
+
2040
+
2041
def clump_masked(a):
    """
    Returns a list of slices corresponding to the masked clumps of a 1-D
    array. (A "clump" is defined as a contiguous region of the array).

    Parameters
    ----------
    a : ndarray
        A one-dimensional masked array.

    Returns
    -------
    slices : list of slice
        The list of slices, one for each continuous region of masked elements
        in `a`.

    Notes
    -----
    .. versionadded:: 1.4.0

    See Also
    --------
    flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges
    notmasked_contiguous, clump_unmasked

    """
    mask = ma.getmask(a)
    # With no mask there are no masked clumps at all.
    return [] if mask is nomask else _ezclump(mask)
2078
+
2079
+
2080
+ ###############################################################################
2081
+ # Polynomial fit #
2082
+ ###############################################################################
2083
+
2084
+
2085
def vander(x, n=None):
    """
    Masked values in the input array result in rows of zeros.

    """
    result = np.vander(x, n)
    mask = getmask(x)
    if mask is not nomask:
        # Zero out every row corresponding to a masked input value.
        result[mask] = 0
    return result

vander.__doc__ = ma.doc_note(np.vander.__doc__, vander.__doc__)
2097
+
2098
+
2099
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
    """
    Any masked values in x is propagated in y, and vice-versa.

    Masked points (in `x`, `y`, or the weights `w`) are removed before
    delegating the actual fit to `numpy.polyfit`.
    """
    x = asarray(x)
    y = asarray(y)

    # Build the combined mask of points to drop from the fit.
    m = getmask(x)
    if y.ndim == 1:
        m = mask_or(m, getmask(y))
    elif y.ndim == 2:
        # For 2-D y, a row is dropped when any of its entries is masked
        # (mask_rows masks whole rows), so one flag per row suffices.
        my = getmask(mask_rows(y))
        if my is not nomask:
            m = mask_or(m, my[:, 0])
    else:
        raise TypeError("Expected a 1D or 2D array for y!")

    if w is not None:
        w = asarray(w)
        if w.ndim != 1:
            raise TypeError("expected a 1-d array for weights")
        if w.shape[0] != y.shape[0]:
            raise TypeError("expected w and y to have the same length")
        # Masked weights also invalidate the corresponding data points.
        m = mask_or(m, getmask(w))

    if m is not nomask:
        not_m = ~m
        if w is not None:
            w = w[not_m]
        # Fit only the unmasked points.
        return np.polyfit(x[not_m], y[not_m], deg, rcond, full, w, cov)
    else:
        return np.polyfit(x, y, deg, rcond, full, w, cov)

polyfit.__doc__ = ma.doc_note(np.polyfit.__doc__, polyfit.__doc__)
llava_next/lib/python3.10/site-packages/numpy/ma/extras.pyi ADDED
@@ -0,0 +1,85 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Type stubs for numpy.ma.extras. Defaults are written as `...` because the
# runtime default values live in the implementation module.
from typing import Any
from numpy.lib.index_tricks import AxisConcatenator

from numpy.ma.core import (
    dot as dot,
    mask_rowcols as mask_rowcols,
)

__all__: list[str]

def count_masked(arr, axis=...): ...
def masked_all(shape, dtype = ...): ...
def masked_all_like(arr): ...

# Base wrapper turning a plain-numpy function into a masked-aware one.
class _fromnxfunction:
    __name__: Any
    __doc__: Any
    def __init__(self, funcname): ...
    def getdoc(self): ...
    def __call__(self, *args, **params): ...

class _fromnxfunction_single(_fromnxfunction):
    def __call__(self, x, *args, **params): ...

class _fromnxfunction_seq(_fromnxfunction):
    def __call__(self, x, *args, **params): ...

class _fromnxfunction_allargs(_fromnxfunction):
    def __call__(self, *args, **params): ...

atleast_1d: _fromnxfunction_allargs
atleast_2d: _fromnxfunction_allargs
atleast_3d: _fromnxfunction_allargs

vstack: _fromnxfunction_seq
row_stack: _fromnxfunction_seq
hstack: _fromnxfunction_seq
column_stack: _fromnxfunction_seq
dstack: _fromnxfunction_seq
stack: _fromnxfunction_seq

hsplit: _fromnxfunction_single
diagflat: _fromnxfunction_single

def apply_along_axis(func1d, axis, arr, *args, **kwargs): ...
def apply_over_axes(func, a, axes): ...
def average(a, axis=..., weights=..., returned=..., keepdims=...): ...
def median(a, axis=..., out=..., overwrite_input=..., keepdims=...): ...
def compress_nd(x, axis=...): ...
def compress_rowcols(x, axis=...): ...
def compress_rows(a): ...
def compress_cols(a): ...
def mask_rows(a, axis = ...): ...
def mask_cols(a, axis = ...): ...
def ediff1d(arr, to_end=..., to_begin=...): ...
def unique(ar1, return_index=..., return_inverse=...): ...
def intersect1d(ar1, ar2, assume_unique=...): ...
def setxor1d(ar1, ar2, assume_unique=...): ...
def in1d(ar1, ar2, assume_unique=..., invert=...): ...
def isin(element, test_elements, assume_unique=..., invert=...): ...
def union1d(ar1, ar2): ...
def setdiff1d(ar1, ar2, assume_unique=...): ...
def cov(x, y=..., rowvar=..., bias=..., allow_masked=..., ddof=...): ...
def corrcoef(x, y=..., rowvar=..., bias = ..., allow_masked=..., ddof = ...): ...

class MAxisConcatenator(AxisConcatenator):
    concatenate: Any
    @classmethod
    def makemat(cls, arr): ...
    def __getitem__(self, key): ...

class mr_class(MAxisConcatenator):
    def __init__(self): ...

mr_: mr_class

def ndenumerate(a, compressed=...): ...
def flatnotmasked_edges(a): ...
def notmasked_edges(a, axis=...): ...
def flatnotmasked_contiguous(a): ...
def notmasked_contiguous(a, axis=...): ...
def clump_unmasked(a): ...
def clump_masked(a): ...
def vander(x, n=...): ...
def polyfit(x, y, deg, rcond=..., full=..., w=..., cov=...): ...
llava_next/lib/python3.10/site-packages/numpy/ma/mrecords.py ADDED
@@ -0,0 +1,783 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """:mod:`numpy.ma..mrecords`
2
+
3
+ Defines the equivalent of :class:`numpy.recarrays` for masked arrays,
4
+ where fields can be accessed as attributes.
5
+ Note that :class:`numpy.ma.MaskedArray` already supports structured datatypes
6
+ and the masking of individual fields.
7
+
8
+ .. moduleauthor:: Pierre Gerard-Marchant
9
+
10
+ """
11
+ # We should make sure that no field is called '_mask','mask','_fieldmask',
12
+ # or whatever restricted keywords. An idea would be to no bother in the
13
+ # first place, and then rename the invalid fields with a trailing
14
+ # underscore. Maybe we could just overload the parser function ?
15
+
16
+ from numpy.ma import (
17
+ MAError, MaskedArray, masked, nomask, masked_array, getdata,
18
+ getmaskarray, filled
19
+ )
20
+ import numpy.ma as ma
21
+ import warnings
22
+
23
+ import numpy as np
24
+ from numpy import (
25
+ bool_, dtype, ndarray, recarray, array as narray
26
+ )
27
+ from numpy.core.records import (
28
+ fromarrays as recfromarrays, fromrecords as recfromrecords
29
+ )
30
+
31
+ _byteorderconv = np.core.records._byteorderconv
32
+
33
+
34
+ _check_fill_value = ma.core._check_fill_value
35
+
36
+
37
# Public API of this module.
__all__ = [
    'MaskedRecords', 'mrecarray', 'fromarrays', 'fromrecords',
    'fromtextfile', 'addfield',
]

# Field names that would shadow MaskedRecords' own attributes; _checknames
# substitutes default names for them.
reserved_fields = ['_data', '_mask', '_fieldmask', 'dtype']
43
+
44
+
45
def _checknames(descr, names=None):
    """
    Checks that field names ``descr`` are not reserved keywords.

    If this is the case, a default 'f%i' is substituted. If the argument
    `names` is not None, updates the field names to valid names.

    """
    ndescr = len(descr)
    default_names = ['f%i' % i for i in range(ndescr)]
    if names is None:
        new_names = default_names
    else:
        if isinstance(names, (tuple, list)):
            new_names = names
        elif isinstance(names, str):
            new_names = names.split(',')
        else:
            raise NameError(f'illegal input names {names!r}')
        nnames = len(new_names)
        if nnames < ndescr:
            # Too few names supplied: pad with positional defaults.
            new_names += default_names[nnames:]
    # Rebuild the dtype description, substituting safe names for reserved
    # ones.
    ndescr = []
    for (n, d, t) in zip(new_names, default_names, descr.descr):
        if n in reserved_fields:
            if t[0] in reserved_fields:
                # Both the requested and the original name are reserved:
                # fall back to the positional default name.
                ndescr.append((d, t[1]))
            else:
                # Keep the original (non-reserved) descriptor entry.
                ndescr.append(t)
        else:
            ndescr.append((n, t[1]))
    return np.dtype(ndescr)
77
+
78
+
79
+ def _get_fieldmask(self):
80
+ mdescr = [(n, '|b1') for n in self.dtype.names]
81
+ fdmask = np.empty(self.shape, dtype=mdescr)
82
+ fdmask.flat = tuple([False] * len(mdescr))
83
+ return fdmask
84
+
85
+
86
class MaskedRecords(MaskedArray):
    """
    A masked record array: a ``recarray`` whose individual fields (and whole
    records) can be masked.

    Attributes
    ----------
    _data : recarray
        Underlying data, as a record array.
    _mask : boolean array
        Mask of the records. A record is masked when all its fields are
        masked.
    _fieldmask : boolean recarray
        Record array of booleans, setting the mask of each individual field
        of each record.
    _fill_value : record
        Filling values for each field.

    """

    def __new__(cls, shape, dtype=None, buf=None, offset=0, strides=None,
                formats=None, names=None, titles=None,
                byteorder=None, aligned=False,
                mask=nomask, hard_mask=False, fill_value=None, keep_mask=True,
                copy=False,
                **options):
        # Build the plain recarray first; the mask is grafted on afterwards.
        self = recarray.__new__(cls, shape, dtype=dtype, buf=buf, offset=offset,
                                strides=strides, formats=formats, names=names,
                                titles=titles, byteorder=byteorder,
                                aligned=aligned,)

        # One boolean per field, mirroring the data dtype.
        mdtype = ma.make_mask_descr(self.dtype)
        if mask is nomask or not np.size(mask):
            if not keep_mask:
                self._mask = tuple([False] * len(mdtype))
        else:
            mask = np.array(mask, copy=copy)
            if mask.shape != self.shape:
                (nd, nm) = (self.size, mask.size)
                if nm == 1:
                    # Scalar mask: broadcast it over the whole array.
                    mask = np.resize(mask, self.shape)
                elif nm == nd:
                    mask = np.reshape(mask, self.shape)
                else:
                    msg = "Mask and data not compatible: data size is %i, " + \
                          "mask size is %i."
                    raise MAError(msg % (nd, nm))
            if not keep_mask:
                self.__setmask__(mask)
                self._sharedmask = True
            else:
                if mask.dtype == mdtype:
                    _mask = mask
                else:
                    # Plain boolean mask: replicate it to every field.
                    _mask = np.array([tuple([m] * len(mdtype)) for m in mask],
                                     dtype=mdtype)
                self._mask = _mask
        return self

    def __array_finalize__(self, obj):
        # Make sure we have a _fieldmask by default
        _mask = getattr(obj, '_mask', None)
        if _mask is None:
            objmask = getattr(obj, '_mask', nomask)
            _dtype = ndarray.__getattribute__(self, 'dtype')
            if objmask is nomask:
                _mask = ma.make_mask_none(self.shape, dtype=_dtype)
            else:
                # Expand the source's flat mask to one boolean per field.
                mdescr = ma.make_mask_descr(_dtype)
                _mask = narray([tuple([m] * len(mdescr)) for m in objmask],
                               dtype=mdescr).view(recarray)
        # Update some of the attributes
        _dict = self.__dict__
        _dict.update(_mask=_mask)
        self._update_from(obj)
        if _dict['_baseclass'] == ndarray:
            _dict['_baseclass'] = recarray
        return

    @property
    def _data(self):
        """
        Returns the data as a recarray.

        """
        return ndarray.view(self, recarray)

    @property
    def _fieldmask(self):
        """
        Alias to mask.

        """
        return self._mask

    def __len__(self):
        """
        Returns the length

        """
        # We have more than one record
        if self.ndim:
            return len(self._data)
        # We have only one record: return the nb of fields
        return len(self.dtype)

    def __getattribute__(self, attr):
        # Regular attribute lookup first; fall through to field access when
        # `attr` is not a real attribute.
        try:
            return object.__getattribute__(self, attr)
        except AttributeError:
            # attr must be a fieldname
            pass
        fielddict = ndarray.__getattribute__(self, 'dtype').fields
        try:
            res = fielddict[attr][:2]
        except (TypeError, KeyError) as e:
            raise AttributeError(
                f'record array has no attribute {attr}') from e
        # So far, so good
        _localdict = ndarray.__getattribute__(self, '__dict__')
        _data = ndarray.view(self, _localdict['_baseclass'])
        obj = _data.getfield(*res)
        if obj.dtype.names is not None:
            raise NotImplementedError("MaskedRecords is currently limited to"
                                      "simple records.")
        # Get some special attributes
        # Reset the object's mask
        hasmasked = False
        _mask = _localdict.get('_mask', None)
        if _mask is not None:
            try:
                _mask = _mask[attr]
            except IndexError:
                # Couldn't find a mask: use the default (nomask)
                pass
            tp_len = len(_mask.dtype)
            hasmasked = _mask.view((bool, ((tp_len,) if tp_len else ()))).any()
        if (obj.shape or hasmasked):
            # Wrap the raw field as a MaskedArray sharing this record's mask.
            obj = obj.view(MaskedArray)
            obj._baseclass = ndarray
            obj._isfield = True
            obj._mask = _mask
            # Reset the field values
            _fill_value = _localdict.get('_fill_value', None)
            if _fill_value is not None:
                try:
                    obj._fill_value = _fill_value[attr]
                except ValueError:
                    obj._fill_value = None
        else:
            # 0-d unmasked field: return the plain Python scalar.
            obj = obj.item()
        return obj

    def __setattr__(self, attr, val):
        """
        Sets the attribute attr to the value val.

        """
        # Should we call __setmask__ first ?
        if attr in ['mask', 'fieldmask']:
            self.__setmask__(val)
            return
        # Create a shortcut (so that we don't have to call getattr all the time)
        _localdict = object.__getattribute__(self, '__dict__')
        # Check whether we're creating a new field
        newattr = attr not in _localdict
        try:
            # Is attr a generic attribute ?
            ret = object.__setattr__(self, attr, val)
        except Exception:
            # Not a generic attribute: exit if it's not a valid field
            fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
            optinfo = ndarray.__getattribute__(self, '_optinfo') or {}
            if not (attr in fielddict or attr in optinfo):
                raise
        else:
            # Get the list of names
            fielddict = ndarray.__getattribute__(self, 'dtype').fields or {}
            # Check the attribute
            if attr not in fielddict:
                return ret
            if newattr:
                # We just added this one or this setattr worked on an
                # internal attribute.
                try:
                    object.__delattr__(self, attr)
                except Exception:
                    return ret
        # Let's try to set the field
        try:
            res = fielddict[attr][:2]
        except (TypeError, KeyError) as e:
            raise AttributeError(
                f'record array has no attribute {attr}') from e

        if val is masked:
            # Assigning `masked`: store the fill value (if any) and set the
            # field's mask entry to True.
            _fill_value = _localdict['_fill_value']
            if _fill_value is not None:
                dval = _localdict['_fill_value'][attr]
            else:
                dval = val
            mval = True
        else:
            dval = filled(val)
            mval = getmaskarray(val)
        obj = ndarray.__getattribute__(self, '_data').setfield(dval, *res)
        _localdict['_mask'].__setitem__(attr, mval)
        return obj

    def __getitem__(self, indx):
        """
        Returns all the fields sharing the same fieldname base.

        The fieldname base is either `_data` or `_mask`.

        """
        _localdict = self.__dict__
        _mask = ndarray.__getattribute__(self, '_mask')
        _data = ndarray.view(self, _localdict['_baseclass'])
        # We want a field
        if isinstance(indx, str):
            # Make sure _sharedmask is True to propagate back to _fieldmask
            # Don't use _set_mask, there are some copies being made that
            # break propagation Don't force the mask to nomask, that wreaks
            # easy masking
            obj = _data[indx].view(MaskedArray)
            obj._mask = _mask[indx]
            obj._sharedmask = True
            fval = _localdict['_fill_value']
            if fval is not None:
                obj._fill_value = fval[indx]
            # Force to masked if the mask is True
            if not obj.ndim and obj._mask:
                return masked
            return obj
        # We want some elements.
        # First, the data.
        obj = np.array(_data[indx], copy=False).view(mrecarray)
        obj._mask = np.array(_mask[indx], copy=False).view(recarray)
        return obj

    def __setitem__(self, indx, value):
        """
        Sets the given record to value.

        """
        MaskedArray.__setitem__(self, indx, value)
        if isinstance(indx, str):
            # Field assignment: keep the field mask in sync with the value's.
            self._mask[indx] = ma.getmaskarray(value)

    def __str__(self):
        """
        Calculates the string representation.

        """
        if self.size > 1:
            mstr = [f"({','.join([str(i) for i in s])})"
                    for s in zip(*[getattr(self, f) for f in self.dtype.names])]
            return f"[{', '.join(mstr)}]"
        else:
            mstr = [f"{','.join([str(i) for i in s])}"
                    for s in zip([getattr(self, f) for f in self.dtype.names])]
            return f"({', '.join(mstr)})"

    def __repr__(self):
        """
        Calculates the repr representation.

        """
        _names = self.dtype.names
        # Right-align field names to the longest one (plus padding).
        fmt = "%%%is : %%s" % (max([len(n) for n in _names]) + 4,)
        reprstr = [fmt % (f, getattr(self, f)) for f in self.dtype.names]
        reprstr.insert(0, 'masked_records(')
        reprstr.extend([fmt % ('    fill_value', self.fill_value),
                        '              )'])
        return str("\n".join(reprstr))

    def view(self, dtype=None, type=None):
        """
        Returns a view of the mrecarray.

        """
        # OK, basic copy-paste from MaskedArray.view.
        if dtype is None:
            if type is None:
                output = ndarray.view(self)
            else:
                output = ndarray.view(self, type)
        # Here again.
        elif type is None:
            try:
                if issubclass(dtype, ndarray):
                    output = ndarray.view(self, dtype)
                else:
                    output = ndarray.view(self, dtype)
            # OK, there's the change
            except TypeError:
                dtype = np.dtype(dtype)
                # we need to revert to MaskedArray, but keeping the possibility
                # of subclasses (eg, TimeSeriesRecords), so we'll force a type
                # set to the first parent
                if dtype.fields is None:
                    basetype = self.__class__.__bases__[0]
                    output = self.__array__().view(dtype, basetype)
                    output._update_from(self)
                else:
                    output = ndarray.view(self, dtype)
                output._fill_value = None
        else:
            output = ndarray.view(self, dtype, type)
        # Update the mask, just like in MaskedArray.view
        if (getattr(output, '_mask', nomask) is not nomask):
            mdtype = ma.make_mask_descr(output.dtype)
            output._mask = self._mask.view(mdtype, ndarray)
            output._mask.shape = output.shape
        return output

    def harden_mask(self):
        """
        Forces the mask to hard.

        """
        self._hardmask = True

    def soften_mask(self):
        """
        Forces the mask to soft

        """
        self._hardmask = False

    def copy(self):
        """
        Returns a copy of the masked record.

        """
        copied = self._data.copy().view(type(self))
        copied._mask = self._mask.copy()
        return copied

    def tolist(self, fill_value=None):
        """
        Return the data portion of the array as a list.

        Data items are converted to the nearest compatible Python type.
        Masked values are converted to fill_value. If fill_value is None,
        the corresponding entries in the output list will be ``None``.

        """
        if fill_value is not None:
            return self.filled(fill_value).tolist()
        result = narray(self.filled().tolist(), dtype=object)
        mask = narray(self._mask.tolist())
        result[mask] = None
        return result.tolist()

    def __getstate__(self):
        """Return the internal state of the masked array.

        This is for pickling.

        """
        state = (1,
                 self.shape,
                 self.dtype,
                 self.flags.fnc,
                 self._data.tobytes(),
                 self._mask.tobytes(),
                 self._fill_value,
                 )
        return state

    def __setstate__(self, state):
        """
        Restore the internal state of the masked array.

        This is for pickling. ``state`` is typically the output of the
        ``__getstate__`` output, and is a 5-tuple:

        - class name
        - a tuple giving the shape of the data
        - a typecode for the data
        - a binary string for the data
        - a binary string for the mask.

        """
        (ver, shp, typ, isf, raw, msk, flv) = state
        ndarray.__setstate__(self, (shp, typ, isf, raw))
        # Rebuild the per-field boolean mask with the same shape/order flags.
        mdtype = dtype([(k, bool_) for (k, _) in self.dtype.descr])
        self.__dict__['_mask'].__setstate__((shp, mdtype, isf, msk))
        self.fill_value = flv

    def __reduce__(self):
        """
        Return a 3-tuple for pickling a MaskedArray.

        """
        return (_mrreconstruct,
                (self.__class__, self._baseclass, (0,), 'b',),
                self.__getstate__())
485
+
486
+
487
+ def _mrreconstruct(subtype, baseclass, baseshape, basetype,):
488
+ """
489
+ Build a new MaskedArray from the information stored in a pickle.
490
+
491
+ """
492
+ _data = ndarray.__new__(baseclass, baseshape, basetype).view(subtype)
493
+ _mask = ndarray.__new__(ndarray, baseshape, 'b1')
494
+ return subtype.__new__(subtype, _data, mask=_mask, dtype=basetype,)
495
+
496
# Lowercase alias mirroring numpy's ``recarray`` naming convention.
mrecarray = MaskedRecords
497
+
498
+
499
+ ###############################################################################
500
+ # Constructors #
501
+ ###############################################################################
502
+
503
+
504
+ def fromarrays(arraylist, dtype=None, shape=None, formats=None,
505
+ names=None, titles=None, aligned=False, byteorder=None,
506
+ fill_value=None):
507
+ """
508
+ Creates a mrecarray from a (flat) list of masked arrays.
509
+
510
+ Parameters
511
+ ----------
512
+ arraylist : sequence
513
+ A list of (masked) arrays. Each element of the sequence is first converted
514
+ to a masked array if needed. If a 2D array is passed as argument, it is
515
+ processed line by line
516
+ dtype : {None, dtype}, optional
517
+ Data type descriptor.
518
+ shape : {None, integer}, optional
519
+ Number of records. If None, shape is defined from the shape of the
520
+ first array in the list.
521
+ formats : {None, sequence}, optional
522
+ Sequence of formats for each individual field. If None, the formats will
523
+ be autodetected by inspecting the fields and selecting the highest dtype
524
+ possible.
525
+ names : {None, sequence}, optional
526
+ Sequence of the names of each field.
527
+ fill_value : {None, sequence}, optional
528
+ Sequence of data to be used as filling values.
529
+
530
+ Notes
531
+ -----
532
+ Lists of tuples should be preferred over lists of lists for faster processing.
533
+
534
+ """
535
+ datalist = [getdata(x) for x in arraylist]
536
+ masklist = [np.atleast_1d(getmaskarray(x)) for x in arraylist]
537
+ _array = recfromarrays(datalist,
538
+ dtype=dtype, shape=shape, formats=formats,
539
+ names=names, titles=titles, aligned=aligned,
540
+ byteorder=byteorder).view(mrecarray)
541
+ _array._mask.flat = list(zip(*masklist))
542
+ if fill_value is not None:
543
+ _array.fill_value = fill_value
544
+ return _array
545
+
546
+
547
+ def fromrecords(reclist, dtype=None, shape=None, formats=None, names=None,
548
+ titles=None, aligned=False, byteorder=None,
549
+ fill_value=None, mask=nomask):
550
+ """
551
+ Creates a MaskedRecords from a list of records.
552
+
553
+ Parameters
554
+ ----------
555
+ reclist : sequence
556
+ A list of records. Each element of the sequence is first converted
557
+ to a masked array if needed. If a 2D array is passed as argument, it is
558
+ processed line by line
559
+ dtype : {None, dtype}, optional
560
+ Data type descriptor.
561
+ shape : {None,int}, optional
562
+ Number of records. If None, ``shape`` is defined from the shape of the
563
+ first array in the list.
564
+ formats : {None, sequence}, optional
565
+ Sequence of formats for each individual field. If None, the formats will
566
+ be autodetected by inspecting the fields and selecting the highest dtype
567
+ possible.
568
+ names : {None, sequence}, optional
569
+ Sequence of the names of each field.
570
+ fill_value : {None, sequence}, optional
571
+ Sequence of data to be used as filling values.
572
+ mask : {nomask, sequence}, optional.
573
+ External mask to apply on the data.
574
+
575
+ Notes
576
+ -----
577
+ Lists of tuples should be preferred over lists of lists for faster processing.
578
+
579
+ """
580
+ # Grab the initial _fieldmask, if needed:
581
+ _mask = getattr(reclist, '_mask', None)
582
+ # Get the list of records.
583
+ if isinstance(reclist, ndarray):
584
+ # Make sure we don't have some hidden mask
585
+ if isinstance(reclist, MaskedArray):
586
+ reclist = reclist.filled().view(ndarray)
587
+ # Grab the initial dtype, just in case
588
+ if dtype is None:
589
+ dtype = reclist.dtype
590
+ reclist = reclist.tolist()
591
+ mrec = recfromrecords(reclist, dtype=dtype, shape=shape, formats=formats,
592
+ names=names, titles=titles,
593
+ aligned=aligned, byteorder=byteorder).view(mrecarray)
594
+ # Set the fill_value if needed
595
+ if fill_value is not None:
596
+ mrec.fill_value = fill_value
597
+ # Now, let's deal w/ the mask
598
+ if mask is not nomask:
599
+ mask = np.array(mask, copy=False)
600
+ maskrecordlength = len(mask.dtype)
601
+ if maskrecordlength:
602
+ mrec._mask.flat = mask
603
+ elif mask.ndim == 2:
604
+ mrec._mask.flat = [tuple(m) for m in mask]
605
+ else:
606
+ mrec.__setmask__(mask)
607
+ if _mask is not None:
608
+ mrec._mask[:] = _mask
609
+ return mrec
610
+
611
+
612
+ def _guessvartypes(arr):
613
+ """
614
+ Tries to guess the dtypes of the str_ ndarray `arr`.
615
+
616
+ Guesses by testing element-wise conversion. Returns a list of dtypes.
617
+ The array is first converted to ndarray. If the array is 2D, the test
618
+ is performed on the first line. An exception is raised if the file is
619
+ 3D or more.
620
+
621
+ """
622
+ vartypes = []
623
+ arr = np.asarray(arr)
624
+ if arr.ndim == 2:
625
+ arr = arr[0]
626
+ elif arr.ndim > 2:
627
+ raise ValueError("The array should be 2D at most!")
628
+ # Start the conversion loop.
629
+ for f in arr:
630
+ try:
631
+ int(f)
632
+ except (ValueError, TypeError):
633
+ try:
634
+ float(f)
635
+ except (ValueError, TypeError):
636
+ try:
637
+ complex(f)
638
+ except (ValueError, TypeError):
639
+ vartypes.append(arr.dtype)
640
+ else:
641
+ vartypes.append(np.dtype(complex))
642
+ else:
643
+ vartypes.append(np.dtype(float))
644
+ else:
645
+ vartypes.append(np.dtype(int))
646
+ return vartypes
647
+
648
+
649
def openfile(fname):
    """
    Opens the file handle of file `fname`.

    """
    # Already a file-like object: hand it back untouched.
    if hasattr(fname, 'readline'):
        return fname
    try:
        handle = open(fname)
    except FileNotFoundError as e:
        raise FileNotFoundError(f"No such file: '{fname}'") from e
    # Crude binary sniff: a first line beginning with the two literal
    # characters backslash-x is treated as binary content.
    if handle.readline()[:2] != "\\x":
        handle.seek(0, 0)
        return handle
    handle.close()
    raise NotImplementedError("Wow, binary file")
667
+
668
+
669
def fromtextfile(fname, delimiter=None, commentchar='#', missingchar='',
                 varnames=None, vartypes=None,
                 *, delimitor=np._NoValue):  # backwards compatibility
    """
    Creates a mrecarray from data stored in the file `filename`.

    Parameters
    ----------
    fname : {file name/handle}
        Handle of an opened file.
    delimiter : {None, string}, optional
        Alphanumeric character used to separate columns in the file.
        If None, any (group of) white spacestring(s) will be used.
    commentchar : {'#', string}, optional
        Alphanumeric character used to mark the start of a comment.
    missingchar : {'', string}, optional
        String indicating missing data, and used to create the masks.
    varnames : {None, sequence}, optional
        Sequence of the variable names. If None, a list will be created from
        the first non empty line of the file.
    vartypes : {None, sequence}, optional
        Sequence of the variables dtypes. If None, it will be estimated from
        the first non-commented line.


    Ultra simple: the varnames are in the header, one line"""
    # Handle the deprecated 'delimitor' spelling of the keyword.
    if delimitor is not np._NoValue:
        if delimiter is not None:
            raise TypeError("fromtextfile() got multiple values for argument "
                            "'delimiter'")
        # NumPy 1.22.0, 2021-09-23
        warnings.warn("The 'delimitor' keyword argument of "
                      "numpy.ma.mrecords.fromtextfile() is deprecated "
                      "since NumPy 1.22.0, use 'delimiter' instead.",
                      DeprecationWarning, stacklevel=2)
        delimiter = delimitor

    # Try to open the file.
    ftext = openfile(fname)

    # Get the first non-empty line as the varnames
    while True:
        line = ftext.readline()
        # Strip trailing comments before splitting into names.
        firstline = line[:line.find(commentchar)].strip()
        _varnames = firstline.split(delimiter)
        if len(_varnames) > 1:
            break
    if varnames is None:
        varnames = _varnames

    # Get the data.
    _variables = masked_array([line.strip().split(delimiter) for line in ftext
                               if line[0] != commentchar and len(line) > 1])
    (_, nfields) = _variables.shape
    ftext.close()

    # Try to guess the dtype.
    if vartypes is None:
        vartypes = _guessvartypes(_variables[0])
    else:
        vartypes = [np.dtype(v) for v in vartypes]
        if len(vartypes) != nfields:
            # Mismatched spec: warn and fall back to guessing.
            msg = "Attempting to %i dtypes for %i fields!"
            msg += " Reverting to default."
            warnings.warn(msg % (len(vartypes), nfields), stacklevel=2)
            vartypes = _guessvartypes(_variables[0])

    # Construct the descriptor.
    mdescr = [(n, f) for (n, f) in zip(varnames, vartypes)]
    mfillv = [ma.default_fill_value(f) for f in vartypes]

    # Get the data and the mask.
    # We just need a list of masked_arrays. It's easier to create it like that:
    _mask = (_variables.T == missingchar)
    _datalist = [masked_array(a, mask=m, dtype=t, fill_value=f)
                 for (a, m, t, f) in zip(_variables.T, _mask, vartypes, mfillv)]

    return fromarrays(_datalist, dtype=mdescr)
747
+
748
+
749
def addfield(mrecord, newfield, newfieldname=None):
    """Adds a new field to the masked record array

    Uses `newfield` as data and `newfieldname` as name. If `newfieldname`
    is None, the new field name is set to 'fi', where `i` is the number of
    existing fields.

    """
    data = mrecord._data
    mask = mrecord._mask
    # Reserved or missing names fall back to the default 'f<i>' scheme.
    if newfieldname is None or newfieldname in reserved_fields:
        newfieldname = 'f%i' % len(data.dtype)
    newfield = ma.array(newfield)
    # Build a fresh recarray with room for the extra field.
    newdtype = np.dtype(data.dtype.descr + [(newfieldname, newfield.dtype)])
    newdata = recarray(data.shape, newdtype)
    # Copy over every existing field, then store the new one.
    for spec in data.dtype.fields.values():
        newdata.setfield(data.getfield(*spec), *spec)
    newdata.setfield(newfield._data, *newdata.dtype.fields[newfieldname])
    newdata = newdata.view(MaskedRecords)
    # Mirror the same layout for the boolean mask.
    newmdtype = np.dtype([(name, bool_) for name in newdtype.names])
    newmask = recarray(data.shape, newmdtype)
    for spec in mask.dtype.fields.values():
        newmask.setfield(mask.getfield(*spec), *spec)
    newmask.setfield(getmaskarray(newfield),
                     *newmask.dtype.fields[newfieldname])
    newdata._mask = newmask
    return newdata
llava_next/lib/python3.10/site-packages/numpy/ma/mrecords.pyi ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Type stubs for ``numpy.ma.mrecords``.
from typing import Any, TypeVar

from numpy import dtype
from numpy.ma import MaskedArray

__all__: list[str]

# TODO: Set the `bound` to something more suitable once we
# have proper shape support
_ShapeType = TypeVar("_ShapeType", bound=Any)
_DType_co = TypeVar("_DType_co", bound=dtype[Any], covariant=True)

class MaskedRecords(MaskedArray[_ShapeType, _DType_co]):
    def __new__(
        cls,
        shape,
        dtype=...,
        buf=...,
        offset=...,
        strides=...,
        formats=...,
        names=...,
        titles=...,
        byteorder=...,
        aligned=...,
        mask=...,
        hard_mask=...,
        fill_value=...,
        keep_mask=...,
        copy=...,
        **options,
    ): ...
    _mask: Any
    _fill_value: Any
    @property
    def _data(self): ...
    @property
    def _fieldmask(self): ...
    def __array_finalize__(self, obj): ...
    def __len__(self): ...
    def __getattribute__(self, attr): ...
    def __setattr__(self, attr, val): ...
    def __getitem__(self, indx): ...
    def __setitem__(self, indx, value): ...
    def view(self, dtype=..., type=...): ...
    def harden_mask(self): ...
    def soften_mask(self): ...
    def copy(self): ...
    def tolist(self, fill_value=...): ...
    def __reduce__(self): ...

# Lowercase alias mirroring the runtime module.
mrecarray = MaskedRecords

def fromarrays(
    arraylist,
    dtype=...,
    shape=...,
    formats=...,
    names=...,
    titles=...,
    aligned=...,
    byteorder=...,
    fill_value=...,
): ...

def fromrecords(
    reclist,
    dtype=...,
    shape=...,
    formats=...,
    names=...,
    titles=...,
    aligned=...,
    byteorder=...,
    fill_value=...,
    mask=...,
): ...

def fromtextfile(
    fname,
    delimiter=...,
    commentchar=...,
    missingchar=...,
    varnames=...,
    vartypes=...,
    # NOTE: deprecated: NumPy 1.22.0, 2021-09-23
    # delimitor=...,
): ...

def addfield(mrecord, newfield, newfieldname=...): ...
llava_next/lib/python3.10/site-packages/numpy/ma/setup.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
#!/usr/bin/env python3
def configuration(parent_package='', top_path=None):
    """Build the numpy.distutils configuration for the ``numpy.ma`` package."""
    from numpy.distutils.misc_util import Configuration
    config = Configuration('ma', parent_package, top_path)
    config.add_subpackage('tests')
    # Ship the type stubs (*.pyi) alongside the sources.
    config.add_data_files('*.pyi')
    return config

if __name__ == "__main__":
    from numpy.distutils.core import setup
    config = configuration(top_path='').todict()
    setup(**config)
llava_next/lib/python3.10/site-packages/numpy/ma/tests/test_old_ma.py ADDED
@@ -0,0 +1,874 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from functools import reduce
2
+
3
+ import pytest
4
+
5
+ import numpy as np
6
+ import numpy.core.umath as umath
7
+ import numpy.core.fromnumeric as fromnumeric
8
+ from numpy.testing import (
9
+ assert_, assert_raises, assert_equal,
10
+ )
11
+ from numpy.ma import (
12
+ MaskType, MaskedArray, absolute, add, all, allclose, allequal, alltrue,
13
+ arange, arccos, arcsin, arctan, arctan2, array, average, choose,
14
+ concatenate, conjugate, cos, cosh, count, divide, equal, exp, filled,
15
+ getmask, greater, greater_equal, inner, isMaskedArray, less,
16
+ less_equal, log, log10, make_mask, masked, masked_array, masked_equal,
17
+ masked_greater, masked_greater_equal, masked_inside, masked_less,
18
+ masked_less_equal, masked_not_equal, masked_outside,
19
+ masked_print_option, masked_values, masked_where, maximum, minimum,
20
+ multiply, nomask, nonzero, not_equal, ones, outer, product, put, ravel,
21
+ repeat, resize, shape, sin, sinh, sometrue, sort, sqrt, subtract, sum,
22
+ take, tan, tanh, transpose, where, zeros,
23
+ )
24
+ from numpy.compat import pickle
25
+
26
# Shorthand used by the fixtures below.
pi = np.pi
27
+
28
+
29
def eq(v, w, msg=''):
    """Return True when `v` and `w` are close; print a diagnostic otherwise."""
    ok = allclose(v, w)
    if not ok:
        print(f'Not eq:{msg}\n{v}\n----{w}')
    return ok
34
+
35
+
36
+ class TestMa:
37
+
38
+ def setup_method(self):
39
+ x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
40
+ y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
41
+ a10 = 10.
42
+ m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
43
+ m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
44
+ xm = array(x, mask=m1)
45
+ ym = array(y, mask=m2)
46
+ z = np.array([-.5, 0., .5, .8])
47
+ zm = array(z, mask=[0, 1, 0, 0])
48
+ xf = np.where(m1, 1e+20, x)
49
+ s = x.shape
50
+ xm.set_fill_value(1e+20)
51
+ self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf, s)
52
+
53
+ def test_testBasic1d(self):
54
+ # Test of basic array creation and properties in 1 dimension.
55
+ (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
56
+ assert_(not isMaskedArray(x))
57
+ assert_(isMaskedArray(xm))
58
+ assert_equal(shape(xm), s)
59
+ assert_equal(xm.shape, s)
60
+ assert_equal(xm.dtype, x.dtype)
61
+ assert_equal(xm.size, reduce(lambda x, y:x * y, s))
62
+ assert_equal(count(xm), len(m1) - reduce(lambda x, y:x + y, m1))
63
+ assert_(eq(xm, xf))
64
+ assert_(eq(filled(xm, 1.e20), xf))
65
+ assert_(eq(x, xm))
66
+
67
+ @pytest.mark.parametrize("s", [(4, 3), (6, 2)])
68
+ def test_testBasic2d(self, s):
69
+ # Test of basic array creation and properties in 2 dimensions.
70
+ (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
71
+ x.shape = s
72
+ y.shape = s
73
+ xm.shape = s
74
+ ym.shape = s
75
+ xf.shape = s
76
+
77
+ assert_(not isMaskedArray(x))
78
+ assert_(isMaskedArray(xm))
79
+ assert_equal(shape(xm), s)
80
+ assert_equal(xm.shape, s)
81
+ assert_equal(xm.size, reduce(lambda x, y: x * y, s))
82
+ assert_equal(count(xm), len(m1) - reduce(lambda x, y: x + y, m1))
83
+ assert_(eq(xm, xf))
84
+ assert_(eq(filled(xm, 1.e20), xf))
85
+ assert_(eq(x, xm))
86
+
87
+ def test_testArithmetic(self):
88
+ # Test of basic arithmetic.
89
+ (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
90
+ a2d = array([[1, 2], [0, 4]])
91
+ a2dm = masked_array(a2d, [[0, 0], [1, 0]])
92
+ assert_(eq(a2d * a2d, a2d * a2dm))
93
+ assert_(eq(a2d + a2d, a2d + a2dm))
94
+ assert_(eq(a2d - a2d, a2d - a2dm))
95
+ for s in [(12,), (4, 3), (2, 6)]:
96
+ x = x.reshape(s)
97
+ y = y.reshape(s)
98
+ xm = xm.reshape(s)
99
+ ym = ym.reshape(s)
100
+ xf = xf.reshape(s)
101
+ assert_(eq(-x, -xm))
102
+ assert_(eq(x + y, xm + ym))
103
+ assert_(eq(x - y, xm - ym))
104
+ assert_(eq(x * y, xm * ym))
105
+ with np.errstate(divide='ignore', invalid='ignore'):
106
+ assert_(eq(x / y, xm / ym))
107
+ assert_(eq(a10 + y, a10 + ym))
108
+ assert_(eq(a10 - y, a10 - ym))
109
+ assert_(eq(a10 * y, a10 * ym))
110
+ with np.errstate(divide='ignore', invalid='ignore'):
111
+ assert_(eq(a10 / y, a10 / ym))
112
+ assert_(eq(x + a10, xm + a10))
113
+ assert_(eq(x - a10, xm - a10))
114
+ assert_(eq(x * a10, xm * a10))
115
+ assert_(eq(x / a10, xm / a10))
116
+ assert_(eq(x ** 2, xm ** 2))
117
+ assert_(eq(abs(x) ** 2.5, abs(xm) ** 2.5))
118
+ assert_(eq(x ** y, xm ** ym))
119
+ assert_(eq(np.add(x, y), add(xm, ym)))
120
+ assert_(eq(np.subtract(x, y), subtract(xm, ym)))
121
+ assert_(eq(np.multiply(x, y), multiply(xm, ym)))
122
+ with np.errstate(divide='ignore', invalid='ignore'):
123
+ assert_(eq(np.divide(x, y), divide(xm, ym)))
124
+
125
+ def test_testMixedArithmetic(self):
126
+ na = np.array([1])
127
+ ma = array([1])
128
+ assert_(isinstance(na + ma, MaskedArray))
129
+ assert_(isinstance(ma + na, MaskedArray))
130
+
131
def test_testUfuncs1(self):
    # Elementwise ufuncs on masked arrays must match their numpy
    # counterparts applied to the plain data.
    (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
    for np_f, ma_f in [(np.cos, cos), (np.cosh, cosh),
                       (np.sin, sin), (np.sinh, sinh),
                       (np.tan, tan), (np.tanh, tanh)]:
        assert_(eq(np_f(x), ma_f(xm)))
    with np.errstate(divide='ignore', invalid='ignore'):
        assert_(eq(np.sqrt(abs(x)), sqrt(xm)))
        assert_(eq(np.log(abs(x)), log(xm)))
        assert_(eq(np.log10(abs(x)), log10(xm)))
    assert_(eq(np.exp(x), exp(xm)))
    # The inverse trig functions are evaluated on z/zm, whose values lie
    # inside the valid domain.
    for np_f, ma_f in [(np.arcsin, arcsin), (np.arccos, arccos),
                       (np.arctan, arctan)]:
        assert_(eq(np_f(z), ma_f(zm)))
    assert_(eq(np.arctan2(x, y), arctan2(xm, ym)))
    assert_(eq(np.absolute(x), absolute(xm)))
    for np_f, ma_f in [(np.equal, equal), (np.not_equal, not_equal),
                       (np.less, less), (np.greater, greater),
                       (np.less_equal, less_equal),
                       (np.greater_equal, greater_equal)]:
        assert_(eq(np_f(x, y), ma_f(xm, ym)))
    assert_(eq(np.conjugate(x), conjugate(xm)))
    # concatenate must accept any mix of plain and masked operands.
    assert_(eq(np.concatenate((x, y)), concatenate((xm, ym))))
    assert_(eq(np.concatenate((x, y)), concatenate((x, y))))
    assert_(eq(np.concatenate((x, y)), concatenate((xm, y))))
    assert_(eq(np.concatenate((x, y, x)), concatenate((x, ym, x))))
161
+
162
def test_xtestCount(self):
    # count() must return intp-typed results and honour the mask, both
    # for full reductions and along an axis.
    ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
    assert_(count(ott).dtype.type is np.intp)
    assert_equal(count(ott), 3)
    assert_equal(count(1), 1)
    assert_(eq(array(1, mask=[1]), 0))
    ott = ott.reshape((2, 2))
    assert_(count(ott).dtype.type is np.intp)
    assert_(isinstance(count(ott, 0), np.ndarray))
    assert_(count(ott).dtype.type is np.intp)
    assert_(eq(count(ott), 3))
    assert_(getmask(count(ott, 0)) is nomask)
    assert_(eq(count(ott, 0), [1, 2]))
176
+
177
def test_testMinMax(self):
    # maximum/minimum reductions over the raveled masked array.
    (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
    flat_x = np.ravel(x)    # max doesn't work if shaped
    flat_xm = ravel(xm)

    # Holds because the fixture data was chosen so that no extreme value
    # is hidden by the mask.
    assert_(eq(max(flat_x), maximum.reduce(flat_xm)))
    assert_(eq(min(flat_x), minimum.reduce(flat_xm)))
186
+
187
def test_testAddSumProd(self):
    # add/sum/product reductions must match numpy on the filled data.
    (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
    assert_(eq(np.add.reduce(x), add.reduce(x)))
    assert_(eq(np.add.accumulate(x), add.accumulate(x)))
    # A 0-d array exercises the scalar fast path (checked twice, as in
    # the original regression test).
    assert_(eq(4, sum(array(4), axis=0)))
    assert_(eq(4, sum(array(4), axis=0)))
    assert_(eq(np.sum(x, axis=0), sum(x, axis=0)))
    assert_(eq(np.sum(filled(xm, 0), axis=0), sum(xm, axis=0)))
    assert_(eq(np.sum(x, 0), sum(x, 0)))
    assert_(eq(np.prod(x, axis=0), product(x, axis=0)))
    assert_(eq(np.prod(x, 0), product(x, 0)))
    assert_(eq(np.prod(filled(xm, 1), axis=0), product(xm, axis=0)))
    if len(s) > 1:
        # Axis-1 variants only make sense for the 2-D fixture shapes.
        assert_(eq(np.concatenate((x, y), 1), concatenate((xm, ym), 1)))
        assert_(eq(np.add.reduce(x, 1), add.reduce(x, 1)))
        assert_(eq(np.sum(x, 1), sum(x, 1)))
        assert_(eq(np.prod(x, 1), product(x, 1)))
207
+
208
def test_testCI(self):
    # Conversions to string plus the indexing/assignment protocol.
    x1 = np.array([1, 2, 4, 3])
    x2 = array(x1, mask=[1, 0, 0, 0])
    x3 = array(x1, mask=[0, 1, 0, 1])
    x4 = array(x1)
    # str/repr of a masked array must not raise.
    str(x2)
    repr(x2)
    assert_(eq(np.sort(x1), sort(x2, fill_value=0)))
    # Scalar, slice and full-slice indexing agree with the plain array.
    assert_(type(x2[1]) is type(x1[1]))
    assert_(x1[1] == x2[1])
    assert_(x2[0] is masked)
    for sl in (2, slice(2, 5), slice(None)):
        assert_(eq(x1[sl], x2[sl]))
    assert_(eq(x1[1:], x3[1:]))
    # Assignment to data, then to the mask.
    x1[2] = 9
    x2[2] = 9
    assert_(eq(x1, x2))
    x1[1:3] = 99
    x2[1:3] = 99
    assert_(eq(x1, x2))
    x2[1] = masked
    assert_(eq(x1, x2))
    x2[1:3] = masked
    assert_(eq(x1, x2))
    x2[:] = x1
    x2[1] = masked
    assert_(allequal(getmask(x2), array([0, 1, 0, 0])))
    # Whole-array assignment from a masked source transfers the mask.
    x3[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
    assert_(allequal(getmask(x3), array([0, 1, 1, 0])))
    x4[:] = masked_array([1, 2, 3, 4], [0, 1, 1, 0])
    assert_(allequal(getmask(x4), array([0, 1, 1, 0])))
    assert_(allequal(x4, array([1, 2, 3, 4])))
    x1 = np.arange(5) * 1.0
    x2 = masked_values(x1, 3.0)
    assert_(eq(x1, x2))
    assert_(allequal(array([0, 0, 0, 1, 0], MaskType), x2.mask))
    assert_(eq(3.0, x2.fill_value))
    # Object arrays: element access must return the stored Python object.
    x1 = array([1, 'hello', 2, 3], object)
    x2 = np.array([1, 'hello', 2, 3], object)
    s1 = x1[1]
    s2 = x2[1]
    assert_equal(type(s2), str)
    assert_equal(type(s1), str)
    assert_equal(s1, s2)
    # An empty slice keeps a well-defined (0,) shape.
    assert_(x1[1:1].shape == (0,))
257
+
258
def test_testCopySize(self):
    # Subtleties of mask sharing, copying and resizing.
    n = [0, 0, 1, 0, 0]
    m = make_mask(n)
    m2 = make_mask(m)
    # make_mask passes an existing mask through unchanged ...
    assert_(m is m2)
    # ... unless an explicit copy is requested.
    m3 = make_mask(m, copy=True)
    assert_(m is not m3)

    x1 = np.arange(5)
    y1 = array(x1, mask=m)
    # Constructing from an ndarray copies the data but shares the mask.
    assert_(y1._data is not x1)
    assert_(allequal(x1, y1._data))
    assert_(y1._mask is m)

    y1a = array(y1, copy=0)
    # For copy=False, one might expect that the array would just
    # passed on, i.e., that it would be "is" instead of "==".
    # See gh-4043 for discussion.
    assert_(y1a._mask.__array_interface__ ==
            y1._mask.__array_interface__)

    y2 = array(x1, mask=m3, copy=0)
    assert_(y2._mask is m3)
    assert_(y2[2] is masked)
    # Writing to a masked slot unmasks it in place.
    y2[2] = 9
    assert_(y2[2] is not masked)
    assert_(y2._mask is m3)
    assert_(allequal(y2.mask, 0))

    y2a = array(x1, mask=m, copy=1)
    assert_(y2a._mask is not m)
    assert_(y2a[2] is masked)
    y2a[2] = 9
    assert_(y2a[2] is not masked)
    assert_(y2a._mask is not m)
    assert_(allequal(y2a.mask, 0))

    y3 = array(x1 * 1.0, mask=m)
    assert_(filled(y3).dtype is (x1 * 1.0).dtype)

    x4 = arange(4)
    x4[2] = masked
    y4 = resize(x4, (8,))
    assert_(eq(concatenate([x4, x4]), y4))
    assert_(eq(getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0]))
    # repeat() with a per-element count and with a scalar count agree.
    y5 = repeat(x4, (2, 2, 2, 2), axis=0)
    assert_(eq(y5, [0, 0, 1, 1, 2, 2, 3, 3]))
    assert_(eq(repeat(x4, 2, axis=0), y5))
308
+
309
def test_testPut(self):
    # Fancy-index assignment and put() unmask the written entries.
    values = arange(5)
    mask_list = [0, 0, 0, 1, 1]
    msk = make_mask(mask_list)
    msk_copy = msk.copy()
    x = array(values, mask=msk)
    assert_(x[3] is masked)
    assert_(x[4] is masked)
    x[[1, 4]] = [10, 40]
    # In-place assignment keeps the original mask object alive.
    assert_(x._mask is msk)
    assert_(x[3] is masked)
    assert_(x[4] is not masked)
    assert_(eq(x, [0, 10, 2, -1, 40]))

    x = array(values, mask=msk_copy, copy=True)
    x.put([0, 1, 2], [-1, 100, 200])
    # put() on a copied array must not alias the caller's mask.
    assert_(x._mask is not msk_copy)
    assert_(x[3] is masked)
    assert_(x[4] is masked)
    assert_(eq(x, [-1, 100, 200, 0, 0]))
330
+
331
def test_testPut2(self):
    # Assigning a masked source through a slice transfers its mask,
    # both directly and through a view.
    data = arange(5)
    x = array(data, mask=[0, 0, 0, 0, 0])
    src = array([10, 40], mask=[1, 0])
    assert_(x[2] is not masked)
    assert_(x[3] is not masked)
    x[2:4] = src
    assert_(x[2] is masked)
    assert_(x[3] is not masked)
    assert_(eq(x, [0, 1, 10, 40, 4]))

    data = arange(5)
    x = array(data, mask=[0, 0, 0, 0, 0])
    view = x[2:4]
    src = array([10, 40], mask=[1, 0])
    assert_(x[2] is not masked)
    assert_(x[3] is not masked)
    # Writing through the view must propagate back to the base array.
    view[:] = src
    assert_(view[0] is masked)
    assert_(view[1] is not masked)
    assert_(eq(view, [10, 40]))
    assert_(x[2] is masked)
    assert_(x[3] is not masked)
    assert_(eq(x, [0, 1, 10, 40, 4]))
356
+
357
def test_testMaPut(self):
    # put() with a masked source: data and mask land at the indices.
    (x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
    picks = [1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1]
    indices = np.nonzero(picks)[0]
    put(ym, indices, zm)
    assert_(all(take(ym, indices, axis=0) == zm))
363
+
364
def test_testOddFeatures(self):
    # A grab-bag: flat assignment, complex parts, where/choose, and the
    # masked_* convenience constructors.
    x = arange(20)
    x = x.reshape(4, 5)
    x.flat[5] = 12
    assert_(x[1, 0] == 12)
    z = x + 10j * x
    assert_(eq(z.real, x))
    assert_(eq(z.imag, 10 * x))
    assert_(eq((z * conjugate(z)).real, 101 * x * x))
    z.imag[...] = 0.0

    x = arange(10)
    x[3] = masked
    assert_(str(x[3]) == str(masked))
    c = x >= 8
    assert_(count(where(c, masked, masked)) == 0)
    assert_(shape(where(c, masked, masked)) == c.shape)
    # where(cond, x, masked): masked wherever cond is False or x masked.
    z = where(c, x, masked)
    assert_(z.dtype is x.dtype)
    assert_(z[3] is masked)
    assert_(z[4] is masked)
    assert_(z[7] is masked)
    assert_(z[8] is not masked)
    assert_(z[9] is not masked)
    assert_(eq(x, z))
    z = where(c, masked, x)
    assert_(z.dtype is x.dtype)
    assert_(z[3] is masked)
    assert_(z[4] is not masked)
    assert_(z[7] is not masked)
    assert_(z[8] is masked)
    assert_(z[9] is masked)
    # masked_where behaves like where(c, masked, x) here.
    z = masked_where(c, x)
    assert_(z.dtype is x.dtype)
    assert_(z[3] is masked)
    assert_(z[4] is not masked)
    assert_(z[7] is not masked)
    assert_(z[8] is masked)
    assert_(z[9] is masked)
    assert_(eq(x, z))
    x = array([1., 2., 3., 4., 5.])
    c = array([1, 1, 1, 0, 0])
    x[2] = masked
    z = where(c, x, -x)
    assert_(eq(z, [1., 2., 0., -4., -5]))
    c[0] = masked
    z = where(c, x, -x)
    assert_(eq(z, [1., 2., 0., -4., -5]))
    assert_(z[0] is masked)
    assert_(z[1] is not masked)
    assert_(z[2] is masked)
    # Each masked_<cmp> helper is masked_where over the comparison.
    for mfunc, cmpfunc in [(masked_greater, greater),
                           (masked_greater_equal, greater_equal),
                           (masked_less, less),
                           (masked_less_equal, less_equal),
                           (masked_not_equal, not_equal),
                           (masked_equal, equal)]:
        assert_(eq(masked_where(cmpfunc(x, 2), x), mfunc(x, 2)))
    assert_(eq(masked_inside(list(range(5)), 1, 3), [0, 199, 199, 199, 4]))
    assert_(eq(masked_outside(list(range(5)), 1, 3), [199, 1, 2, 3, 199]))
    # Pre-existing masks are OR-ed with the new condition.
    assert_(eq(masked_inside(array(list(range(5)),
                                   mask=[1, 0, 0, 0, 0]), 1, 3).mask,
               [1, 1, 1, 1, 0]))
    assert_(eq(masked_outside(array(list(range(5)),
                                    mask=[0, 1, 0, 0, 0]), 1, 3).mask,
               [1, 1, 0, 0, 1]))
    assert_(eq(masked_equal(array(list(range(5)),
                                  mask=[1, 0, 0, 0, 0]), 2).mask,
               [1, 0, 1, 0, 0]))
    assert_(eq(masked_not_equal(array([2, 2, 1, 2, 1],
                                      mask=[1, 0, 0, 0, 0]), 2).mask,
               [1, 0, 1, 0, 1]))
    assert_(eq(masked_where([1, 1, 0, 0, 0], [1, 2, 3, 4, 5]),
               [99, 99, 3, 4, 5]))
    atest = ones((10, 10, 10), dtype=np.float32)
    btest = zeros(atest.shape, MaskType)
    ctest = masked_where(btest, atest)
    assert_(eq(atest, ctest))
    z = choose(c, (-x, x))
    assert_(eq(z, [1., 2., 0., -4., -5]))
    assert_(z[0] is masked)
    assert_(z[1] is not masked)
    assert_(z[2] is masked)
    x = arange(6)
    x[5] = masked
    y = arange(6) * 10
    y[2] = masked
    c = array([1, 1, 1, 0, 0, 0], mask=[1, 0, 0, 0, 0, 0])
    cm = c.filled(1)
    z = where(c, x, y)
    zm = where(cm, x, y)
    assert_(eq(z, zm))
    assert_(getmask(zm) is nomask)
    assert_(eq(zm, [0, 1, 2, 30, 40, 50]))
    z = where(c, masked, 1)
    assert_(eq(z, [99, 99, 99, 1, 1, 1]))
    z = where(c, 1, masked)
    assert_(eq(z, [99, 1, 1, 99, 99, 99]))
464
+
465
def test_testMinMax2(self):
    # minimum/maximum as elementwise binary functions and reductions.
    assert_(eq(minimum([1, 2, 3], [4, 0, 9]), [1, 0, 3]))
    assert_(eq(maximum([1, 2, 3], [4, 0, 9]), [4, 2, 9]))
    x = arange(5)
    y = arange(5) - 2
    x[3] = masked
    y[0] = masked
    # Equivalent to a where() over the comparison.
    assert_(eq(minimum(x, y), where(less(x, y), x, y)))
    assert_(eq(maximum(x, y), where(greater(x, y), x, y)))
    # Reductions skip the masked element at index 3.
    assert_(minimum.reduce(x) == 0)
    assert_(maximum.reduce(x) == 4)
477
+
478
def test_testTakeTransposeInnerOuter(self):
    # take/transpose/inner/outer handle masked input by filling with 0.
    x = arange(24)
    y = np.arange(24)
    x[5:6] = masked
    x = x.reshape(2, 3, 4)
    y = y.reshape(2, 3, 4)
    assert_(eq(np.transpose(y, (2, 0, 1)), transpose(x, (2, 0, 1))))
    assert_(eq(np.take(y, (2, 0, 1), 1), take(x, (2, 0, 1), 1)))
    assert_(eq(np.inner(filled(x, 0), filled(y, 0)), inner(x, y)))
    assert_(eq(np.outer(filled(x, 0), filled(y, 0)), outer(x, y)))
    # take() on object arrays preserves the stored Python objects.
    y = array(['abc', 1, 'def', 2, 3], object)
    y[2] = masked
    taken = take(y, [0, 3, 4])
    for got, expected in zip(taken, ('abc', 2, 3)):
        assert_(got == expected)
497
+
498
def test_testInplace(self):
    # In-place arithmetic on plain and masked arrays, checked against a
    # pristine reference array `y`.
    y = arange(10)

    x = arange(10)
    xm = arange(10)
    xm[2] = masked
    x += 1
    assert_(eq(x, y + 1))
    xm += 1
    # BUG fix: the original re-checked `x` here, so the masked in-place
    # add was never verified; check the operand that was updated.
    assert_(eq(xm, y + 1))

    x = arange(10)
    xm = arange(10)
    xm[2] = masked
    x -= 1
    assert_(eq(x, y - 1))
    xm -= 1
    assert_(eq(xm, y - 1))

    x = arange(10) * 1.0
    xm = arange(10) * 1.0
    xm[2] = masked
    x *= 2.0
    assert_(eq(x, y * 2))
    xm *= 2.0
    assert_(eq(xm, y * 2))

    x = arange(10) * 2
    # BUG fix: `xm` started from arange(10) (not * 2) and the assertion
    # re-checked `x`, so in-place masked floor division was untested.
    xm = arange(10) * 2
    xm[2] = masked
    x //= 2
    assert_(eq(x, y))
    xm //= 2
    assert_(eq(xm, y))

    x = arange(10) * 1.0
    xm = arange(10) * 1.0
    xm[2] = masked
    x /= 2.0
    assert_(eq(x, y / 2.0))
    # Dividing by arange(10) masks the 0/0 entry at index 0.
    xm /= arange(10)
    assert_(eq(xm, ones((10,))))

    x = arange(10).astype(np.float32)
    xm = arange(10)
    xm[2] = masked
    x += 1.
    assert_(eq(x, y + 1.))
547
+
548
def test_testPickle(self):
    # Masked arrays must round-trip through every supported pickle
    # protocol from 2 upwards.
    x = arange(12)
    x[4:10:2] = masked
    x = x.reshape(4, 3)
    for proto in range(2, pickle.HIGHEST_PROTOCOL + 1):
        restored = pickle.loads(pickle.dumps(x, protocol=proto))
        assert_(eq(x, restored))
557
+
558
def test_testMasked(self):
    # The `masked` singleton: repr, identity after assignment, filling.
    xx = arange(6)
    xx[1] = masked
    assert_(str(masked) == '--')
    assert_(xx[1] is masked)
    assert_equal(filled(xx[1], 0), 0)
565
+
566
def test_testAverage1(self):
    # average(): masks, weights and the `returned` (weight-sum) flag.
    ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
    assert_(eq(2.0, average(ott, axis=0)))
    assert_(eq(2.0, average(ott, weights=[1., 1., 2., 1.])))
    result, wts = average(ott, weights=[1., 1., 2., 1.], returned=True)
    assert_(eq(2.0, result))
    assert_(wts == 4.0)
    # Averaging a fully-masked array yields the masked singleton.
    ott[:] = masked
    assert_(average(ott, axis=0) is masked)
    ott = array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
    ott = ott.reshape(2, 2)
    ott[:, 1] = masked
    assert_(eq(average(ott, axis=0), [2.0, 0.0]))
    assert_(average(ott, axis=1)[0] is masked)
    assert_(eq([2., 0.], average(ott, axis=0)))
    result, wts = average(ott, axis=0, returned=True)
    # A fully-masked column contributes zero weight.
    assert_(eq(wts, [1., 0.]))
584
+
585
def test_testAverage2(self):
    # More average(): 2-D input, 2-D weights, masks, `returned` shapes.
    w1 = [0, 1, 1, 1, 1, 0]
    w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]]
    x = arange(6)
    assert_(allclose(average(x, axis=0), 2.5))
    assert_(allclose(average(x, axis=0, weights=w1), 2.5))
    y = array([arange(6), 2.0 * arange(6)])
    assert_(allclose(average(y, None),
                     np.add.reduce(np.arange(6)) * 3. / 12.))
    assert_(allclose(average(y, axis=0), np.arange(6) * 3. / 2.))
    assert_(allclose(average(y, axis=1),
                     [average(x, axis=0), average(x, axis=0)*2.0]))
    assert_(allclose(average(y, None, weights=w2), 20. / 6.))
    assert_(allclose(average(y, axis=0, weights=w2),
                     [0., 1., 2., 3., 4., 10.]))
    assert_(allclose(average(y, axis=1),
                     [average(x, axis=0), average(x, axis=0)*2.0]))
    # Masks ranging from "nothing masked" to "everything masked".
    m1 = zeros(6)
    m2 = [0, 0, 1, 1, 0, 0]
    m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]]
    m4 = ones(6)
    m5 = [0, 1, 1, 1, 1, 1]
    assert_(allclose(average(masked_array(x, m1), axis=0), 2.5))
    assert_(allclose(average(masked_array(x, m2), axis=0), 2.5))
    assert_(average(masked_array(x, m4), axis=0) is masked)
    assert_equal(average(masked_array(x, m5), axis=0), 0.0)
    assert_equal(count(average(masked_array(x, m4), axis=0)), 0)
    z = masked_array(y, m3)
    assert_(allclose(average(z, None), 20. / 6.))
    assert_(allclose(average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5]))
    assert_(allclose(average(z, axis=1), [2.5, 5.0]))
    assert_(allclose(average(z, axis=0, weights=w2),
                     [0., 1., 99., 99., 4.0, 10.0]))

    # `returned=True` must hand back weights of the same shape as the
    # averages, for every weighting scheme.
    a = arange(6)
    b = arange(6) * 3
    r1, w1 = average([[a, b], [b, a]], axis=1, returned=True)
    assert_equal(shape(r1), shape(w1))
    assert_equal(r1.shape, w1.shape)
    r2, w2 = average(ones((2, 2, 3)), axis=0, weights=[3, 1], returned=True)
    assert_equal(shape(w2), shape(r2))
    r2, w2 = average(ones((2, 2, 3)), returned=True)
    assert_equal(shape(w2), shape(r2))
    r2, w2 = average(ones((2, 2, 3)), weights=ones((2, 2, 3)), returned=True)
    assert_(shape(w2) == shape(r2))
    a2d = array([[1, 2], [0, 4]], float)
    a2dm = masked_array(a2d, [[0, 0], [1, 0]])
    assert_(eq(average(a2d, axis=0), [0.5, 3.0]))
    assert_(eq(average(a2dm, axis=0), [1.0, 3.0]))
    assert_(eq(average(a2dm, axis=None), 7. / 3.))
    assert_(eq(average(a2dm, axis=1), [1.5, 4.0]))
642
+
643
+ def test_testToPython(self):
644
+ assert_equal(1, int(array(1)))
645
+ assert_equal(1.0, float(array(1)))
646
+ assert_equal(1, int(array([[[1]]])))
647
+ assert_equal(1.0, float(array([[1]])))
648
+ assert_raises(TypeError, float, array([1, 1]))
649
+ assert_raises(ValueError, bool, array([0, 1]))
650
+ assert_raises(ValueError, bool, array([0, 0], mask=[0, 1]))
651
+
652
def test_testScalarArithmetic(self):
    # Arithmetic on fully-masked 0-d arrays keeps the result masked.
    xm = array(0, mask=1)
    # TODO FIXME: Find out what the following raises a warning in r8247
    with np.errstate(divide='ignore'):
        assert_((1 / array(0)).mask)
    assert_((1 + xm).mask)
    assert_((-xm).mask)
    assert_(maximum(xm, xm).mask)
    assert_(minimum(xm, xm).mask)
    # Filling never changes the dtype.
    assert_(xm.filled().dtype is xm._data.dtype)
    x = array(0, mask=0)
    assert_(x.filled() == x._data)
    assert_equal(str(xm), str(masked_print_option))
666
+
667
def test_testArrayMethods(self):
    # With nothing masked, MaskedArray methods defer to the underlying
    # data array.
    a = array([1, 3, 2])
    for name in ('any', 'all', 'argmax', 'argmin', 'sum',
                 'conj', 'conjugate'):
        assert_(eq(getattr(a, name)(), getattr(a._data, name)()))
    assert_(eq(a.choose(0, 1, 2, 3, 4),
               a._data.choose(0, 1, 2, 3, 4)))
    assert_(eq(a.compress([1, 0, 1]), a._data.compress([1, 0, 1])))
    assert_(eq(a.take([1, 2]), a._data.take([1, 2])))
    m = array([[1, 2], [3, 4]])
    assert_(eq(m.diagonal(), m._data.diagonal()))
    assert_(eq(m.transpose(), m._data.transpose()))
683
+
684
+ def test_testArrayAttributes(self):
685
+ a = array([1, 3, 2])
686
+ assert_equal(a.ndim, 1)
687
+
688
+ def test_testAPI(self):
689
+ assert_(not [m for m in dir(np.ndarray)
690
+ if m not in dir(MaskedArray) and
691
+ not m.startswith('_')])
692
+
693
+ def test_testSingleElementSubscript(self):
694
+ a = array([1, 3, 2])
695
+ b = array([1, 3, 2], mask=[1, 0, 1])
696
+ assert_equal(a[0].shape, ())
697
+ assert_equal(b[0].shape, ())
698
+ assert_equal(b[1].shape, ())
699
+
700
+ def test_assignment_by_condition(self):
701
+ # Test for gh-18951
702
+ a = array([1, 2, 3, 4], mask=[1, 0, 1, 0])
703
+ c = a >= 3
704
+ a[c] = 5
705
+ assert_(a[2] is masked)
706
+
707
+ def test_assignment_by_condition_2(self):
708
+ # gh-19721
709
+ a = masked_array([0, 1], mask=[False, False])
710
+ b = masked_array([0, 1], mask=[True, True])
711
+ mask = a < 1
712
+ b[mask] = a[mask]
713
+ expected_mask = [False, True]
714
+ assert_equal(b.mask, expected_mask)
715
+
716
+
717
class TestUfuncs:
    # Regression checks: each masked-array ufunc wrapper must agree with
    # the raw numpy ufunc, and reductions must honour the mask.

    def setup_method(self):
        self.d = (array([1.0, 0, -1, pi / 2] * 2, mask=[0, 1] + [0] * 6),
                  array([1.0, 0, -1, pi / 2] * 2, mask=[1, 0] + [0] * 6),)

    def test_testUfuncRegression(self):
        # Functions whose domain errors must be silenced for this data.
        f_invalid_ignore = [
            'sqrt', 'arctanh', 'arcsin', 'arccos',
            'arccosh', 'arctanh', 'log', 'log10', 'divide',
            'true_divide', 'floor_divide', 'remainder', 'fmod']
        for name in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
                     'sin', 'cos', 'tan',
                     'arcsin', 'arccos', 'arctan',
                     'sinh', 'cosh', 'tanh',
                     'arcsinh',
                     'arccosh',
                     'arctanh',
                     'absolute', 'fabs', 'negative',
                     'floor', 'ceil',
                     'logical_not',
                     'add', 'subtract', 'multiply',
                     'divide', 'true_divide', 'floor_divide',
                     'remainder', 'fmod', 'hypot', 'arctan2',
                     'equal', 'not_equal', 'less_equal', 'greater_equal',
                     'less', 'greater',
                     'logical_and', 'logical_or', 'logical_xor']:
            # Most names live in umath; a few only in fromnumeric.
            try:
                np_func = getattr(umath, name)
            except AttributeError:
                np_func = getattr(fromnumeric, name)
            ma_func = getattr(np.ma, name)
            args = self.d[:np_func.nin]
            with np.errstate():
                if name in f_invalid_ignore:
                    np.seterr(invalid='ignore')
                if name in ['arctanh', 'log', 'log10']:
                    np.seterr(divide='ignore')
                np_result = np_func(*args)
                ma_result = ma_func(*args)
            assert_(eq(np_result.filled(0), ma_result.filled(0), name))
            assert_(eqmask(np_result.mask, ma_result.mask))

    def test_reduce(self):
        # Reductions skip masked entries.
        a = self.d[0]
        assert_(not alltrue(a, axis=0))
        assert_(sometrue(a, axis=0))
        assert_equal(sum(a[:3], axis=0), 0)
        assert_equal(product(a, axis=0), 0)

    def test_minmax(self):
        # min/max over full array, per axis, and per row with full masks.
        a = arange(1, 13).reshape(3, 4)
        amask = masked_where(a < 5, a)
        assert_equal(amask.max(), a.max())
        assert_equal(amask.min(), 5)
        assert_((amask.max(0) == a.max(0)).all())
        assert_((amask.min(0) == [5, 6, 7, 8]).all())
        # Row 0 is entirely masked, so its extrema are masked.
        assert_(amask.max(1)[0].mask)
        assert_(amask.min(1)[0].mask)

    def test_nonzero(self):
        # nonzero() ignores masked entries for every numeric dtype code.
        for t in "?bhilqpBHILQPfdgFDGO":
            x = array([1, 0, 2, 0], mask=[0, 0, 1, 1])
            assert_(eq(nonzero(x), [0]))
780
+
781
+
782
class TestArrayMethods:
    # Array methods (trace/clip/ptp/swapaxes/cumprod/cumsum/var/std) on
    # masked arrays, compared against the same computation performed on
    # the compressed (unmasked) data.

    def setup_method(self):
        # A fixed data set plus 2-D and 4-D reshapes, with a matching
        # element mask applied to each view.
        x = np.array([8.375, 7.545, 8.828, 8.5, 1.757, 5.928,
                      8.43, 7.78, 9.865, 5.878, 8.979, 4.732,
                      3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
                      6.04, 9.63, 7.712, 3.382, 4.489, 6.479,
                      7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
                      7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
        X = x.reshape(6, 6)
        XX = x.reshape(3, 2, 2, 3)

        m = np.array([0, 1, 0, 1, 0, 0,
                      1, 0, 1, 1, 0, 1,
                      0, 0, 0, 1, 0, 1,
                      0, 0, 0, 1, 1, 1,
                      1, 0, 0, 1, 0, 0,
                      0, 0, 1, 0, 1, 0])
        mx = array(data=x, mask=m)
        mX = array(data=X, mask=m.reshape(X.shape))
        mXX = array(data=XX, mask=m.reshape(XX.shape))

        self.d = (x, X, XX, m, mx, mX, mXX)

    def test_trace(self):
        # trace() sums only unmasked diagonal elements.
        (x, X, XX, m, mx, mX, mXX,) = self.d
        mXdiag = mX.diagonal()
        assert_equal(mX.trace(), mX.diagonal().compressed().sum())
        assert_(eq(mX.trace(),
                   X.trace() - sum(mXdiag.mask * X.diagonal(),
                                   axis=0)))

    def test_clip(self):
        # clip() preserves the mask and clips the data.
        (x, X, XX, m, mx, mX, mXX,) = self.d
        clipped = mx.clip(2, 8)
        assert_(eq(clipped.mask, mx.mask))
        assert_(eq(clipped._data, x.clip(2, 8)))
        assert_(eq(clipped._data, mx._data.clip(2, 8)))

    def test_ptp(self):
        # ptp() over the whole array and per axis matches the
        # peak-to-peak of the compressed data.
        (x, X, XX, m, mx, mX, mXX,) = self.d
        (n, m) = X.shape
        assert_equal(mx.ptp(), mx.compressed().ptp())
        # np.float64 instead of np.float_: the `float_` alias was
        # removed in NumPy 2.0 and was always the same type.
        rows = np.zeros(n, np.float64)
        cols = np.zeros(m, np.float64)
        for k in range(m):
            cols[k] = mX[:, k].compressed().ptp()
        for k in range(n):
            rows[k] = mX[k].compressed().ptp()
        assert_(eq(mX.ptp(0), cols))
        assert_(eq(mX.ptp(1), rows))

    def test_swapaxes(self):
        # swapaxes() moves mask and data together.
        (x, X, XX, m, mx, mX, mXX,) = self.d
        mXswapped = mX.swapaxes(0, 1)
        assert_(eq(mXswapped[-1], mX[:, -1]))
        mXXswapped = mXX.swapaxes(0, 2)
        assert_equal(mXXswapped.shape, (2, 2, 3, 3))

    def test_cumprod(self):
        # cumprod() treats masked entries as the multiplicative identity.
        (x, X, XX, m, mx, mX, mXX,) = self.d
        mXcp = mX.cumprod(0)
        assert_(eq(mXcp._data, mX.filled(1).cumprod(0)))
        mXcp = mX.cumprod(1)
        assert_(eq(mXcp._data, mX.filled(1).cumprod(1)))

    def test_cumsum(self):
        # cumsum() treats masked entries as the additive identity.
        (x, X, XX, m, mx, mX, mXX,) = self.d
        mXcp = mX.cumsum(0)
        assert_(eq(mXcp._data, mX.filled(0).cumsum(0)))
        mXcp = mX.cumsum(1)
        assert_(eq(mXcp._data, mX.filled(0).cumsum(1)))

    def test_varstd(self):
        # var()/std() equal the statistics of the compressed data, both
        # globally and per row/column.
        (x, X, XX, m, mx, mX, mXX,) = self.d
        assert_(eq(mX.var(axis=None), mX.compressed().var()))
        assert_(eq(mX.std(axis=None), mX.compressed().std()))
        assert_(eq(mXX.var(axis=3).shape, XX.var(axis=3).shape))
        assert_(eq(mX.var().shape, X.var().shape))
        (mXvar0, mXvar1) = (mX.var(axis=0), mX.var(axis=1))
        for k in range(6):
            assert_(eq(mXvar1[k], mX[k].compressed().var()))
            assert_(eq(mXvar0[k], mX[:, k].compressed().var()))
            assert_(eq(np.sqrt(mXvar0[k]),
                       mX[:, k].compressed().std()))
867
+
868
+
869
def eqmask(m1, m2):
    # Two masks agree when both are the `nomask` singleton, or when they
    # compare equal elementwise.
    if m1 is nomask or m2 is nomask:
        return m1 is m2
    return (m1 == m2).all()
llava_next/lib/python3.10/site-packages/numpy/ma/testutils.py ADDED
@@ -0,0 +1,288 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Miscellaneous functions for testing masked arrays and subclasses
2
+
3
+ :author: Pierre Gerard-Marchant
4
+ :contact: pierregm_at_uga_dot_edu
5
+ :version: $Id: testutils.py 3529 2007-11-13 08:01:14Z jarrod.millman $
6
+
7
+ """
8
+ import operator
9
+
10
+ import numpy as np
11
+ from numpy import ndarray, float_
12
+ import numpy.core.umath as umath
13
+ import numpy.testing
14
+ from numpy.testing import (
15
+ assert_, assert_allclose, assert_array_almost_equal_nulp,
16
+ assert_raises, build_err_msg
17
+ )
18
+ from .core import mask_or, getmask, masked_array, nomask, masked, filled
19
+
20
+ __all__masked = [
21
+ 'almost', 'approx', 'assert_almost_equal', 'assert_array_almost_equal',
22
+ 'assert_array_approx_equal', 'assert_array_compare',
23
+ 'assert_array_equal', 'assert_array_less', 'assert_close',
24
+ 'assert_equal', 'assert_equal_records', 'assert_mask_equal',
25
+ 'assert_not_equal', 'fail_if_array_equal',
26
+ ]
27
+
28
+ # Include some normal test functions to avoid breaking other projects who
29
+ # have mistakenly included them from this file. SciPy is one. That is
30
+ # unfortunate, as some of these functions are not intended to work with
31
+ # masked arrays. But there was no way to tell before.
32
+ from unittest import TestCase
33
+ __some__from_testing = [
34
+ 'TestCase', 'assert_', 'assert_allclose', 'assert_array_almost_equal_nulp',
35
+ 'assert_raises'
36
+ ]
37
+
38
+ __all__ = __all__masked + __some__from_testing
39
+
40
+
41
def approx(a, b, fill_value=True, rtol=1e-5, atol=1e-8):
    """
    Returns true if all components of a and b are equal to given tolerances.

    If fill_value is True, masked values considered equal. Otherwise,
    masked values are considered unequal. The relative error rtol should
    be positive and << 1.0 The absolute error atol comes into play for
    those elements of b that are very small or zero; it says how small a
    must be also.

    """
    m = mask_or(getmask(a), getmask(b))
    d1 = filled(a)
    d2 = filled(b)
    # Object arrays cannot be compared with a tolerance; fall back to
    # exact equality.
    if d1.dtype.char == "O" or d2.dtype.char == "O":
        return np.equal(d1, d2).ravel()
    # np.float64 instead of the `float_` alias, which was removed in
    # NumPy 2.0 and was always the same type.
    x = filled(masked_array(d1, copy=False, mask=m),
               fill_value).astype(np.float64)
    y = filled(masked_array(d2, copy=False, mask=m), 1).astype(np.float64)
    d = np.less_equal(umath.absolute(x - y), atol + rtol * umath.absolute(y))
    return d.ravel()
61
+
62
+
63
def almost(a, b, decimal=6, fill_value=True):
    """
    Returns True if a and b are equal up to decimal places.

    If fill_value is True, masked values considered equal. Otherwise,
    masked values are considered unequal.

    """
    m = mask_or(getmask(a), getmask(b))
    d1 = filled(a)
    d2 = filled(b)
    # Object arrays cannot be rounded; fall back to exact equality.
    if d1.dtype.char == "O" or d2.dtype.char == "O":
        return np.equal(d1, d2).ravel()
    # np.float64 instead of the `float_` alias, which was removed in
    # NumPy 2.0 and was always the same type.
    x = filled(masked_array(d1, copy=False, mask=m),
               fill_value).astype(np.float64)
    y = filled(masked_array(d2, copy=False, mask=m), 1).astype(np.float64)
    d = np.around(np.abs(x - y), decimal) <= 10.0 ** (-decimal)
    return d.ravel()
80
+
81
+
82
+ def _assert_equal_on_sequences(actual, desired, err_msg=''):
83
+ """
84
+ Asserts the equality of two non-array sequences.
85
+
86
+ """
87
+ assert_equal(len(actual), len(desired), err_msg)
88
+ for k in range(len(desired)):
89
+ assert_equal(actual[k], desired[k], f'item={k!r}\n{err_msg}')
90
+ return
91
+
92
+
93
def assert_equal_records(a, b):
    """
    Asserts that two records are equal.

    Pretty crude for now.

    """
    assert_equal(a.dtype, b.dtype)
    for name in a.dtype.names:
        av = operator.getitem(a, name)
        bv = operator.getitem(b, name)
        # Fields where either side is masked are considered equal.
        if av is not masked and bv is not masked:
            assert_equal(av, bv)
106
+
107
+
108
def assert_equal(actual, desired, err_msg=''):
    """
    Asserts that two items are equal.

    """
    # Case #1: dictionary .....
    if isinstance(desired, dict):
        if not isinstance(actual, dict):
            raise AssertionError(repr(type(actual)))
        assert_equal(len(actual), len(desired), err_msg)
        for k, i in desired.items():
            if k not in actual:
                raise AssertionError(f"{k} not in {actual}")
            assert_equal(actual[k], desired[k], f'key={k!r}\n{err_msg}')
        return
    # Case #2: lists .....
    if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
        # BUG fix: propagate the caller's message instead of silently
        # discarding it with err_msg=''.
        return _assert_equal_on_sequences(actual, desired, err_msg=err_msg)
    # Case #3: scalars / generic objects.
    if not (isinstance(actual, ndarray) or isinstance(desired, ndarray)):
        msg = build_err_msg([actual, desired], err_msg,)
        if not desired == actual:
            raise AssertionError(msg)
        return
    # Case #4. arrays or equivalent
    # Mixing `masked` with a non-masked operand is an error, not a
    # mismatch: raise ValueError so it is never silently "equal".
    if ((actual is masked) and not (desired is masked)) or \
            ((desired is masked) and not (actual is masked)):
        msg = build_err_msg([actual, desired],
                            err_msg, header='', names=('x', 'y'))
        raise ValueError(msg)
    actual = np.asanyarray(actual)
    desired = np.asanyarray(desired)
    (actual_dtype, desired_dtype) = (actual.dtype, desired.dtype)
    if actual_dtype.char == "S" and desired_dtype.char == "S":
        # BUG fix: also forward err_msg on the bytes-array path.
        return _assert_equal_on_sequences(actual.tolist(),
                                          desired.tolist(),
                                          err_msg=err_msg)
    return assert_array_equal(actual, desired, err_msg)
145
+
146
+
147
def fail_if_equal(actual, desired, err_msg='',):
    """
    Raise an AssertionError if the two items *are* equal.

    """
    # Dictionaries: any matching length or matching value triggers a failure.
    if isinstance(desired, dict):
        if not isinstance(actual, dict):
            raise AssertionError(repr(type(actual)))
        fail_if_equal(len(actual), len(desired), err_msg)
        for key in desired:
            if key not in actual:
                raise AssertionError(repr(key))
            fail_if_equal(actual[key], desired[key], f'key={key!r}\n{err_msg}')
        return
    # Sequences: lengths first, then each element.
    if isinstance(desired, (list, tuple)) and isinstance(actual, (list, tuple)):
        fail_if_equal(len(actual), len(desired), err_msg)
        for idx in range(len(desired)):
            fail_if_equal(actual[idx], desired[idx], f'item={idx!r}\n{err_msg}')
        return
    # Arrays are handled by the dedicated elementwise check.
    if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray):
        return fail_if_array_equal(actual, desired, err_msg)
    # Scalars/objects: fail unless they compare unequal.
    msg = build_err_msg([actual, desired], err_msg)
    if not desired != actual:
        raise AssertionError(msg)


assert_not_equal = fail_if_equal
174
+
175
+
176
def assert_almost_equal(actual, desired, decimal=7, err_msg='', verbose=True):
    """
    Assert that two items agree to within ``decimal`` places.

    The check is equivalent to ``abs(desired - actual) < 0.5 * 10**(-decimal)``.

    """
    # Arrays get the elementwise variant.
    if isinstance(actual, np.ndarray) or isinstance(desired, np.ndarray):
        return assert_array_almost_equal(actual, desired, decimal=decimal,
                                         err_msg=err_msg, verbose=verbose)
    msg = build_err_msg([actual, desired],
                        err_msg=err_msg, verbose=verbose)
    if round(abs(desired - actual), decimal) != 0:
        raise AssertionError(msg)


assert_close = assert_almost_equal
193
+
194
+
195
def assert_array_compare(comparison, x, y, err_msg='', verbose=True, header='',
                         fill_value=True):
    """
    Assert that an elementwise comparison between two masked arrays holds.

    """
    # Give both operands the union of the two masks.
    joint_mask = mask_or(getmask(x), getmask(y))
    x = masked_array(x, copy=False, mask=joint_mask, keep_mask=False,
                     subok=False)
    y = masked_array(y, copy=False, mask=joint_mask, keep_mask=False,
                     subok=False)
    # Exactly one side being the masked singleton is an error.
    if (x is masked) != (y is masked):
        msg = build_err_msg([x, y], err_msg=err_msg, verbose=verbose,
                            header=header, names=('x', 'y'))
        raise ValueError(msg)
    # Run the standard comparison machinery on the filled versions.
    return np.testing.assert_array_compare(comparison,
                                           x.filled(fill_value),
                                           y.filled(fill_value),
                                           err_msg=err_msg,
                                           verbose=verbose, header=header)
218
+
219
+
220
def assert_array_equal(x, y, err_msg='', verbose=True):
    """
    Check that two masked arrays are equal, element by element.

    """
    assert_array_compare(
        operator.__eq__, x, y,
        err_msg=err_msg, verbose=verbose, header='Arrays are not equal')
228
+
229
+
230
def fail_if_array_equal(x, y, err_msg='', verbose=True):
    """
    Raise an assertion error if the two masked arrays are equal elementwise.

    """
    def not_all_close(a, b):
        # Succeeds (truthy) precisely when the arrays differ somewhere.
        return not np.all(approx(a, b))

    assert_array_compare(not_all_close, x, y, err_msg=err_msg, verbose=verbose,
                         header='Arrays are not equal')
239
+
240
+
241
def assert_array_approx_equal(x, y, decimal=6, err_msg='', verbose=True):
    """
    Check the equality of two masked arrays, up to a given number of decimals.

    The equality is checked elementwise with a relative tolerance.

    """
    def loose_eq(a, b):
        "Loose comparison of a and b with rtol derived from ``decimal``."
        return approx(a, b, rtol=10. ** -decimal)

    assert_array_compare(loose_eq, x, y, err_msg=err_msg, verbose=verbose,
                         header='Arrays are not almost equal')
253
+
254
+
255
def assert_array_almost_equal(x, y, decimal=6, err_msg='', verbose=True):
    """
    Check the equality of two masked arrays, up to a given number of decimals.

    The equality is checked elementwise.

    """
    def rounded_eq(a, b):
        "Loose comparison of a and b after rounding to ``decimal`` places."
        return almost(a, b, decimal)

    assert_array_compare(rounded_eq, x, y, err_msg=err_msg, verbose=verbose,
                         header='Arrays are not almost equal')
267
+
268
+
269
def assert_array_less(x, y, err_msg='', verbose=True):
    """
    Check that ``x`` is strictly smaller than ``y``, element by element.

    """
    assert_array_compare(
        operator.__lt__, x, y,
        err_msg=err_msg, verbose=verbose,
        header='Arrays are not less-ordered')
277
+
278
+
279
def assert_mask_equal(m1, m2, err_msg=''):
    """
    Assert the equality of two masks.

    ``nomask`` on either side requires ``nomask`` on the other.

    """
    for this, other in ((m1, m2), (m2, m1)):
        if this is nomask:
            assert_(other is nomask)
    assert_array_equal(m1, m2, err_msg=err_msg)
llava_next/lib/python3.10/site-packages/numpy/ma/timer_comparison.py ADDED
@@ -0,0 +1,443 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import timeit
2
+ from functools import reduce
3
+
4
+ import numpy as np
5
+ from numpy import float_
6
+ import numpy.core.fromnumeric as fromnumeric
7
+
8
+ from numpy.testing import build_err_msg
9
+
10
+
11
+ pi = np.pi
12
+
13
class ModuleTester:
    """
    Test/benchmark harness for a masked-array implementation.

    The module under test (e.g. ``numpy.ma.core``) is bound at construction
    time, and every masked-array entry point used by the ``test_*`` methods is
    cached as an instance attribute, so the test bodies stay independent of
    the module's internal layout.  The ``__main__`` section of this file times
    the ``test_*`` methods with ``timeit``.
    """

    def __init__(self, module):
        # Cache the entry points of the module under test as attributes.
        self.module = module
        self.allequal = module.allequal
        self.arange = module.arange
        self.array = module.array
        self.concatenate = module.concatenate
        self.count = module.count
        self.equal = module.equal
        self.filled = module.filled
        self.getmask = module.getmask
        self.getmaskarray = module.getmaskarray
        self.id = id
        self.inner = module.inner
        self.make_mask = module.make_mask
        self.masked = module.masked
        self.masked_array = module.masked_array
        self.masked_values = module.masked_values
        self.mask_or = module.mask_or
        self.nomask = module.nomask
        self.ones = module.ones
        self.outer = module.outer
        self.repeat = module.repeat
        self.resize = module.resize
        self.sort = module.sort
        self.take = module.take
        self.transpose = module.transpose
        self.zeros = module.zeros
        self.MaskType = module.MaskType
        # Older layouts expose the ufunc module as ``module.umath``, newer
        # ones as ``module.core.umath``.
        try:
            self.umath = module.umath
        except AttributeError:
            self.umath = module.core.umath
        self.testnames = []

    def assert_array_compare(self, comparison, x, y, err_msg='', header='',
                             fill_value=True):
        """
        Assert that a comparison of two masked arrays is satisfied elementwise.

        """
        xf = self.filled(x)
        yf = self.filled(y)
        # Both operands get the union of the two masks before filling.
        m = self.mask_or(self.getmask(x), self.getmask(y))

        x = self.filled(self.masked_array(xf, mask=m), fill_value)
        y = self.filled(self.masked_array(yf, mask=m), fill_value)
        if (x.dtype.char != "O"):
            x = x.astype(float_)
            # Zero out NaNs so they do not poison the comparison below.
            if isinstance(x, np.ndarray) and x.size > 1:
                x[np.isnan(x)] = 0
            elif np.isnan(x):
                x = 0
        if (y.dtype.char != "O"):
            y = y.astype(float_)
            if isinstance(y, np.ndarray) and y.size > 1:
                y[np.isnan(y)] = 0
            elif np.isnan(y):
                y = 0
        try:
            # Shapes must match unless one operand is a scalar (0-d).
            cond = (x.shape == () or y.shape == ()) or x.shape == y.shape
            if not cond:
                msg = build_err_msg([x, y],
                                    err_msg
                                    + f'\n(shapes {x.shape}, {y.shape} mismatch)',
                                    header=header,
                                    names=('x', 'y'))
                assert cond, msg
            val = comparison(x, y)
            if m is not self.nomask and fill_value:
                val = self.masked_array(val, mask=m)
            if isinstance(val, bool):
                cond = val
                reduced = [0]
            else:
                reduced = val.ravel()
                cond = reduced.all()
                reduced = reduced.tolist()
            if not cond:
                # Percentage of elements that failed the comparison.
                match = 100-100.0*reduced.count(1)/len(reduced)
                msg = build_err_msg([x, y],
                                    err_msg
                                    + '\n(mismatch %s%%)' % (match,),
                                    header=header,
                                    names=('x', 'y'))
                assert cond, msg
        except ValueError as e:
            msg = build_err_msg([x, y], err_msg, header=header, names=('x', 'y'))
            raise ValueError(msg) from e

    def assert_array_equal(self, x, y, err_msg=''):
        """
        Checks the elementwise equality of two masked arrays.

        """
        self.assert_array_compare(self.equal, x, y, err_msg=err_msg,
                                  header='Arrays are not equal')

    @np.errstate(all='ignore')
    def test_0(self):
        """
        Tests creation

        """
        x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
        m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
        xm = self.masked_array(x, mask=m)
        # Exercise scalar indexing of a masked array; result is unused.
        xm[0]

    @np.errstate(all='ignore')
    def test_1(self):
        """
        Tests creation

        """
        x = np.array([1., 1., 1., -2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
        y = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
        m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
        m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0, 0, 1]
        xm = self.masked_array(x, mask=m1)
        ym = self.masked_array(y, mask=m2)
        xf = np.where(m1, 1.e+20, x)
        xm.set_fill_value(1.e+20)

        assert((xm-ym).filled(0).any())
        s = x.shape
        # size is the product of the shape; count excludes masked entries.
        assert(xm.size == reduce(lambda x, y:x*y, s))
        assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1))

        # Reshaping must preserve the unmasked count.
        for s in [(4, 3), (6, 2)]:
            x.shape = s
            y.shape = s
            xm.shape = s
            ym.shape = s
            xf.shape = s
            assert(self.count(xm) == len(m1) - reduce(lambda x, y:x+y, m1))

    @np.errstate(all='ignore')
    def test_2(self):
        """
        Tests conversions and indexing.

        """
        x1 = np.array([1, 2, 4, 3])
        x2 = self.array(x1, mask=[1, 0, 0, 0])
        x3 = self.array(x1, mask=[0, 1, 0, 1])
        x4 = self.array(x1)
        # test conversion to strings, no errors
        str(x2)
        repr(x2)
        # tests of indexing
        assert type(x2[1]) is type(x1[1])
        assert x1[1] == x2[1]
        x1[2] = 9
        x2[2] = 9
        self.assert_array_equal(x1, x2)
        x1[1:3] = 99
        x2[1:3] = 99
        x2[1] = self.masked
        x2[1:3] = self.masked
        x2[:] = x1
        x2[1] = self.masked
        x3[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0])
        x4[:] = self.masked_array([1, 2, 3, 4], [0, 1, 1, 0])
        x1 = np.arange(5)*1.0
        x2 = self.masked_values(x1, 3.0)
        x1 = self.array([1, 'hello', 2, 3], object)
        x2 = np.array([1, 'hello', 2, 3], object)
        # check that no error occurs.
        x1[1]
        x2[1]
        assert x1[1:1].shape == (0,)
        # Tests copy-size
        n = [0, 0, 1, 0, 0]
        m = self.make_mask(n)
        # make_mask on an existing mask returns it unchanged...
        m2 = self.make_mask(m)
        assert(m is m2)
        # ...unless a copy is requested explicitly.
        m3 = self.make_mask(m, copy=1)
        assert(m is not m3)

    @np.errstate(all='ignore')
    def test_3(self):
        """
        Tests resize/repeat

        """
        x4 = self.arange(4)
        x4[2] = self.masked
        y4 = self.resize(x4, (8,))
        # resize tiles both the data and the mask.
        assert self.allequal(self.concatenate([x4, x4]), y4)
        assert self.allequal(self.getmask(y4), [0, 0, 1, 0, 0, 0, 1, 0])
        y5 = self.repeat(x4, (2, 2, 2, 2), axis=0)
        self.assert_array_equal(y5, [0, 0, 1, 1, 2, 2, 3, 3])
        # Scalar repeat count, method form, and positional axis must all agree.
        y6 = self.repeat(x4, 2, axis=0)
        assert self.allequal(y5, y6)
        y7 = x4.repeat((2, 2, 2, 2), axis=0)
        assert self.allequal(y5, y7)
        y8 = x4.repeat(2, 0)
        assert self.allequal(y5, y8)

    @np.errstate(all='ignore')
    def test_4(self):
        """
        Test of take, transpose, inner, outer products.

        """
        x = self.arange(24)
        y = np.arange(24)
        x[5:6] = self.masked
        x = x.reshape(2, 3, 4)
        y = y.reshape(2, 3, 4)
        assert self.allequal(np.transpose(y, (2, 0, 1)), self.transpose(x, (2, 0, 1)))
        assert self.allequal(np.take(y, (2, 0, 1), 1), self.take(x, (2, 0, 1), 1))
        # inner/outer are compared against numpy on the filled data.
        assert self.allequal(np.inner(self.filled(x, 0), self.filled(y, 0)),
                             self.inner(x, y))
        assert self.allequal(np.outer(self.filled(x, 0), self.filled(y, 0)),
                             self.outer(x, y))
        y = self.array(['abc', 1, 'def', 2, 3], object)
        y[2] = self.masked
        t = self.take(y, [0, 3, 4])
        assert t[0] == 'abc'
        assert t[1] == 2
        assert t[2] == 3

    @np.errstate(all='ignore')
    def test_5(self):
        """
        Tests inplace w/ scalar

        """
        x = self.arange(10)
        y = self.arange(10)
        xm = self.arange(10)
        xm[2] = self.masked
        x += 1
        assert self.allequal(x, y+1)
        xm += 1
        assert self.allequal(xm, y+1)

        x = self.arange(10)
        xm = self.arange(10)
        xm[2] = self.masked
        x -= 1
        assert self.allequal(x, y-1)
        xm -= 1
        assert self.allequal(xm, y-1)

        x = self.arange(10)*1.0
        xm = self.arange(10)*1.0
        xm[2] = self.masked
        x *= 2.0
        assert self.allequal(x, y*2)
        xm *= 2.0
        assert self.allequal(xm, y*2)

        # NOTE(review): in-place true division of an integer array (x /= 2)
        # relies on legacy casting behaviour — confirm against the numpy
        # version this harness is expected to run on.
        x = self.arange(10)*2
        xm = self.arange(10)*2
        xm[2] = self.masked
        x /= 2
        assert self.allequal(x, y)
        xm /= 2
        assert self.allequal(xm, y)

        x = self.arange(10)*1.0
        xm = self.arange(10)*1.0
        xm[2] = self.masked
        x /= 2.0
        assert self.allequal(x, y/2.0)
        # Division by zero at index 0 is masked out, hence the errstate guard.
        xm /= self.arange(10)
        self.assert_array_equal(xm, self.ones((10,)))

        x = self.arange(10).astype(float_)
        xm = self.arange(10)
        xm[2] = self.masked
        x += 1.
        assert self.allequal(x, y + 1.)

    @np.errstate(all='ignore')
    def test_6(self):
        """
        Tests inplace w/ array

        """
        x = self.arange(10, dtype=float_)
        y = self.arange(10)
        xm = self.arange(10, dtype=float_)
        xm[2] = self.masked
        m = xm.mask
        a = self.arange(10, dtype=float_)
        a[-1] = self.masked
        x += a
        xm += a
        assert self.allequal(x, y+a)
        assert self.allequal(xm, y+a)
        # In-place ops must merge the operand's mask into the target's.
        assert self.allequal(xm.mask, self.mask_or(m, a.mask))

        x = self.arange(10, dtype=float_)
        xm = self.arange(10, dtype=float_)
        xm[2] = self.masked
        m = xm.mask
        a = self.arange(10, dtype=float_)
        a[-1] = self.masked
        x -= a
        xm -= a
        assert self.allequal(x, y-a)
        assert self.allequal(xm, y-a)
        assert self.allequal(xm.mask, self.mask_or(m, a.mask))

        x = self.arange(10, dtype=float_)
        xm = self.arange(10, dtype=float_)
        xm[2] = self.masked
        m = xm.mask
        a = self.arange(10, dtype=float_)
        a[-1] = self.masked
        x *= a
        xm *= a
        assert self.allequal(x, y*a)
        assert self.allequal(xm, y*a)
        assert self.allequal(xm.mask, self.mask_or(m, a.mask))

        x = self.arange(10, dtype=float_)
        xm = self.arange(10, dtype=float_)
        xm[2] = self.masked
        m = xm.mask
        a = self.arange(10, dtype=float_)
        a[-1] = self.masked
        # Division results are not asserted; this only times the operation.
        x /= a
        xm /= a

    @np.errstate(all='ignore')
    def test_7(self):
        "Tests ufunc"
        d = (self.array([1.0, 0, -1, pi/2]*2, mask=[0, 1]+[0]*6),
             self.array([1.0, 0, -1, pi/2]*2, mask=[1, 0]+[0]*6),)
        # Only a subset of ufuncs is exercised; the rest are kept for
        # reference but commented out.
        for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
#                  'sin', 'cos', 'tan',
#                  'arcsin', 'arccos', 'arctan',
#                  'sinh', 'cosh', 'tanh',
#                  'arcsinh',
#                  'arccosh',
#                  'arctanh',
#                  'absolute', 'fabs', 'negative',
#                  # 'nonzero', 'around',
#                  'floor', 'ceil',
#                  # 'sometrue', 'alltrue',
#                  'logical_not',
#                  'add', 'subtract', 'multiply',
#                  'divide', 'true_divide', 'floor_divide',
#                  'remainder', 'fmod', 'hypot', 'arctan2',
#                  'equal', 'not_equal', 'less_equal', 'greater_equal',
#                  'less', 'greater',
#                  'logical_and', 'logical_or', 'logical_xor',
                  ]:
            # Prefer the raw ufunc; fall back to fromnumeric for the rest.
            try:
                uf = getattr(self.umath, f)
            except AttributeError:
                uf = getattr(fromnumeric, f)
            mf = getattr(self.module, f)
            # Slice the argument tuple to the ufunc's arity.
            args = d[:uf.nin]
            ur = uf(*args)
            mr = mf(*args)
            # Data (filled) and masks must both agree.
            self.assert_array_equal(ur.filled(0), mr.filled(0), f)
            self.assert_array_equal(ur._mask, mr._mask)

    @np.errstate(all='ignore')
    def test_99(self):
        # test average
        # NOTE(review): ``self.average`` is never bound in ``__init__`` — this
        # test looks stale (it is also excluded from the range(1, 8) driver in
        # ``__main__``); confirm before enabling it.
        ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
        self.assert_array_equal(2.0, self.average(ott, axis=0))
        self.assert_array_equal(2.0, self.average(ott, weights=[1., 1., 2., 1.]))
        result, wts = self.average(ott, weights=[1., 1., 2., 1.], returned=1)
        self.assert_array_equal(2.0, result)
        assert(wts == 4.0)
        ott[:] = self.masked
        assert(self.average(ott, axis=0) is self.masked)
        ott = self.array([0., 1., 2., 3.], mask=[1, 0, 0, 0])
        ott = ott.reshape(2, 2)
        ott[:, 1] = self.masked
        self.assert_array_equal(self.average(ott, axis=0), [2.0, 0.0])
        assert(self.average(ott, axis=1)[0] is self.masked)
        self.assert_array_equal([2., 0.], self.average(ott, axis=0))
        result, wts = self.average(ott, axis=0, returned=1)
        self.assert_array_equal(wts, [1., 0.])
        w1 = [0, 1, 1, 1, 1, 0]
        w2 = [[0, 1, 1, 1, 1, 0], [1, 0, 0, 0, 0, 1]]
        x = self.arange(6)
        self.assert_array_equal(self.average(x, axis=0), 2.5)
        self.assert_array_equal(self.average(x, axis=0, weights=w1), 2.5)
        y = self.array([self.arange(6), 2.0*self.arange(6)])
        self.assert_array_equal(self.average(y, None), np.add.reduce(np.arange(6))*3./12.)
        self.assert_array_equal(self.average(y, axis=0), np.arange(6) * 3./2.)
        self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0])
        self.assert_array_equal(self.average(y, None, weights=w2), 20./6.)
        self.assert_array_equal(self.average(y, axis=0, weights=w2), [0., 1., 2., 3., 4., 10.])
        self.assert_array_equal(self.average(y, axis=1), [self.average(x, axis=0), self.average(x, axis=0) * 2.0])
        m1 = self.zeros(6)
        m2 = [0, 0, 1, 1, 0, 0]
        m3 = [[0, 0, 1, 1, 0, 0], [0, 1, 1, 1, 1, 0]]
        m4 = self.ones(6)
        m5 = [0, 1, 1, 1, 1, 1]
        self.assert_array_equal(self.average(self.masked_array(x, m1), axis=0), 2.5)
        self.assert_array_equal(self.average(self.masked_array(x, m2), axis=0), 2.5)
        self.assert_array_equal(self.average(self.masked_array(x, m5), axis=0), 0.0)
        self.assert_array_equal(self.count(self.average(self.masked_array(x, m4), axis=0)), 0)
        z = self.masked_array(y, m3)
        self.assert_array_equal(self.average(z, None), 20./6.)
        self.assert_array_equal(self.average(z, axis=0), [0., 1., 99., 99., 4.0, 7.5])
        self.assert_array_equal(self.average(z, axis=1), [2.5, 5.0])
        self.assert_array_equal(self.average(z, axis=0, weights=w2), [0., 1., 99., 99., 4.0, 10.0])

    @np.errstate(all='ignore')
    def test_A(self):
        # Minimal timing body: slice-assign the masked singleton, then reshape.
        x = self.arange(24)
        x[5:6] = self.masked
        x = x.reshape(2, 3, 4)
428
+
429
+
430
if __name__ == '__main__':
    # Setup code executed once per timeit.Timer: import the module under test
    # as ``module``, then wrap it in a ModuleTester so the timed statement can
    # call ``tester.test_<i>()``.
    setup_base = ("from __main__ import ModuleTester \n"
                  "import numpy\n"
                  "tester = ModuleTester(module)\n")
    setup_cur = "import numpy.ma.core as module\n" + setup_base
    (nrepeat, nloop) = (10, 10)

    for i in range(1, 8):
        func = 'tester.test_%i()' % i
        cur = timeit.Timer(func, setup_cur).repeat(nrepeat, nloop*10)
        cur = np.sort(cur)
        print("#%i" % i + 50*'.')
        # Fetch the test docstring with getattr instead of eval: equivalent
        # result, no dynamic code evaluation of a constructed string.
        print(getattr(ModuleTester, 'test_%i' % i).__doc__)
        # Report the two best timings of the sorted repeats.
        print(f'core_current : {cur[0]:.3f} - {cur[1]:.3f}')
parrot/lib/python3.10/site-packages/sympy/codegen/__pycache__/abstract_nodes.cpython-310.pyc ADDED
Binary file (937 Bytes). View file
 
parrot/lib/python3.10/site-packages/sympy/codegen/__pycache__/approximations.cpython-310.pyc ADDED
Binary file (6.32 kB). View file
 
parrot/lib/python3.10/site-packages/sympy/codegen/__pycache__/cfunctions.cpython-310.pyc ADDED
Binary file (14.1 kB). View file
 
parrot/lib/python3.10/site-packages/sympy/codegen/__pycache__/cnodes.cpython-310.pyc ADDED
Binary file (4.88 kB). View file
 
parrot/lib/python3.10/site-packages/sympy/codegen/__pycache__/cutils.cpython-310.pyc ADDED
Binary file (704 Bytes). View file
 
parrot/lib/python3.10/site-packages/sympy/codegen/__pycache__/fnodes.cpython-310.pyc ADDED
Binary file (21.2 kB). View file
 
parrot/lib/python3.10/site-packages/sympy/codegen/__pycache__/matrix_nodes.cpython-310.pyc ADDED
Binary file (2.75 kB). View file
 
parrot/lib/python3.10/site-packages/sympy/codegen/cutils.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ from sympy.printing.c import C99CodePrinter
2
+
3
def render_as_source_file(content, Printer=C99CodePrinter, settings=None):
    """ Renders a C source file (with required #include statements) """
    printer = Printer(settings or {})
    # doprint must run first: it populates ``printer.headers`` as it renders.
    rendered = printer.doprint(content)
    includes = '\n'.join('#include <%s>' % header for header in printer.headers)
    return includes + '\n\n' + rendered
parrot/lib/python3.10/site-packages/sympy/core/operations.py ADDED
@@ -0,0 +1,718 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+ from operator import attrgetter
3
+ from collections import defaultdict
4
+
5
+ from sympy.utilities.exceptions import sympy_deprecation_warning
6
+
7
+ from .sympify import _sympify as _sympify_, sympify
8
+ from .basic import Basic
9
+ from .cache import cacheit
10
+ from .sorting import ordered
11
+ from .logic import fuzzy_and
12
+ from .parameters import global_parameters
13
+ from sympy.utilities.iterables import sift
14
+ from sympy.multipledispatch.dispatcher import (Dispatcher,
15
+ ambiguity_register_error_ignore_dup,
16
+ str_signature, RaiseNotImplementedError)
17
+
18
+
19
+ class AssocOp(Basic):
20
+ """ Associative operations, can separate noncommutative and
21
+ commutative parts.
22
+
23
+ (a op b) op c == a op (b op c) == a op b op c.
24
+
25
+ Base class for Add and Mul.
26
+
27
+ This is an abstract base class, concrete derived classes must define
28
+ the attribute `identity`.
29
+
30
+ .. deprecated:: 1.7
31
+
32
+ Using arguments that aren't subclasses of :class:`~.Expr` in core
33
+ operators (:class:`~.Mul`, :class:`~.Add`, and :class:`~.Pow`) is
34
+ deprecated. See :ref:`non-expr-args-deprecated` for details.
35
+
36
+ Parameters
37
+ ==========
38
+
39
+ *args :
40
+ Arguments which are operated
41
+
42
+ evaluate : bool, optional
43
+ Evaluate the operation. If not passed, refer to ``global_parameters.evaluate``.
44
+ """
45
+
46
+ # for performance reason, we don't let is_commutative go to assumptions,
47
+ # and keep it right here
48
+ __slots__: tuple[str, ...] = ('is_commutative',)
49
+
50
+ _args_type: type[Basic] | None = None
51
+
52
    @cacheit
    def __new__(cls, *args, evaluate=None, _sympify=True):
        # Allow faster processing by passing ``_sympify=False``, if all arguments
        # are already sympified.
        if _sympify:
            args = list(map(_sympify_, args))

        # Disallow non-Expr args in Add/Mul
        typ = cls._args_type
        if typ is not None:
            from .relational import Relational
            if any(isinstance(arg, Relational) for arg in args):
                raise TypeError("Relational cannot be used in %s" % cls.__name__)

            # This should raise TypeError once deprecation period is over:
            for arg in args:
                if not isinstance(arg, typ):
                    sympy_deprecation_warning(
                        f"""

Using non-Expr arguments in {cls.__name__} is deprecated (in this case, one of
the arguments has type {type(arg).__name__!r}).

If you really did intend to use a multiplication or addition operation with
this object, use the * or + operator instead.

                        """,
                        deprecated_since_version="1.7",
                        active_deprecations_target="non-expr-args-deprecated",
                        stacklevel=4,
                    )

        if evaluate is None:
            evaluate = global_parameters.evaluate
        if not evaluate:
            # Unevaluated form: build directly from the raw args, then run the
            # registered constructor postprocessors.
            obj = cls._from_args(args)
            obj = cls._exec_constructor_postprocessors(obj)
            return obj

        # Drop identity elements (e.g. 0 for Add, 1 for Mul); they never
        # affect the result.
        args = [a for a in args if a is not cls.identity]

        if len(args) == 0:
            return cls.identity
        if len(args) == 1:
            return args[0]

        # Class-specific flattening splits the args into commutative and
        # non-commutative parts and may report Order symbols.
        c_part, nc_part, order_symbols = cls.flatten(args)
        is_commutative = not nc_part
        obj = cls._from_args(c_part + nc_part, is_commutative)
        obj = cls._exec_constructor_postprocessors(obj)

        if order_symbols is not None:
            # Wrap in an Order term when flatten detected one.
            from sympy.series.order import Order
            return Order(obj, *order_symbols)
        return obj
+
108
+ @classmethod
109
+ def _from_args(cls, args, is_commutative=None):
110
+ """Create new instance with already-processed args.
111
+ If the args are not in canonical order, then a non-canonical
112
+ result will be returned, so use with caution. The order of
113
+ args may change if the sign of the args is changed."""
114
+ if len(args) == 0:
115
+ return cls.identity
116
+ elif len(args) == 1:
117
+ return args[0]
118
+
119
+ obj = super().__new__(cls, *args)
120
+ if is_commutative is None:
121
+ is_commutative = fuzzy_and(a.is_commutative for a in args)
122
+ obj.is_commutative = is_commutative
123
+ return obj
124
+
125
+ def _new_rawargs(self, *args, reeval=True, **kwargs):
126
+ """Create new instance of own class with args exactly as provided by
127
+ caller but returning the self class identity if args is empty.
128
+
129
+ Examples
130
+ ========
131
+
132
+ This is handy when we want to optimize things, e.g.
133
+
134
+ >>> from sympy import Mul, S
135
+ >>> from sympy.abc import x, y
136
+ >>> e = Mul(3, x, y)
137
+ >>> e.args
138
+ (3, x, y)
139
+ >>> Mul(*e.args[1:])
140
+ x*y
141
+ >>> e._new_rawargs(*e.args[1:]) # the same as above, but faster
142
+ x*y
143
+
144
+ Note: use this with caution. There is no checking of arguments at
145
+ all. This is best used when you are rebuilding an Add or Mul after
146
+ simply removing one or more args. If, for example, modifications,
147
+ result in extra 1s being inserted they will show up in the result:
148
+
149
+ >>> m = (x*y)._new_rawargs(S.One, x); m
150
+ 1*x
151
+ >>> m == x
152
+ False
153
+ >>> m.is_Mul
154
+ True
155
+
156
+ Another issue to be aware of is that the commutativity of the result
157
+ is based on the commutativity of self. If you are rebuilding the
158
+ terms that came from a commutative object then there will be no
159
+ problem, but if self was non-commutative then what you are
160
+ rebuilding may now be commutative.
161
+
162
+ Although this routine tries to do as little as possible with the
163
+ input, getting the commutativity right is important, so this level
164
+ of safety is enforced: commutativity will always be recomputed if
165
+ self is non-commutative and kwarg `reeval=False` has not been
166
+ passed.
167
+ """
168
+ if reeval and self.is_commutative is False:
169
+ is_commutative = None
170
+ else:
171
+ is_commutative = self.is_commutative
172
+ return self._from_args(args, is_commutative)
173
+
174
+ @classmethod
175
+ def flatten(cls, seq):
176
+ """Return seq so that none of the elements are of type `cls`. This is
177
+ the vanilla routine that will be used if a class derived from AssocOp
178
+ does not define its own flatten routine."""
179
+ # apply associativity, no commutativity property is used
180
+ new_seq = []
181
+ while seq:
182
+ o = seq.pop()
183
+ if o.__class__ is cls: # classes must match exactly
184
+ seq.extend(o.args)
185
+ else:
186
+ new_seq.append(o)
187
+ new_seq.reverse()
188
+
189
+ # c_part, nc_part, order_symbols
190
+ return [], new_seq, None
191
+
192
+ def _matches_commutative(self, expr, repl_dict=None, old=False):
193
+ """
194
+ Matches Add/Mul "pattern" to an expression "expr".
195
+
196
+ repl_dict ... a dictionary of (wild: expression) pairs, that get
197
+ returned with the results
198
+
199
+ This function is the main workhorse for Add/Mul.
200
+
201
+ Examples
202
+ ========
203
+
204
+ >>> from sympy import symbols, Wild, sin
205
+ >>> a = Wild("a")
206
+ >>> b = Wild("b")
207
+ >>> c = Wild("c")
208
+ >>> x, y, z = symbols("x y z")
209
+ >>> (a+sin(b)*c)._matches_commutative(x+sin(y)*z)
210
+ {a_: x, b_: y, c_: z}
211
+
212
+ In the example above, "a+sin(b)*c" is the pattern, and "x+sin(y)*z" is
213
+ the expression.
214
+
215
+ The repl_dict contains parts that were already matched. For example
216
+ here:
217
+
218
+ >>> (x+sin(b)*c)._matches_commutative(x+sin(y)*z, repl_dict={a: x})
219
+ {a_: x, b_: y, c_: z}
220
+
221
+ the only function of the repl_dict is to return it in the
222
+ result, e.g. if you omit it:
223
+
224
+ >>> (x+sin(b)*c)._matches_commutative(x+sin(y)*z)
225
+ {b_: y, c_: z}
226
+
227
+ the "a: x" is not returned in the result, but otherwise it is
228
+ equivalent.
229
+
230
+ """
231
+ from .function import _coeff_isneg
232
+ # make sure expr is Expr if pattern is Expr
233
+ from .expr import Expr
234
+ if isinstance(self, Expr) and not isinstance(expr, Expr):
235
+ return None
236
+
237
+ if repl_dict is None:
238
+ repl_dict = {}
239
+
240
+ # handle simple patterns
241
+ if self == expr:
242
+ return repl_dict
243
+
244
+ d = self._matches_simple(expr, repl_dict)
245
+ if d is not None:
246
+ return d
247
+
248
+ # eliminate exact part from pattern: (2+a+w1+w2).matches(expr) -> (w1+w2).matches(expr-a-2)
249
+ from .function import WildFunction
250
+ from .symbol import Wild
251
+ wild_part, exact_part = sift(self.args, lambda p:
252
+ p.has(Wild, WildFunction) and not expr.has(p),
253
+ binary=True)
254
+ if not exact_part:
255
+ wild_part = list(ordered(wild_part))
256
+ if self.is_Add:
257
+ # in addition to normal ordered keys, impose
258
+ # sorting on Muls with leading Number to put
259
+ # them in order
260
+ wild_part = sorted(wild_part, key=lambda x:
261
+ x.args[0] if x.is_Mul and x.args[0].is_Number else
262
+ 0)
263
+ else:
264
+ exact = self._new_rawargs(*exact_part)
265
+ free = expr.free_symbols
266
+ if free and (exact.free_symbols - free):
267
+ # there are symbols in the exact part that are not
268
+ # in the expr; but if there are no free symbols, let
269
+ # the matching continue
270
+ return None
271
+ newexpr = self._combine_inverse(expr, exact)
272
+ if not old and (expr.is_Add or expr.is_Mul):
273
+ check = newexpr
274
+ if _coeff_isneg(check):
275
+ check = -check
276
+ if check.count_ops() > expr.count_ops():
277
+ return None
278
+ newpattern = self._new_rawargs(*wild_part)
279
+ return newpattern.matches(newexpr, repl_dict)
280
+
281
+ # now to real work ;)
282
+ i = 0
283
+ saw = set()
284
+ while expr not in saw:
285
+ saw.add(expr)
286
+ args = tuple(ordered(self.make_args(expr)))
287
+ if self.is_Add and expr.is_Add:
288
+ # in addition to normal ordered keys, impose
289
+ # sorting on Muls with leading Number to put
290
+ # them in order
291
+ args = tuple(sorted(args, key=lambda x:
292
+ x.args[0] if x.is_Mul and x.args[0].is_Number else
293
+ 0))
294
+ expr_list = (self.identity,) + args
295
+ for last_op in reversed(expr_list):
296
+ for w in reversed(wild_part):
297
+ d1 = w.matches(last_op, repl_dict)
298
+ if d1 is not None:
299
+ d2 = self.xreplace(d1).matches(expr, d1)
300
+ if d2 is not None:
301
+ return d2
302
+
303
+ if i == 0:
304
+ if self.is_Mul:
305
+ # make e**i look like Mul
306
+ if expr.is_Pow and expr.exp.is_Integer:
307
+ from .mul import Mul
308
+ if expr.exp > 0:
309
+ expr = Mul(*[expr.base, expr.base**(expr.exp - 1)], evaluate=False)
310
+ else:
311
+ expr = Mul(*[1/expr.base, expr.base**(expr.exp + 1)], evaluate=False)
312
+ i += 1
313
+ continue
314
+
315
+ elif self.is_Add:
316
+ # make i*e look like Add
317
+ c, e = expr.as_coeff_Mul()
318
+ if abs(c) > 1:
319
+ from .add import Add
320
+ if c > 0:
321
+ expr = Add(*[e, (c - 1)*e], evaluate=False)
322
+ else:
323
+ expr = Add(*[-e, (c + 1)*e], evaluate=False)
324
+ i += 1
325
+ continue
326
+
327
+ # try collection on non-Wild symbols
328
+ from sympy.simplify.radsimp import collect
329
+ was = expr
330
+ did = set()
331
+ for w in reversed(wild_part):
332
+ c, w = w.as_coeff_mul(Wild)
333
+ free = c.free_symbols - did
334
+ if free:
335
+ did.update(free)
336
+ expr = collect(expr, free)
337
+ if expr != was:
338
+ i += 0
339
+ continue
340
+
341
+ break # if we didn't continue, there is nothing more to do
342
+
343
+ return
344
+
345
    def _has_matcher(self):
        """Helper for .has() that checks for containment of
        subexpressions within an expr by using sets of args
        of similar nodes, e.g. x + 1 in x + y + 1 checks
        to see that {x, 1} & {x, y, 1} == {x, 1}

        Returns a predicate ``is_in(expr) -> bool`` that reports whether
        ``self`` is "contained" in ``expr`` in the arg-subset sense above.
        """
        def _ncsplit(expr):
            # this is not the same as args_cnc because here
            # we don't assume expr is a Mul -- hence deal with args --
            # and always return a set.
            cpart, ncpart = sift(expr.args,
                lambda arg: arg.is_commutative is True, binary=True)
            return set(cpart), ncpart

        # Split self once, up front; the closure below reuses these.
        c, nc = _ncsplit(self)
        cls = self.__class__

        def is_in(expr):
            # Only expressions of the same node type can contain self
            # in this sense.
            if isinstance(expr, cls):
                if expr == self:
                    return True
                _c, _nc = _ncsplit(expr)
                # commutative args of self must be a subset of expr's;
                # noncommutative args must appear as a contiguous run
                # (order matters for noncommutative factors).
                if (c & _c) == c:
                    if not nc:
                        return True
                    elif len(nc) <= len(_nc):
                        for i in range(len(_nc) - len(nc) + 1):
                            if _nc[i:i + len(nc)] == nc:
                                return True
                    return False
            # NOTE(review): falls through to an implicit None (falsy) for
            # non-cls expressions -- callers appear to use this only in a
            # boolean context; confirm before relying on the return type.
        return is_in
376
+
377
    def _eval_evalf(self, prec):
        """
        Evaluate the parts of self that are numbers; if the whole thing
        was a number with no functions it would have been evaluated, but
        it wasn't so we must judiciously extract the numbers and reconstruct
        the object. This is *not* simply replacing numbers with evaluated
        numbers. Numbers should be handled in the largest pure-number
        expression as possible. So the code below separates ``self`` into
        number and non-number parts and evaluates the number parts and
        walks the args of the non-number part recursively (doing the same
        thing).

        ``prec`` is the binary precision (bits), not decimal digits.
        Returns a rebuilt expression of the same operation type.
        """
        from .add import Add
        from .mul import Mul
        from .symbol import Symbol
        from .function import AppliedUndef
        if isinstance(self, (Mul, Add)):
            # split into a pure-number part ``x`` and the symbolic ``tail``
            x, tail = self.as_independent(Symbol, AppliedUndef)
            # if x is an AssocOp Function then the _evalf below will
            # call _eval_evalf (here) so we must break the recursion
            if not (tail is self.identity or
                    isinstance(x, AssocOp) and x.is_Function or
                    x is self.identity and isinstance(tail, AssocOp)):
                # here, we have a number so we just call to _evalf with prec;
                # prec is not the same as n, it is the binary precision so
                # that's why we don't call to evalf.
                x = x._evalf(prec) if x is not self.identity else self.identity
                args = []
                tail_args = tuple(self.func.make_args(tail))
                for a in tail_args:
                    # here we call to _eval_evalf since we don't know what we
                    # are dealing with and all other _eval_evalf routines should
                    # be doing the same thing (i.e. taking binary prec and
                    # finding the evalf-able args)
                    newa = a._eval_evalf(prec)
                    if newa is None:
                        args.append(a)
                    else:
                        args.append(newa)
                return self.func(x, *args)

        # this is the same as above, but there were no pure-number args to
        # deal with
        args = []
        for a in self.args:
            newa = a._eval_evalf(prec)
            if newa is None:
                args.append(a)
            else:
                args.append(newa)
        return self.func(*args)
428
+
429
    @classmethod
    def make_args(cls, expr):
        """
        Return a sequence of elements `args` such that cls(*args) == expr

        Examples
        ========

        >>> from sympy import Symbol, Mul, Add
        >>> x, y = map(Symbol, 'xy')

        >>> Mul.make_args(x*y)
        (x, y)
        >>> Add.make_args(x*y)
        (x*y,)
        >>> set(Add.make_args(x*y + y)) == set([y, x*y])
        True

        """
        if isinstance(expr, cls):
            # same operation type: its args already satisfy cls(*args) == expr
            return expr.args
        else:
            # any other expression counts as a single (sympified) argument
            return (sympify(expr),)
452
+
453
+ def doit(self, **hints):
454
+ if hints.get('deep', True):
455
+ terms = [term.doit(**hints) for term in self.args]
456
+ else:
457
+ terms = self.args
458
+ return self.func(*terms, evaluate=True)
459
+
460
class ShortCircuit(Exception):
    """Raised internally to abort argument processing early (e.g. when a
    lattice operation encounters its absorbing element)."""
462
+
463
+
464
class LatticeOp(AssocOp):
    """
    Join/meet operations of an algebraic lattice[1].

    Explanation
    ===========

    These binary operations are associative (op(op(a, b), c) = op(a, op(b, c))),
    commutative (op(a, b) = op(b, a)) and idempotent (op(a, a) = op(a) = a).
    Common examples are AND, OR, Union, Intersection, max or min. They have an
    identity element (op(identity, a) = a) and an absorbing element
    conventionally called zero (op(zero, a) = zero).

    This is an abstract base class, concrete derived classes must declare
    attributes zero and identity. All defining properties are then respected.

    Examples
    ========

    >>> from sympy import Integer
    >>> from sympy.core.operations import LatticeOp
    >>> class my_join(LatticeOp):
    ...     zero = Integer(0)
    ...     identity = Integer(1)
    >>> my_join(2, 3) == my_join(3, 2)
    True
    >>> my_join(2, my_join(3, 4)) == my_join(2, 3, 4)
    True
    >>> my_join(0, 1, 4, 2, 3, 4)
    0
    >>> my_join(1, 2)
    2

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Lattice_%28order%29
    """

    is_commutative = True

    def __new__(cls, *args, **options):
        # lazily sympify: a generator so that short-circuiting on the
        # absorbing element can skip sympifying the remaining values
        args = (_sympify_(arg) for arg in args)

        try:
            # /!\ args is a generator and _new_args_filter
            # must be careful to handle as such; this
            # is done so short-circuiting can be done
            # without having to sympify all values
            _args = frozenset(cls._new_args_filter(args))
        except ShortCircuit:
            # absorbing element encountered: the whole op collapses to zero
            return sympify(cls.zero)
        if not _args:
            # all args were the identity (or there were none)
            return sympify(cls.identity)
        elif len(_args) == 1:
            # idempotence: op(a) == a
            return set(_args).pop()
        else:
            # XXX in almost every other case for __new__, *_args is
            # passed along, but the expectation here is for _args
            obj = super(AssocOp, cls).__new__(cls, *ordered(_args))
            obj._argset = _args
            return obj

    @classmethod
    def _new_args_filter(cls, arg_sequence, call_cls=None):
        """Generator filtering args

        Drops identity elements, flattens nested applications of the same
        operation, and raises ShortCircuit when the absorbing (zero)
        element is seen.
        """
        ncls = call_cls or cls
        for arg in arg_sequence:
            if arg == ncls.zero:
                raise ShortCircuit(arg)
            elif arg == ncls.identity:
                continue
            elif arg.func == ncls:
                # flatten: op(a, op(b, c)) -> op(a, b, c)
                yield from arg.args
            else:
                yield arg

    @classmethod
    def make_args(cls, expr):
        """
        Return a set of args such that cls(*arg_set) == expr.
        """
        if isinstance(expr, cls):
            return expr._argset
        else:
            return frozenset([sympify(expr)])
550
+
551
+
552
class AssocOpDispatcher:
    """
    Handler dispatcher for associative operators

    .. note::
       This approach is experimental, and can be replaced or deleted in the future.
       See https://github.com/sympy/sympy/pull/19463.

    Explanation
    ===========

    If arguments of different types are passed, the classes which handle the operation for each type
    are collected. Then, a class which performs the operation is selected by recursive binary dispatching.
    Dispatching relation can be registered by ``register_handlerclass`` method.

    Priority registration is unordered. You cannot make ``A*B`` and ``B*A`` refer to
    different handler classes. All logic dealing with the order of arguments must be implemented
    in the handler class.

    Examples
    ========

    >>> from sympy import Add, Expr, Symbol
    >>> from sympy.core.add import add

    >>> class NewExpr(Expr):
    ...     @property
    ...     def _add_handler(self):
    ...         return NewAdd
    >>> class NewAdd(NewExpr, Add):
    ...     pass
    >>> add.register_handlerclass((Add, NewAdd), NewAdd)

    >>> a, b = Symbol('a'), NewExpr()
    >>> add(a, b) == NewAdd(a, b)
    True

    """
    def __init__(self, name, doc=None):
        self.name = name
        self.doc = doc
        # each argument advertises its handler through this attribute,
        # e.g. "_add_handler" for the dispatcher named "add"
        self.handlerattr = "_%s_handler" % name
        self._handlergetter = attrgetter(self.handlerattr)
        self._dispatcher = Dispatcher(name)

    def __repr__(self):
        return "<dispatched %s>" % self.name

    def register_handlerclass(self, classes, typ, on_ambiguity=ambiguity_register_error_ignore_dup):
        """
        Register the handler class for two classes, in both straight and reversed order.

        Parameters
        ==========

        classes : tuple of two types
            Classes who are compared with each other.

        typ:
            Class which is registered to represent *cls1* and *cls2*.
            Handler method of *self* must be implemented in this class.
        """
        if not len(classes) == 2:
            raise RuntimeError(
                "Only binary dispatch is supported, but got %s types: <%s>." % (
                len(classes), str_signature(classes)
            ))
        if len(set(classes)) == 1:
            raise RuntimeError(
                "Duplicate types <%s> cannot be dispatched." % str_signature(classes)
            )
        # register both orderings so dispatch is symmetric
        self._dispatcher.add(tuple(classes), typ, on_ambiguity=on_ambiguity)
        self._dispatcher.add(tuple(reversed(classes)), typ, on_ambiguity=on_ambiguity)

    @cacheit
    def __call__(self, *args, _sympify=True, **kwargs):
        """
        Parameters
        ==========

        *args :
            Arguments which are operated
        """
        if _sympify:
            args = tuple(map(_sympify_, args))
        handlers = frozenset(map(self._handlergetter, args))

        # no need to sympify again
        return self.dispatch(handlers)(*args, _sympify=False, **kwargs)

    @cacheit
    def dispatch(self, handlers):
        """
        Select the handler class, and return its handler method.
        """

        # Quick exit for the case where all handlers are same
        if len(handlers) == 1:
            h, = handlers
            if not isinstance(h, type):
                raise RuntimeError("Handler {!r} is not a type.".format(h))
            return h

        # Recursively select with registered binary priority
        for i, typ in enumerate(handlers):

            if not isinstance(typ, type):
                raise RuntimeError("Handler {!r} is not a type.".format(typ))

            if i == 0:
                handler = typ
            else:
                # reduce pairwise: dispatch(prev, current) -> new handler
                prev_handler = handler
                handler = self._dispatcher.dispatch(prev_handler, typ)

                if not isinstance(handler, type):
                    raise RuntimeError(
                        "Dispatcher for {!r} and {!r} must return a type, but got {!r}".format(
                            prev_handler, typ, handler
                        ))

        # return handler class
        return handler

    @property
    def __doc__(self):
        # build documentation dynamically from the registered handlers
        docs = [
            "Multiply dispatched associative operator: %s" % self.name,
            "Note that support for this is experimental, see the docs for :class:`AssocOpDispatcher` for details"
        ]

        if self.doc:
            docs.append(self.doc)

        s = "Registered handler classes\n"
        s += '=' * len(s)
        docs.append(s)

        amb_sigs = []

        # group registered signatures by their handler class
        typ_sigs = defaultdict(list)
        for sigs in self._dispatcher.ordering[::-1]:
            key = self._dispatcher.funcs[sigs]
            typ_sigs[key].append(sigs)

        for typ, sigs in typ_sigs.items():

            sigs_str = ', '.join('<%s>' % str_signature(sig) for sig in sigs)

            if isinstance(typ, RaiseNotImplementedError):
                # ambiguous registrations are listed separately below
                amb_sigs.append(sigs_str)
                continue

            s = 'Inputs: %s\n' % sigs_str
            s += '-' * len(s) + '\n'
            s += typ.__name__
            docs.append(s)

        if amb_sigs:
            s = "Ambiguous handler classes\n"
            s += '=' * len(s)
            docs.append(s)

            s = '\n'.join(amb_sigs)
            docs.append(s)

        return '\n\n'.join(docs)
parrot/lib/python3.10/site-packages/sympy/core/random.py ADDED
@@ -0,0 +1,227 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ When you need to use random numbers in SymPy library code, import from here
3
+ so there is only one generator working for SymPy. Imports from here should
4
+ behave the same as if they were being imported from Python's random module.
5
+ But only the routines currently used in SymPy are included here. To use others
6
+ import ``rng`` and access the method directly. For example, to capture the
7
+ current state of the generator use ``rng.getstate()``.
8
+
9
+ There is intentionally no Random to import from here. If you want
10
+ to control the state of the generator, import ``seed`` and call it
11
+ with or without an argument to set the state.
12
+
13
+ Examples
14
+ ========
15
+
16
+ >>> from sympy.core.random import random, seed
17
+ >>> assert random() < 1
18
+ >>> seed(1); a = random()
19
+ >>> b = random()
20
+ >>> seed(1); c = random()
21
+ >>> assert a == c
22
+ >>> assert a != b # remote possibility this will fail
23
+
24
+ """
25
+ from sympy.utilities.iterables import is_sequence
26
+ from sympy.utilities.misc import as_int
27
+
28
+ import random as _random
29
+ rng = _random.Random()
30
+
31
+ choice = rng.choice
32
+ random = rng.random
33
+ randint = rng.randint
34
+ randrange = rng.randrange
35
+ sample = rng.sample
36
+ # seed = rng.seed
37
+ shuffle = rng.shuffle
38
+ uniform = rng.uniform
39
+
40
+ _assumptions_rng = _random.Random()
41
+ _assumptions_shuffle = _assumptions_rng.shuffle
42
+
43
+
44
def seed(a=None, version=2):
    """Seed both the main generator and the assumptions generator.

    Mirrors ``random.Random.seed``: ``a=None`` seeds from a system source;
    otherwise the given value makes subsequent draws reproducible.
    """
    rng.seed(a=a, version=version)
    _assumptions_rng.seed(a=a, version=version)
48
+
49
def random_complex_number(a=2, b=-1, c=3, d=1, rational=False, tolerance=None):
    """
    Return a random complex number.

    To reduce chance of hitting branch cuts or anything, we guarantee
    b <= Im z <= d, a <= Re z <= c

    When rational is True, a rational approximation to a random number
    is obtained within specified tolerance, if any.
    """
    from sympy.core.numbers import I
    from sympy.simplify.simplify import nsimplify
    # real part drawn from [a, c], imaginary part from [b, d]
    A, B = uniform(a, c), uniform(b, d)
    if not rational:
        return A + I*B
    return (nsimplify(A, rational=True, tolerance=tolerance) +
        I*nsimplify(B, rational=True, tolerance=tolerance))
66
+
67
+
68
def verify_numerically(f, g, z=None, tol=1.0e-6, a=2, b=-1, c=3, d=1):
    """
    Test numerically that f and g agree when evaluated in the argument z.

    If z is None, all symbols will be tested. This routine does not test
    whether there are Floats present with precision higher than 15 digits
    so if there are, your results may not be what you expect due to round-
    off errors.

    Examples
    ========

    >>> from sympy import sin, cos
    >>> from sympy.abc import x
    >>> from sympy.core.random import verify_numerically as tn
    >>> tn(sin(x)**2 + cos(x)**2, 1, x)
    True
    """
    from sympy.core.symbol import Symbol
    from sympy.core.sympify import sympify
    from sympy.core.numbers import comp
    f, g = (sympify(i) for i in (f, g))
    if z is None:
        # test at every free symbol of either expression
        z = f.free_symbols | g.free_symbols
    elif isinstance(z, Symbol):
        z = [z]
    # substitute the same random complex point into both expressions
    reps = list(zip(z, [random_complex_number(a, b, c, d) for _ in z]))
    z1 = f.subs(reps).n()
    z2 = g.subs(reps).n()
    # comp compares within tol
    return comp(z1, z2, tol)
98
+
99
+
100
def test_derivative_numerically(f, z, tol=1.0e-6, a=2, b=-1, c=3, d=1):
    """
    Test numerically that the symbolically computed derivative of f
    with respect to z is correct.

    This routine does not test whether there are Floats present with
    precision higher than 15 digits so if there are, your results may
    not be what you expect due to round-off errors.

    Examples
    ========

    >>> from sympy import sin
    >>> from sympy.abc import x
    >>> from sympy.core.random import test_derivative_numerically as td
    >>> td(sin(x), x)
    True
    """
    from sympy.core.numbers import comp
    from sympy.core.function import Derivative
    z0 = random_complex_number(a, b, c, d)
    # symbolic derivative evaluated at z0 vs. a numerical derivative at z0
    f1 = f.diff(z).subs(z, z0)
    f2 = Derivative(f, z).doit_numerically(z0)
    return comp(f1.n(), f2.n(), tol)
124
+
125
+
126
def _randrange(seed=None):
    """Return a randrange generator.

    ``seed`` can be

    * None - return randomly seeded generator
    * int - return a generator seeded with the int
    * list - the values to be returned will be taken from the list
      in the order given; the provided list is not modified.

    Examples
    ========

    >>> from sympy.core.random import _randrange
    >>> rr = _randrange()
    >>> rr(1000) # doctest: +SKIP
    999
    >>> rr = _randrange(3)
    >>> rr(1000) # doctest: +SKIP
    238
    >>> rr = _randrange([0, 5, 1, 3, 4])
    >>> rr(3), rr(3)
    (0, 1)
    """
    if seed is None:
        return randrange
    elif isinstance(seed, int):
        rng.seed(seed)
        return randrange
    elif is_sequence(seed):
        seed = list(seed)  # make a copy
        seed.reverse()  # so .pop() yields values in the original order

        def give(a, b=None, seq=seed):
            # mimic randrange(a) / randrange(a, b)
            if b is None:
                a, b = 0, a
            a, b = as_int(a), as_int(b)
            w = b - a
            if w < 1:
                raise ValueError('_randrange got empty range')
            try:
                x = seq.pop()
            except IndexError:
                raise ValueError('_randrange sequence was too short')
            if a <= x < b:
                return x
            else:
                # out-of-range value: discard it and try the next one
                return give(a, b, seq)
        return give
    else:
        raise ValueError('_randrange got an unexpected seed')
177
+
178
+
179
def _randint(seed=None):
    """Return a randint generator.

    ``seed`` can be

    * None - return randomly seeded generator
    * int - return a generator seeded with the int
    * list - the values to be returned will be taken from the list
      in the order given; the provided list is not modified.

    Examples
    ========

    >>> from sympy.core.random import _randint
    >>> ri = _randint()
    >>> ri(1, 1000) # doctest: +SKIP
    999
    >>> ri = _randint(3)
    >>> ri(1, 1000) # doctest: +SKIP
    238
    >>> ri = _randint([0, 5, 1, 2, 4])
    >>> ri(1, 3), ri(1, 3)
    (1, 2)
    """
    if seed is None:
        return randint
    elif isinstance(seed, int):
        rng.seed(seed)
        return randint
    elif is_sequence(seed):
        seed = list(seed)  # make a copy
        seed.reverse()  # so .pop() yields values in the original order

        def give(a, b, seq=seed):
            # mimic randint(a, b): both endpoints inclusive
            a, b = as_int(a), as_int(b)
            w = b - a
            if w < 0:
                raise ValueError('_randint got empty range')
            try:
                x = seq.pop()
            except IndexError:
                raise ValueError('_randint sequence was too short')
            if a <= x <= b:
                return x
            else:
                # out-of-range value: discard it and try the next one
                return give(a, b, seq)
        return give
    else:
        raise ValueError('_randint got an unexpected seed')
parrot/lib/python3.10/site-packages/sympy/core/sorting.py ADDED
@@ -0,0 +1,312 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from collections import defaultdict

from .sympify import sympify, SympifyError
from sympy.utilities.iterables import iterable, uniq


# Public API of this module.
__all__ = ['default_sort_key', 'ordered']
8
+
9
+
10
def default_sort_key(item, order=None):
    """Return a key that can be used for sorting.

    The key has the structure:

    (class_key, (len(args), args), exponent.sort_key(), coefficient)

    This key is supplied by the sort_key routine of Basic objects when
    ``item`` is a Basic object or an object (other than a string) that
    sympifies to a Basic object. Otherwise, this function produces the
    key.

    The ``order`` argument is passed along to the sort_key routine and is
    used to determine how the terms *within* an expression are ordered.
    (See examples below) ``order`` options are: 'lex', 'grlex', 'grevlex',
    and reversed values of the same (e.g. 'rev-lex'). The default order
    value is None (which translates to 'lex').

    Examples
    ========

    >>> from sympy import S, I, default_sort_key, sin, cos, sqrt
    >>> from sympy.core.function import UndefinedFunction
    >>> from sympy.abc import x

    The following are equivalent ways of getting the key for an object:

    >>> x.sort_key() == default_sort_key(x)
    True

    Here are some examples of the key that is produced:

    >>> default_sort_key(UndefinedFunction('f'))
    ((0, 0, 'UndefinedFunction'), (1, ('f',)), ((1, 0, 'Number'),
        (0, ()), (), 1), 1)
    >>> default_sort_key('1')
    ((0, 0, 'str'), (1, ('1',)), ((1, 0, 'Number'), (0, ()), (), 1), 1)
    >>> default_sort_key(S.One)
    ((1, 0, 'Number'), (0, ()), (), 1)
    >>> default_sort_key(2)
    ((1, 0, 'Number'), (0, ()), (), 2)

    While sort_key is a method only defined for SymPy objects,
    default_sort_key will accept anything as an argument so it is
    more robust as a sorting key. For the following, using key=
    lambda i: i.sort_key() would fail because 2 does not have a sort_key
    method; that's why default_sort_key is used. Note, that it also
    handles sympification of non-string items likes ints:

    >>> a = [2, I, -I]
    >>> sorted(a, key=default_sort_key)
    [2, -I, I]

    The returned key can be used anywhere that a key can be specified for
    a function, e.g. sort, min, max, etc...:

    >>> a.sort(key=default_sort_key); a[0]
    2
    >>> min(a, key=default_sort_key)
    2

    Notes
    =====

    The key returned is useful for getting items into a canonical order
    that will be the same across platforms. It is not directly useful for
    sorting lists of expressions:

    >>> a, b = x, 1/x

    Since ``a`` has only 1 term, its value of sort_key is unaffected by
    ``order``:

    >>> a.sort_key() == a.sort_key('rev-lex')
    True

    If ``a`` and ``b`` are combined then the key will differ because there
    are terms that can be ordered:

    >>> eq = a + b
    >>> eq.sort_key() == eq.sort_key('rev-lex')
    False
    >>> eq.as_ordered_terms()
    [x, 1/x]
    >>> eq.as_ordered_terms('rev-lex')
    [1/x, x]

    But since the keys for each of these terms are independent of ``order``'s
    value, they do not sort differently when they appear separately in a list:

    >>> sorted(eq.args, key=default_sort_key)
    [1/x, x]
    >>> sorted(eq.args, key=lambda i: default_sort_key(i, order='rev-lex'))
    [1/x, x]

    The order of terms obtained when using these keys is the order that would
    be obtained if those terms were *factors* in a product.

    Although it is useful for quickly putting expressions in canonical order,
    it does not sort expressions based on their complexity defined by the
    number of operations, power of variables and others:

    >>> sorted([sin(x)*cos(x), sin(x)], key=default_sort_key)
    [sin(x)*cos(x), sin(x)]
    >>> sorted([x, x**2, sqrt(x), x**3], key=default_sort_key)
    [sqrt(x), x, x**2, x**3]

    See Also
    ========

    ordered, sympy.core.expr.Expr.as_ordered_factors, sympy.core.expr.Expr.as_ordered_terms

    """
    from .basic import Basic
    from .singleton import S

    if isinstance(item, Basic):
        # Basic objects supply their own canonical key
        return item.sort_key(order=order)

    if iterable(item, exclude=str):
        if isinstance(item, dict):
            args = item.items()
            unordered = True
        elif isinstance(item, set):
            args = item
            unordered = True
        else:
            # e.g. tuple, list
            args = list(item)
            unordered = False

        # recursively key the contained items
        args = [default_sort_key(arg, order=order) for arg in args]

        if unordered:
            # e.g. dict, set
            args = sorted(args)

        cls_index, args = 10, (len(args), tuple(args))
    else:
        if not isinstance(item, str):
            try:
                item = sympify(item, strict=True)
            except SympifyError:
                # e.g. lambda x: x
                pass
            else:
                if isinstance(item, Basic):
                    # e.g int -> Integer
                    return default_sort_key(item)
                # e.g. UndefinedFunction

        # e.g. str
        cls_index, args = 0, (1, (str(item),))

    # mirror the shape of Basic.sort_key: trailing exponent key and
    # coefficient are fixed at One for non-Basic items
    return (cls_index, 0, item.__class__.__name__
        ), args, S.One.sort_key(), S.One
166
+
167
+
168
+ def _node_count(e):
169
+ # this not only counts nodes, it affirms that the
170
+ # args are Basic (i.e. have an args property). If
171
+ # some object has a non-Basic arg, it needs to be
172
+ # fixed since it is intended that all Basic args
173
+ # are of Basic type (though this is not easy to enforce).
174
+ if e.is_Float:
175
+ return 0.5
176
+ return 1 + sum(map(_node_count, e.args))
177
+
178
+
179
def _nodes(e):
    """
    A helper for ordered() which returns the node count of ``e`` which
    for Basic objects is the number of Basic nodes in the expression tree
    but for other objects is 1 (unless the object is an iterable or dict
    for which the sum of nodes is returned).
    """
    from .basic import Basic
    from .function import Derivative

    if isinstance(e, Basic):
        if isinstance(e, Derivative):
            # count the differentiated expression plus, per variable,
            # either the numeric count or the variable's node count
            return _nodes(e.expr) + sum(i[1] if i[1].is_Number else
                _nodes(i[1]) for i in e.variable_count)
        return _node_count(e)
    elif iterable(e):
        return 1 + sum(_nodes(ei) for ei in e)
    elif isinstance(e, dict):
        # NOTE(review): if iterable() reports True for dicts, this branch
        # is shadowed by the one above (which would iterate keys only) --
        # confirm against the iterable() implementation.
        return 1 + sum(_nodes(k) + _nodes(v) for k, v in e.items())
    else:
        return 1
200
+
201
+
202
def ordered(seq, keys=None, default=True, warn=False):
    """Yield the items of *seq* sorted by ``keys``, applied lazily to
    break ties.

    Items are first grouped by the first key; only groups with more than
    one member have further keys applied (recursively), so expensive
    tie-breakers run only when actually needed.

    Parameters
    ==========

    seq : iterable
        The items to order.
    keys : callable or sequence of callables, optional
        Applied in order; each key is computed only for items that are
        still tied under the previous keys.
    default : bool
        When True (the default), remaining ties are resolved with the
        two standard keys ``_nodes`` (smaller expressions first) and
        ``default_sort_key``. When False, ``keys`` must be provided and
        unresolved ties keep their original relative order.
    warn : bool
        When True, raise ``ValueError`` if distinct items remain tied
        after all keys are exhausted.

    Yields
    ======

    The items of *seq* in the resulting order.
    """
    buckets = defaultdict(list)
    if keys:
        if isinstance(keys, (list, tuple)):
            keys = list(keys)
            first_key = keys.pop(0)
        else:
            first_key, keys = keys, []
        for item in seq:
            buckets[first_key(item)].append(item)
    else:
        if not default:
            raise ValueError('if default=False then keys must be provided')
        # no key: a single group holding everything
        buckets[None].extend(seq)

    for _, group in sorted(buckets.items()):
        if len(group) > 1:
            if keys:
                # still-tied items: recurse with the remaining keys
                group = ordered(group, keys, default, warn)
            elif default:
                # fall back to the standard tie-breakers
                group = ordered(group, (_nodes, default_sort_key,),
                    default=False, warn=warn)
            elif warn:
                distinct = list(uniq(group))
                if len(distinct) > 1:
                    raise ValueError(
                        'not enough keys to break ties: %s' % distinct)
        yield from group
parrot/lib/python3.10/site-packages/sympy/holonomic/__init__.py ADDED
@@ -0,0 +1,18 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ r"""
2
+ The :py:mod:`~sympy.holonomic` module is intended to deal with holonomic functions along
3
+ with various operations on them like addition, multiplication, composition,
4
+ integration and differentiation. The module also implements various kinds of
5
+ conversions such as converting holonomic functions to a different form and the
6
+ other way around.
7
+ """
8
+
9
+ from .holonomic import (DifferentialOperator, HolonomicFunction, DifferentialOperators,
10
+ from_hyper, from_meijerg, expr_to_holonomic)
11
+ from .recurrence import RecurrenceOperators, RecurrenceOperator, HolonomicSequence
12
+
13
+ __all__ = [
14
+ 'DifferentialOperator', 'HolonomicFunction', 'DifferentialOperators',
15
+ 'from_hyper', 'from_meijerg', 'expr_to_holonomic',
16
+
17
+ 'RecurrenceOperators', 'RecurrenceOperator', 'HolonomicSequence',
18
+ ]
parrot/lib/python3.10/site-packages/sympy/holonomic/holonomic.py ADDED
@@ -0,0 +1,2790 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This module implements Holonomic Functions and
3
+ various operations on them.
4
+ """
5
+
6
+ from sympy.core import Add, Mul, Pow
7
+ from sympy.core.numbers import (NaN, Infinity, NegativeInfinity, Float, I, pi,
8
+ equal_valued, int_valued)
9
+ from sympy.core.singleton import S
10
+ from sympy.core.sorting import ordered
11
+ from sympy.core.symbol import Dummy, Symbol
12
+ from sympy.core.sympify import sympify
13
+ from sympy.functions.combinatorial.factorials import binomial, factorial, rf
14
+ from sympy.functions.elementary.exponential import exp_polar, exp, log
15
+ from sympy.functions.elementary.hyperbolic import (cosh, sinh)
16
+ from sympy.functions.elementary.miscellaneous import sqrt
17
+ from sympy.functions.elementary.trigonometric import (cos, sin, sinc)
18
+ from sympy.functions.special.error_functions import (Ci, Shi, Si, erf, erfc, erfi)
19
+ from sympy.functions.special.gamma_functions import gamma
20
+ from sympy.functions.special.hyper import hyper, meijerg
21
+ from sympy.integrals import meijerint
22
+ from sympy.matrices import Matrix
23
+ from sympy.polys.rings import PolyElement
24
+ from sympy.polys.fields import FracElement
25
+ from sympy.polys.domains import QQ, RR
26
+ from sympy.polys.polyclasses import DMF
27
+ from sympy.polys.polyroots import roots
28
+ from sympy.polys.polytools import Poly
29
+ from sympy.polys.matrices import DomainMatrix
30
+ from sympy.printing import sstr
31
+ from sympy.series.limits import limit
32
+ from sympy.series.order import Order
33
+ from sympy.simplify.hyperexpand import hyperexpand
34
+ from sympy.simplify.simplify import nsimplify
35
+ from sympy.solvers.solvers import solve
36
+
37
+ from .recurrence import HolonomicSequence, RecurrenceOperator, RecurrenceOperators
38
+ from .holonomicerrors import (NotPowerSeriesError, NotHyperSeriesError,
39
+ SingularityError, NotHolonomicError)
40
+
41
+
42
def _find_nonzero_solution(r, homosys):
    """Solve ``r * X = homosys`` and return one solution vector.

    The particular solution is shifted by the sum of all nullspace basis
    rows, so a nonzero representative is returned whenever the solution
    space contains one.
    """
    particular, nullspace = r._solve(homosys)
    null_rows = nullspace.shape[0]
    # weight every nullspace row by 1 and add the combination on top of
    # the particular solution
    weights = DomainMatrix.ones((1, null_rows), r.domain)
    combined = particular + weights * nullspace
    return combined.transpose()
49
+
50
+
51
+
52
def DifferentialOperators(base, generator):
    r"""
    This function is used to create annihilators using ``Dx``.

    Explanation
    ===========

    Builds an algebra of differential operators (a Weyl Algebra) over the
    given base ring and returns it together with the differentiation
    operator ``Dx``.

    Parameters
    ==========

    base:
        Base polynomial ring for the algebra.
        The base polynomial ring is the ring of polynomials in :math:`x` that
        will appear as coefficients in the operators.
    generator:
        Generator of the algebra which can
        be either a noncommutative ``Symbol`` or a string. e.g. "Dx" or "D".

    Examples
    ========

    >>> from sympy import ZZ
    >>> from sympy.abc import x
    >>> from sympy.holonomic.holonomic import DifferentialOperators
    >>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x), 'Dx')
    >>> R
    Univariate Differential Operator Algebra in intermediate Dx over the base ring ZZ[x]
    >>> Dx*x
    (1) + (x)*Dx
    """
    algebra = DifferentialOperatorAlgebra(base, generator)
    return algebra, algebra.derivative_operator
88
+
89
+
90
class DifferentialOperatorAlgebra:
    r"""
    An Ore Algebra is a set of noncommutative polynomials in the
    intermediate ``Dx`` and coefficients in a base polynomial ring :math:`A`.
    It follows the commutation rule:

    .. math ::
       Dxa = \sigma(a)Dx + \delta(a)

    for :math:`a \subset A`.

    Where :math:`\sigma: A \Rightarrow A` is an endomorphism and :math:`\delta: A \rightarrow A`
    is a skew-derivation i.e. :math:`\delta(ab) = \delta(a) b + \sigma(a) \delta(b)`.

    If one takes the sigma as identity map and delta as the standard derivation
    then it becomes the algebra of Differential Operators also called
    a Weyl Algebra i.e. an algebra whose elements are Differential Operators.

    This class represents a Weyl Algebra and serves as the parent ring for
    Differential Operators.

    Examples
    ========

    >>> from sympy import ZZ
    >>> from sympy import symbols
    >>> from sympy.holonomic.holonomic import DifferentialOperators
    >>> x = symbols('x')
    >>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x), 'Dx')
    >>> R
    Univariate Differential Operator Algebra in intermediate Dx over the base ring
    ZZ[x]

    See Also
    ========

    DifferentialOperator
    """

    def __init__(self, base, generator):
        # the base polynomial ring for the algebra
        self.base = base
        # the operator representing differentiation i.e. `Dx`
        self.derivative_operator = DifferentialOperator(
            [base.zero, base.one], self)

        # symbol used to print `Dx`; accepted as None (default name),
        # a string, or an existing (noncommutative) Symbol
        if generator is None:
            self.gen_symbol = Symbol('Dx', commutative=False)
        elif isinstance(generator, str):
            self.gen_symbol = Symbol(generator, commutative=False)
        elif isinstance(generator, Symbol):
            self.gen_symbol = generator

    def __str__(self):
        """Readable description naming the generator and the base ring."""
        string = 'Univariate Differential Operator Algebra in intermediate '\
            + sstr(self.gen_symbol) + ' over the base ring ' + \
            (self.base).__str__()

        return string

    __repr__ = __str__

    def __eq__(self, other):
        # Fix: comparing against an object without `base`/`gen_symbol`
        # attributes used to raise AttributeError.  Returning NotImplemented
        # lets Python fall back to the reflected comparison (and ultimately
        # False), which is backward-compatible for algebra-vs-algebra checks.
        if not isinstance(other, DifferentialOperatorAlgebra):
            return NotImplemented
        return self.base == other.base and \
            self.gen_symbol == other.gen_symbol
156
+
157
+
158
class DifferentialOperator:
    """
    Differential Operators are elements of Weyl Algebra. The Operators
    are defined by a list of polynomials in the base ring and the
    parent ring of the Operator i.e. the algebra it belongs to.

    Explanation
    ===========

    Takes a list of polynomials for each power of ``Dx`` and the
    parent ring which must be an instance of DifferentialOperatorAlgebra.

    A Differential Operator can be created easily using
    the operator ``Dx``. See examples below.

    Examples
    ========

    >>> from sympy.holonomic.holonomic import DifferentialOperator, DifferentialOperators
    >>> from sympy import ZZ
    >>> from sympy import symbols
    >>> x = symbols('x')
    >>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x),'Dx')

    >>> DifferentialOperator([0, 1, x**2], R)
    (1)*Dx + (x**2)*Dx**2

    >>> (x*Dx*x + 1 - Dx**2)**2
    (2*x**2 + 2*x + 1) + (4*x**3 + 2*x**2 - 4)*Dx + (x**4 - 6*x - 2)*Dx**2 + (-2*x**2)*Dx**3 + (1)*Dx**4

    See Also
    ========

    DifferentialOperatorAlgebra
    """

    # high priority so SymPy's binary-operator dispatch prefers these
    # methods over the default handlers of other operands
    _op_priority = 20

    def __init__(self, list_of_poly, parent):
        """
        Parameters
        ==========

        list_of_poly:
            List of polynomials belonging to the base ring of the algebra;
            entry ``i`` is the coefficient of ``Dx**i``.
        parent:
            Parent algebra of the operator.
        """

        # the parent ring for this operator
        # must be an DifferentialOperatorAlgebra object
        self.parent = parent
        base = self.parent.base
        self.x = base.gens[0] if isinstance(base.gens[0], Symbol) else base.gens[0][0]
        # sequence of polynomials in x for each power of Dx
        # the list should not have trailing zeroes
        # represents the operator
        # convert the expressions into ring elements using from_sympy
        for i, j in enumerate(list_of_poly):
            if not isinstance(j, base.dtype):
                list_of_poly[i] = base.from_sympy(sympify(j))
            else:
                # round-trip through sympy to normalize into this base ring
                list_of_poly[i] = base.from_sympy(base.to_sympy(j))

        self.listofpoly = list_of_poly
        # highest power of `Dx`
        self.order = len(self.listofpoly) - 1

    def __mul__(self, other):
        """
        Multiplies two DifferentialOperator and returns another
        DifferentialOperator instance using the commutation rule
        Dx*a = a*Dx + a'
        """

        listofself = self.listofpoly
        # normalize `other` to a coefficient list over the base ring
        if isinstance(other, DifferentialOperator):
            listofother = other.listofpoly
        elif isinstance(other, self.parent.base.dtype):
            listofother = [other]
        else:
            listofother = [self.parent.base.from_sympy(sympify(other))]

        # multiplies a polynomial `b` with a list of polynomials
        def _mul_dmp_diffop(b, listofother):
            if isinstance(listofother, list):
                return [i * b for i in listofother]
            return [b * listofother]

        sol = _mul_dmp_diffop(listofself[0], listofother)

        # compute Dx^i * b, given Dx^(i-1) * b, via Dx*a = a*Dx + a'
        def _mul_Dxi_b(b):
            sol1 = [self.parent.base.zero]   # shift by one power of Dx
            sol2 = []                        # derivative part

            if isinstance(b, list):
                for i in b:
                    sol1.append(i)
                    sol2.append(i.diff())
            else:
                sol1.append(self.parent.base.from_sympy(b))
                sol2.append(self.parent.base.from_sympy(b).diff())

            return _add_lists(sol1, sol2)

        for i in range(1, len(listofself)):
            # find Dx^i * b in ith iteration
            listofother = _mul_Dxi_b(listofother)
            # solution = solution + listofself[i] * (Dx^i * b)
            sol = _add_lists(sol, _mul_dmp_diffop(listofself[i], listofother))

        return DifferentialOperator(sol, self.parent)

    def __rmul__(self, other):
        # scalar * operator: scalars commute with the coefficients, so
        # every coefficient is simply scaled.  (operator * operator is
        # always handled by __mul__.)
        if not isinstance(other, DifferentialOperator):

            if not isinstance(other, self.parent.base.dtype):
                other = (self.parent.base).from_sympy(sympify(other))

            sol = [other * j for j in self.listofpoly]
            return DifferentialOperator(sol, self.parent)

    def __add__(self, other):
        # operator + operator: coefficient-wise addition
        if isinstance(other, DifferentialOperator):

            sol = _add_lists(self.listofpoly, other.listofpoly)
            return DifferentialOperator(sol, self.parent)

        # operator + scalar: only the constant (Dx**0) term changes
        list_self = self.listofpoly
        if not isinstance(other, self.parent.base.dtype):
            list_other = [((self.parent).base).from_sympy(sympify(other))]
        else:
            list_other = [other]
        sol = [list_self[0] + list_other[0]] + list_self[1:]
        return DifferentialOperator(sol, self.parent)

    __radd__ = __add__

    def __sub__(self, other):
        return self + (-1) * other

    def __rsub__(self, other):
        return (-1) * self + other

    def __neg__(self):
        return -1 * self

    def __truediv__(self, other):
        # divide by multiplying with the inverse of `other`
        return self * (S.One / other)

    def __pow__(self, n):
        # exponentiation by squaring; plain `Dx**n` takes a shortcut below
        if n == 1:
            return self
        result = DifferentialOperator([self.parent.base.one], self.parent)
        if n == 0:
            return result
        # if self is `Dx`
        if self.listofpoly == self.parent.derivative_operator.listofpoly:
            sol = [self.parent.base.zero]*n + [self.parent.base.one]
            return DifferentialOperator(sol, self.parent)
        x = self
        while True:
            if n % 2:
                result *= x
            n >>= 1
            if not n:
                break
            x *= x
        return result

    def __str__(self):
        """Pretty form like ``(p0) + (p1)*Dx + (p2)*Dx**2``; zero terms are skipped."""
        listofpoly = self.listofpoly
        print_str = ''

        for i, j in enumerate(listofpoly):
            if j == self.parent.base.zero:
                continue

            j = self.parent.base.to_sympy(j)

            if i == 0:
                print_str += '(' + sstr(j) + ')'
                continue

            if print_str:
                print_str += ' + '

            if i == 1:
                print_str += '(' + sstr(j) + ')*%s' %(self.parent.gen_symbol)
                continue

            print_str += '(' + sstr(j) + ')' + '*%s**' %(self.parent.gen_symbol) + sstr(i)

        return print_str

    __repr__ = __str__

    def __eq__(self, other):
        if isinstance(other, DifferentialOperator):
            return self.listofpoly == other.listofpoly and \
                self.parent == other.parent
        # comparison against a scalar: equal when the constant term matches
        # and every higher-order coefficient is zero
        return self.listofpoly[0] == other and \
            all(i is self.parent.base.zero for i in self.listofpoly[1:])

    def is_singular(self, x0):
        """
        Checks if the differential equation is singular at x0,
        i.e. whether x0 is a root of the leading coefficient.
        """

        base = self.parent.base
        return x0 in roots(base.to_sympy(self.listofpoly[-1]), self.x)
370
+
371
+
372
+ class HolonomicFunction:
373
+ r"""
374
+ A Holonomic Function is a solution to a linear homogeneous ordinary
375
+ differential equation with polynomial coefficients. This differential
376
+ equation can also be represented by an annihilator i.e. a Differential
377
+ Operator ``L`` such that :math:`L.f = 0`. For uniqueness of these functions,
378
+ initial conditions can also be provided along with the annihilator.
379
+
380
+ Explanation
381
+ ===========
382
+
383
+ Holonomic functions have closure properties and thus forms a ring.
384
+ Given two Holonomic Functions f and g, their sum, product,
385
+ integral and derivative is also a Holonomic Function.
386
+
387
+ For ordinary points initial condition should be a vector of values of
388
+ the derivatives i.e. :math:`[y(x_0), y'(x_0), y''(x_0) ... ]`.
389
+
390
+ For regular singular points initial conditions can also be provided in this
391
+ format:
392
+ :math:`{s0: [C_0, C_1, ...], s1: [C^1_0, C^1_1, ...], ...}`
393
+ where s0, s1, ... are the roots of indicial equation and vectors
394
+ :math:`[C_0, C_1, ...], [C^0_0, C^0_1, ...], ...` are the corresponding initial
395
+ terms of the associated power series. See Examples below.
396
+
397
+ Examples
398
+ ========
399
+
400
+ >>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
401
+ >>> from sympy import QQ
402
+ >>> from sympy import symbols, S
403
+ >>> x = symbols('x')
404
+ >>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
405
+
406
+ >>> p = HolonomicFunction(Dx - 1, x, 0, [1]) # e^x
407
+ >>> q = HolonomicFunction(Dx**2 + 1, x, 0, [0, 1]) # sin(x)
408
+
409
+ >>> p + q # annihilator of e^x + sin(x)
410
+ HolonomicFunction((-1) + (1)*Dx + (-1)*Dx**2 + (1)*Dx**3, x, 0, [1, 2, 1])
411
+
412
+ >>> p * q # annihilator of e^x * sin(x)
413
+ HolonomicFunction((2) + (-2)*Dx + (1)*Dx**2, x, 0, [0, 1])
414
+
415
+ An example of initial conditions for regular singular points,
416
+ the indicial equation has only one root `1/2`.
417
+
418
+ >>> HolonomicFunction(-S(1)/2 + x*Dx, x, 0, {S(1)/2: [1]})
419
+ HolonomicFunction((-1/2) + (x)*Dx, x, 0, {1/2: [1]})
420
+
421
+ >>> HolonomicFunction(-S(1)/2 + x*Dx, x, 0, {S(1)/2: [1]}).to_expr()
422
+ sqrt(x)
423
+
424
+ To plot a Holonomic Function, one can use `.evalf()` for numerical
425
+ computation. Here's an example on `sin(x)**2/x` using numpy and matplotlib.
426
+
427
+ >>> import sympy.holonomic # doctest: +SKIP
428
+ >>> from sympy import var, sin # doctest: +SKIP
429
+ >>> import matplotlib.pyplot as plt # doctest: +SKIP
430
+ >>> import numpy as np # doctest: +SKIP
431
+ >>> var("x") # doctest: +SKIP
432
+ >>> r = np.linspace(1, 5, 100) # doctest: +SKIP
433
+ >>> y = sympy.holonomic.expr_to_holonomic(sin(x)**2/x, x0=1).evalf(r) # doctest: +SKIP
434
+ >>> plt.plot(r, y, label="holonomic function") # doctest: +SKIP
435
+ >>> plt.show() # doctest: +SKIP
436
+
437
+ """
438
+
439
+ _op_priority = 20
440
+
441
+ def __init__(self, annihilator, x, x0=0, y0=None):
442
+ """
443
+
444
+ Parameters
445
+ ==========
446
+
447
+ annihilator:
448
+ Annihilator of the Holonomic Function, represented by a
449
+ `DifferentialOperator` object.
450
+ x:
451
+ Variable of the function.
452
+ x0:
453
+ The point at which initial conditions are stored.
454
+ Generally an integer.
455
+ y0:
456
+ The initial condition. The proper format for the initial condition
457
+ is described in class docstring. To make the function unique,
458
+ length of the vector `y0` should be equal to or greater than the
459
+ order of differential equation.
460
+ """
461
+
462
+ # initial condition
463
+ self.y0 = y0
464
+ # the point for initial conditions, default is zero.
465
+ self.x0 = x0
466
+ # differential operator L such that L.f = 0
467
+ self.annihilator = annihilator
468
+ self.x = x
469
+
470
+ def __str__(self):
471
+ if self._have_init_cond():
472
+ str_sol = 'HolonomicFunction(%s, %s, %s, %s)' % (str(self.annihilator),\
473
+ sstr(self.x), sstr(self.x0), sstr(self.y0))
474
+ else:
475
+ str_sol = 'HolonomicFunction(%s, %s)' % (str(self.annihilator),\
476
+ sstr(self.x))
477
+
478
+ return str_sol
479
+
480
+ __repr__ = __str__
481
+
482
    def unify(self, other):
        """
        Unifies the base polynomial ring of a given two Holonomic
        Functions.

        Returns a pair of equivalent HolonomicFunctions whose annihilators
        live over a common base ring (the unification of the two ground
        domains).
        """

        R1 = self.annihilator.parent.base
        R2 = other.annihilator.parent.base

        dom1 = R1.dom
        dom2 = R2.dom

        # nothing to do when the rings already agree
        if R1 == R2:
            return (self, other)

        # common base ring over the unified ground domain
        R = (dom1.unify(dom2)).old_poly_ring(self.x)

        newparent, _ = DifferentialOperators(R, str(self.annihilator.parent.gen_symbol))

        # re-express both annihilators through sympy so they can be
        # re-parsed into the new common ring
        sol1 = [R1.to_sympy(i) for i in self.annihilator.listofpoly]
        sol2 = [R2.to_sympy(i) for i in other.annihilator.listofpoly]

        sol1 = DifferentialOperator(sol1, newparent)
        sol2 = DifferentialOperator(sol2, newparent)

        # initial conditions are carried over unchanged
        sol1 = HolonomicFunction(sol1, self.x, self.x0, self.y0)
        sol2 = HolonomicFunction(sol2, other.x, other.x0, other.y0)

        return (sol1, sol2)
511
+
512
+ def is_singularics(self):
513
+ """
514
+ Returns True if the function have singular initial condition
515
+ in the dictionary format.
516
+
517
+ Returns False if the function have ordinary initial condition
518
+ in the list format.
519
+
520
+ Returns None for all other cases.
521
+ """
522
+
523
+ if isinstance(self.y0, dict):
524
+ return True
525
+ elif isinstance(self.y0, list):
526
+ return False
527
+
528
+ def _have_init_cond(self):
529
+ """
530
+ Checks if the function have initial condition.
531
+ """
532
+ return bool(self.y0)
533
+
534
    def _singularics_to_ord(self):
        """
        Converts a singular initial condition to ordinary if possible.

        The conversion applies only when the singular condition has a single
        root which is a positive integer; the series coefficients are then
        rescaled by factorials into derivative values.  Implicitly returns
        None when the conversion is not possible.
        """
        # the (single) root of the indicial equation and its series terms
        a = list(self.y0)[0]
        b = self.y0[a]

        if len(self.y0) == 1 and a == int(a) and a > 0:
            a = int(a)
            # derivatives below order `a` vanish; coefficient of x**(a+i)
            # becomes the (a+i)-th derivative divided by (a+i)!
            y0 = [S.Zero] * a
            y0 += [j * factorial(a + i) for i, j in enumerate(b)]

            return HolonomicFunction(self.annihilator, self.x, self.x0, y0)
547
+
548
    def __add__(self, other):
        """Return a HolonomicFunction annihilating ``self + other``.

        An annihilator for the sum is found by searching for a linear
        combination of derivatives of the two annihilators (an ansatz of
        increasing order), then the initial conditions are combined.
        """
        # if the ground domains are different
        if self.annihilator.parent.base != other.annihilator.parent.base:
            a, b = self.unify(other)
            return a + b

        deg1 = self.annihilator.order
        deg2 = other.annihilator.order
        dim = max(deg1, deg2)
        R = self.annihilator.parent.base
        K = R.get_field()

        rowsself = [self.annihilator]
        rowsother = [other.annihilator]
        gen = self.annihilator.parent.derivative_operator

        # constructing annihilators up to order dim
        for i in range(dim - deg1):
            diff1 = (gen * rowsself[-1])
            rowsself.append(diff1)

        for i in range(dim - deg2):
            diff2 = (gen * rowsother[-1])
            rowsother.append(diff2)

        row = rowsself + rowsother

        # constructing the matrix of the ansatz
        r = []

        for expr in row:
            p = []
            for i in range(dim + 1):
                if i >= len(expr.listofpoly):
                    p.append(K.zero)
                else:
                    p.append(K.new(expr.listofpoly[i].to_list()))
            r.append(p)

        # solving the linear system using gauss jordan solver
        r = DomainMatrix(r, (len(row), dim+1), K).transpose()
        homosys = DomainMatrix.zeros((dim+1, 1), K)
        sol = _find_nonzero_solution(r, homosys)

        # if a solution is not obtained then increasing the order by 1 in each
        # iteration
        while sol.is_zero_matrix:
            dim += 1

            diff1 = (gen * rowsself[-1])
            rowsself.append(diff1)

            diff2 = (gen * rowsother[-1])
            rowsother.append(diff2)

            row = rowsself + rowsother
            r = []

            for expr in row:
                p = []
                for i in range(dim + 1):
                    if i >= len(expr.listofpoly):
                        p.append(K.zero)
                    else:
                        p.append(K.new(expr.listofpoly[i].to_list()))
                r.append(p)

            # solving the linear system using gauss jordan solver
            r = DomainMatrix(r, (len(row), dim+1), K).transpose()
            homosys = DomainMatrix.zeros((dim+1, 1), K)
            sol = _find_nonzero_solution(r, homosys)

        # taking only the coefficients needed to multiply with `self`
        # can be also be done the other way by taking R.H.S and multiplying with
        # `other`
        sol = sol.flat()[:dim + 1 - deg1]
        sol1 = _normalize(sol, self.annihilator.parent)
        # annihilator of the solution
        sol = sol1 * (self.annihilator)
        sol = _normalize(sol.listofpoly, self.annihilator.parent, negative=False)

        if not (self._have_init_cond() and other._have_init_cond()):
            return HolonomicFunction(sol, self.x)

        # both the functions have ordinary initial conditions
        if self.is_singularics() == False and other.is_singularics() == False:

            # directly add the corresponding value
            if self.x0 == other.x0:
                # try to extended the initial conditions
                # using the annihilator
                y1 = _extend_y0(self, sol.order)
                y2 = _extend_y0(other, sol.order)
                y0 = [a + b for a, b in zip(y1, y2)]
                return HolonomicFunction(sol, self.x, self.x0, y0)

            # change the initial conditions to a same point
            selfat0 = self.annihilator.is_singular(0)
            otherat0 = other.annihilator.is_singular(0)
            if self.x0 == 0 and not selfat0 and not otherat0:
                return self + other.change_ics(0)
            if other.x0 == 0 and not selfat0 and not otherat0:
                return self.change_ics(0) + other

            selfatx0 = self.annihilator.is_singular(self.x0)
            otheratx0 = other.annihilator.is_singular(self.x0)
            if not selfatx0 and not otheratx0:
                return self + other.change_ics(self.x0)
            return self.change_ics(other.x0) + other

        # singular conditions at different points cannot be combined;
        # return the annihilator without initial conditions
        if self.x0 != other.x0:
            return HolonomicFunction(sol, self.x)

        # if the functions have singular_ics
        y1 = None
        y2 = None

        if self.is_singularics() == False and other.is_singularics() == True:
            # convert the ordinary initial condition to singular.
            _y0 = [j / factorial(i) for i, j in enumerate(self.y0)]
            y1 = {S.Zero: _y0}
            y2 = other.y0
        elif self.is_singularics() == True and other.is_singularics() == False:
            _y0 = [j / factorial(i) for i, j in enumerate(other.y0)]
            y1 = self.y0
            y2 = {S.Zero: _y0}
        elif self.is_singularics() == True and other.is_singularics() == True:
            y1 = self.y0
            y2 = other.y0

        # computing singular initial condition for the result
        # taking union of the series terms of both functions
        y0 = {}
        for i in y1:
            # add corresponding initial terms if the power
            # on `x` is same
            if i in y2:
                y0[i] = [a + b for a, b in zip(y1[i], y2[i])]
            else:
                y0[i] = y1[i]
        for i in y2:
            if i not in y1:
                y0[i] = y2[i]
        return HolonomicFunction(sol, self.x, self.x0, y0)
692
+
693
+ def integrate(self, limits, initcond=False):
694
+ """
695
+ Integrates the given holonomic function.
696
+
697
+ Examples
698
+ ========
699
+
700
+ >>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
701
+ >>> from sympy import QQ
702
+ >>> from sympy import symbols
703
+ >>> x = symbols('x')
704
+ >>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
705
+ >>> HolonomicFunction(Dx - 1, x, 0, [1]).integrate((x, 0, x)) # e^x - 1
706
+ HolonomicFunction((-1)*Dx + (1)*Dx**2, x, 0, [0, 1])
707
+ >>> HolonomicFunction(Dx**2 + 1, x, 0, [1, 0]).integrate((x, 0, x))
708
+ HolonomicFunction((1)*Dx + (1)*Dx**3, x, 0, [0, 1, 0])
709
+ """
710
+
711
+ # to get the annihilator, just multiply by Dx from right
712
+ D = self.annihilator.parent.derivative_operator
713
+
714
+ # if the function have initial conditions of the series format
715
+ if self.is_singularics() == True:
716
+
717
+ r = self._singularics_to_ord()
718
+ if r:
719
+ return r.integrate(limits, initcond=initcond)
720
+
721
+ # computing singular initial condition for the function
722
+ # produced after integration.
723
+ y0 = {}
724
+ for i in self.y0:
725
+ c = self.y0[i]
726
+ c2 = []
727
+ for j, cj in enumerate(c):
728
+ if cj == 0:
729
+ c2.append(S.Zero)
730
+
731
+ # if power on `x` is -1, the integration becomes log(x)
732
+ # TODO: Implement this case
733
+ elif i + j + 1 == 0:
734
+ raise NotImplementedError("logarithmic terms in the series are not supported")
735
+ else:
736
+ c2.append(cj / S(i + j + 1))
737
+ y0[i + 1] = c2
738
+
739
+ if hasattr(limits, "__iter__"):
740
+ raise NotImplementedError("Definite integration for singular initial conditions")
741
+
742
+ return HolonomicFunction(self.annihilator * D, self.x, self.x0, y0)
743
+
744
+ # if no initial conditions are available for the function
745
+ if not self._have_init_cond():
746
+ if initcond:
747
+ return HolonomicFunction(self.annihilator * D, self.x, self.x0, [S.Zero])
748
+ return HolonomicFunction(self.annihilator * D, self.x)
749
+
750
+ # definite integral
751
+ # initial conditions for the answer will be stored at point `a`,
752
+ # where `a` is the lower limit of the integrand
753
+ if hasattr(limits, "__iter__"):
754
+
755
+ if len(limits) == 3 and limits[0] == self.x:
756
+ x0 = self.x0
757
+ a = limits[1]
758
+ b = limits[2]
759
+ definite = True
760
+
761
+ else:
762
+ definite = False
763
+
764
+ y0 = [S.Zero]
765
+ y0 += self.y0
766
+
767
+ indefinite_integral = HolonomicFunction(self.annihilator * D, self.x, self.x0, y0)
768
+
769
+ if not definite:
770
+ return indefinite_integral
771
+
772
+ # use evalf to get the values at `a`
773
+ if x0 != a:
774
+ try:
775
+ indefinite_expr = indefinite_integral.to_expr()
776
+ except (NotHyperSeriesError, NotPowerSeriesError):
777
+ indefinite_expr = None
778
+
779
+ if indefinite_expr:
780
+ lower = indefinite_expr.subs(self.x, a)
781
+ if isinstance(lower, NaN):
782
+ lower = indefinite_expr.limit(self.x, a)
783
+ else:
784
+ lower = indefinite_integral.evalf(a)
785
+
786
+ if b == self.x:
787
+ y0[0] = y0[0] - lower
788
+ return HolonomicFunction(self.annihilator * D, self.x, x0, y0)
789
+
790
+ elif S(b).is_Number:
791
+ if indefinite_expr:
792
+ upper = indefinite_expr.subs(self.x, b)
793
+ if isinstance(upper, NaN):
794
+ upper = indefinite_expr.limit(self.x, b)
795
+ else:
796
+ upper = indefinite_integral.evalf(b)
797
+
798
+ return upper - lower
799
+
800
+
801
+ # if the upper limit is `x`, the answer will be a function
802
+ if b == self.x:
803
+ return HolonomicFunction(self.annihilator * D, self.x, a, y0)
804
+
805
+ # if the upper limit is a Number, a numerical value will be returned
806
+ elif S(b).is_Number:
807
+ try:
808
+ s = HolonomicFunction(self.annihilator * D, self.x, a,\
809
+ y0).to_expr()
810
+ indefinite = s.subs(self.x, b)
811
+ if not isinstance(indefinite, NaN):
812
+ return indefinite
813
+ else:
814
+ return s.limit(self.x, b)
815
+ except (NotHyperSeriesError, NotPowerSeriesError):
816
+ return HolonomicFunction(self.annihilator * D, self.x, a, y0).evalf(b)
817
+
818
+ return HolonomicFunction(self.annihilator * D, self.x)
819
+
820
+ def diff(self, *args, **kwargs):
821
+ r"""
822
+ Differentiation of the given Holonomic function.
823
+
824
+ Examples
825
+ ========
826
+
827
+ >>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
828
+ >>> from sympy import ZZ
829
+ >>> from sympy import symbols
830
+ >>> x = symbols('x')
831
+ >>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x),'Dx')
832
+ >>> HolonomicFunction(Dx**2 + 1, x, 0, [0, 1]).diff().to_expr()
833
+ cos(x)
834
+ >>> HolonomicFunction(Dx - 2, x, 0, [1]).diff().to_expr()
835
+ 2*exp(2*x)
836
+
837
+ See Also
838
+ ========
839
+
840
+ integrate
841
+ """
842
+ kwargs.setdefault('evaluate', True)
843
+ if args:
844
+ if args[0] != self.x:
845
+ return S.Zero
846
+ elif len(args) == 2:
847
+ sol = self
848
+ for i in range(args[1]):
849
+ sol = sol.diff(args[0])
850
+ return sol
851
+
852
+ ann = self.annihilator
853
+
854
+ # if the function is constant.
855
+ if ann.listofpoly[0] == ann.parent.base.zero and ann.order == 1:
856
+ return S.Zero
857
+
858
+ # if the coefficient of y in the differential equation is zero.
859
+ # a shifting is done to compute the answer in this case.
860
+ elif ann.listofpoly[0] == ann.parent.base.zero:
861
+
862
+ sol = DifferentialOperator(ann.listofpoly[1:], ann.parent)
863
+
864
+ if self._have_init_cond():
865
+ # if ordinary initial condition
866
+ if self.is_singularics() == False:
867
+ return HolonomicFunction(sol, self.x, self.x0, self.y0[1:])
868
+ # TODO: support for singular initial condition
869
+ return HolonomicFunction(sol, self.x)
870
+ else:
871
+ return HolonomicFunction(sol, self.x)
872
+
873
+ # the general algorithm
874
+ R = ann.parent.base
875
+ K = R.get_field()
876
+
877
+ seq_dmf = [K.new(i.to_list()) for i in ann.listofpoly]
878
+
879
+ # -y = a1*y'/a0 + a2*y''/a0 ... + an*y^n/a0
880
+ rhs = [i / seq_dmf[0] for i in seq_dmf[1:]]
881
+ rhs.insert(0, K.zero)
882
+
883
+ # differentiate both lhs and rhs
884
+ sol = _derivate_diff_eq(rhs, K)
885
+
886
+ # add the term y' in lhs to rhs
887
+ sol = _add_lists(sol, [K.zero, K.one])
888
+
889
+ sol = _normalize(sol[1:], self.annihilator.parent, negative=False)
890
+
891
+ if not self._have_init_cond() or self.is_singularics() == True:
892
+ return HolonomicFunction(sol, self.x)
893
+
894
+ y0 = _extend_y0(self, sol.order + 1)[1:]
895
+ return HolonomicFunction(sol, self.x, self.x0, y0)
896
+
897
+ def __eq__(self, other):
898
+ if self.annihilator != other.annihilator or self.x != other.x:
899
+ return False
900
+ if self._have_init_cond() and other._have_init_cond():
901
+ return self.x0 == other.x0 and self.y0 == other.y0
902
+ return True
903
+
904
+ def __mul__(self, other):
905
+ ann_self = self.annihilator
906
+
907
+ if not isinstance(other, HolonomicFunction):
908
+ other = sympify(other)
909
+
910
+ if other.has(self.x):
911
+ raise NotImplementedError(" Can't multiply a HolonomicFunction and expressions/functions.")
912
+
913
+ if not self._have_init_cond():
914
+ return self
915
+ y0 = _extend_y0(self, ann_self.order)
916
+ y1 = [(Poly.new(j, self.x) * other).rep for j in y0]
917
+ return HolonomicFunction(ann_self, self.x, self.x0, y1)
918
+
919
+ if self.annihilator.parent.base != other.annihilator.parent.base:
920
+ a, b = self.unify(other)
921
+ return a * b
922
+
923
+ ann_other = other.annihilator
924
+
925
+ a = ann_self.order
926
+ b = ann_other.order
927
+
928
+ R = ann_self.parent.base
929
+ K = R.get_field()
930
+
931
+ list_self = [K.new(j.to_list()) for j in ann_self.listofpoly]
932
+ list_other = [K.new(j.to_list()) for j in ann_other.listofpoly]
933
+
934
+ # will be used to reduce the degree
935
+ self_red = [-list_self[i] / list_self[a] for i in range(a)]
936
+
937
+ other_red = [-list_other[i] / list_other[b] for i in range(b)]
938
+
939
+ # coeff_mull[i][j] is the coefficient of Dx^i(f).Dx^j(g)
940
+ coeff_mul = [[K.zero for i in range(b + 1)] for j in range(a + 1)]
941
+ coeff_mul[0][0] = K.one
942
+
943
+ # making the ansatz
944
+ lin_sys_elements = [[coeff_mul[i][j] for i in range(a) for j in range(b)]]
945
+ lin_sys = DomainMatrix(lin_sys_elements, (1, a*b), K).transpose()
946
+
947
+ homo_sys = DomainMatrix.zeros((a*b, 1), K)
948
+
949
+ sol = _find_nonzero_solution(lin_sys, homo_sys)
950
+
951
+ # until a non trivial solution is found
952
+ while sol.is_zero_matrix:
953
+
954
+ # updating the coefficients Dx^i(f).Dx^j(g) for next degree
955
+ for i in range(a - 1, -1, -1):
956
+ for j in range(b - 1, -1, -1):
957
+ coeff_mul[i][j + 1] += coeff_mul[i][j]
958
+ coeff_mul[i + 1][j] += coeff_mul[i][j]
959
+ if isinstance(coeff_mul[i][j], K.dtype):
960
+ coeff_mul[i][j] = DMFdiff(coeff_mul[i][j], K)
961
+ else:
962
+ coeff_mul[i][j] = coeff_mul[i][j].diff(self.x)
963
+
964
+ # reduce the terms to lower power using annihilators of f, g
965
+ for i in range(a + 1):
966
+ if coeff_mul[i][b].is_zero:
967
+ continue
968
+ for j in range(b):
969
+ coeff_mul[i][j] += other_red[j] * coeff_mul[i][b]
970
+ coeff_mul[i][b] = K.zero
971
+
972
+ # not d2 + 1, as that is already covered in previous loop
973
+ for j in range(b):
974
+ if coeff_mul[a][j] == 0:
975
+ continue
976
+ for i in range(a):
977
+ coeff_mul[i][j] += self_red[i] * coeff_mul[a][j]
978
+ coeff_mul[a][j] = K.zero
979
+
980
+ lin_sys_elements.append([coeff_mul[i][j] for i in range(a) for j in range(b)])
981
+ lin_sys = DomainMatrix(lin_sys_elements, (len(lin_sys_elements), a*b), K).transpose()
982
+
983
+ sol = _find_nonzero_solution(lin_sys, homo_sys)
984
+
985
+ sol_ann = _normalize(sol.flat(), self.annihilator.parent, negative=False)
986
+
987
+ if not (self._have_init_cond() and other._have_init_cond()):
988
+ return HolonomicFunction(sol_ann, self.x)
989
+
990
+ if self.is_singularics() == False and other.is_singularics() == False:
991
+
992
+ # if both the conditions are at same point
993
+ if self.x0 == other.x0:
994
+
995
+ # try to find more initial conditions
996
+ y0_self = _extend_y0(self, sol_ann.order)
997
+ y0_other = _extend_y0(other, sol_ann.order)
998
+ # h(x0) = f(x0) * g(x0)
999
+ y0 = [y0_self[0] * y0_other[0]]
1000
+
1001
+ # coefficient of Dx^j(f)*Dx^i(g) in Dx^i(fg)
1002
+ for i in range(1, min(len(y0_self), len(y0_other))):
1003
+ coeff = [[0 for i in range(i + 1)] for j in range(i + 1)]
1004
+ for j in range(i + 1):
1005
+ for k in range(i + 1):
1006
+ if j + k == i:
1007
+ coeff[j][k] = binomial(i, j)
1008
+
1009
+ sol = 0
1010
+ for j in range(i + 1):
1011
+ for k in range(i + 1):
1012
+ sol += coeff[j][k]* y0_self[j] * y0_other[k]
1013
+
1014
+ y0.append(sol)
1015
+
1016
+ return HolonomicFunction(sol_ann, self.x, self.x0, y0)
1017
+
1018
+ # if the points are different, consider one
1019
+ selfat0 = self.annihilator.is_singular(0)
1020
+ otherat0 = other.annihilator.is_singular(0)
1021
+
1022
+ if self.x0 == 0 and not selfat0 and not otherat0:
1023
+ return self * other.change_ics(0)
1024
+ if other.x0 == 0 and not selfat0 and not otherat0:
1025
+ return self.change_ics(0) * other
1026
+
1027
+ selfatx0 = self.annihilator.is_singular(self.x0)
1028
+ otheratx0 = other.annihilator.is_singular(self.x0)
1029
+ if not selfatx0 and not otheratx0:
1030
+ return self * other.change_ics(self.x0)
1031
+ return self.change_ics(other.x0) * other
1032
+
1033
+ if self.x0 != other.x0:
1034
+ return HolonomicFunction(sol_ann, self.x)
1035
+
1036
+ # if the functions have singular_ics
1037
+ y1 = None
1038
+ y2 = None
1039
+
1040
+ if self.is_singularics() == False and other.is_singularics() == True:
1041
+ _y0 = [j / factorial(i) for i, j in enumerate(self.y0)]
1042
+ y1 = {S.Zero: _y0}
1043
+ y2 = other.y0
1044
+ elif self.is_singularics() == True and other.is_singularics() == False:
1045
+ _y0 = [j / factorial(i) for i, j in enumerate(other.y0)]
1046
+ y1 = self.y0
1047
+ y2 = {S.Zero: _y0}
1048
+ elif self.is_singularics() == True and other.is_singularics() == True:
1049
+ y1 = self.y0
1050
+ y2 = other.y0
1051
+
1052
+ y0 = {}
1053
+ # multiply every possible pair of the series terms
1054
+ for i in y1:
1055
+ for j in y2:
1056
+ k = min(len(y1[i]), len(y2[j]))
1057
+ c = [sum((y1[i][b] * y2[j][a - b] for b in range(a + 1)),
1058
+ start=S.Zero) for a in range(k)]
1059
+ if not i + j in y0:
1060
+ y0[i + j] = c
1061
+ else:
1062
+ y0[i + j] = [a + b for a, b in zip(c, y0[i + j])]
1063
+ return HolonomicFunction(sol_ann, self.x, self.x0, y0)
1064
+
1065
+ __rmul__ = __mul__
1066
+
1067
+ def __sub__(self, other):
1068
+ return self + other * -1
1069
+
1070
+ def __rsub__(self, other):
1071
+ return self * -1 + other
1072
+
1073
+ def __neg__(self):
1074
+ return -1 * self
1075
+
1076
+ def __truediv__(self, other):
1077
+ return self * (S.One / other)
1078
+
1079
+ def __pow__(self, n):
1080
+ if self.annihilator.order <= 1:
1081
+ ann = self.annihilator
1082
+ parent = ann.parent
1083
+
1084
+ if self.y0 is None:
1085
+ y0 = None
1086
+ else:
1087
+ y0 = [list(self.y0)[0] ** n]
1088
+
1089
+ p0 = ann.listofpoly[0]
1090
+ p1 = ann.listofpoly[1]
1091
+
1092
+ p0 = (Poly.new(p0, self.x) * n).rep
1093
+
1094
+ sol = [parent.base.to_sympy(i) for i in [p0, p1]]
1095
+ dd = DifferentialOperator(sol, parent)
1096
+ return HolonomicFunction(dd, self.x, self.x0, y0)
1097
+ if n < 0:
1098
+ raise NotHolonomicError("Negative Power on a Holonomic Function")
1099
+ Dx = self.annihilator.parent.derivative_operator
1100
+ result = HolonomicFunction(Dx, self.x, S.Zero, [S.One])
1101
+ if n == 0:
1102
+ return result
1103
+ x = self
1104
+ while True:
1105
+ if n % 2:
1106
+ result *= x
1107
+ n >>= 1
1108
+ if not n:
1109
+ break
1110
+ x *= x
1111
+ return result
1112
+
1113
+ def degree(self):
1114
+ """
1115
+ Returns the highest power of `x` in the annihilator.
1116
+ """
1117
+ return max(i.degree() for i in self.annihilator.listofpoly)
1118
+
1119
+ def composition(self, expr, *args, **kwargs):
1120
+ """
1121
+ Returns function after composition of a holonomic
1122
+ function with an algebraic function. The method cannot compute
1123
+ initial conditions for the result by itself, so they can also be
1124
+ provided.
1125
+
1126
+ Examples
1127
+ ========
1128
+
1129
+ >>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
1130
+ >>> from sympy import QQ
1131
+ >>> from sympy import symbols
1132
+ >>> x = symbols('x')
1133
+ >>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
1134
+ >>> HolonomicFunction(Dx - 1, x).composition(x**2, 0, [1]) # e^(x**2)
1135
+ HolonomicFunction((-2*x) + (1)*Dx, x, 0, [1])
1136
+ >>> HolonomicFunction(Dx**2 + 1, x).composition(x**2 - 1, 1, [1, 0])
1137
+ HolonomicFunction((4*x**3) + (-1)*Dx + (x)*Dx**2, x, 1, [1, 0])
1138
+
1139
+ See Also
1140
+ ========
1141
+
1142
+ from_hyper
1143
+ """
1144
+
1145
+ R = self.annihilator.parent
1146
+ a = self.annihilator.order
1147
+ diff = expr.diff(self.x)
1148
+ listofpoly = self.annihilator.listofpoly
1149
+
1150
+ for i, j in enumerate(listofpoly):
1151
+ if isinstance(j, self.annihilator.parent.base.dtype):
1152
+ listofpoly[i] = self.annihilator.parent.base.to_sympy(j)
1153
+
1154
+ r = listofpoly[a].subs({self.x:expr})
1155
+ subs = [-listofpoly[i].subs({self.x:expr}) / r for i in range (a)]
1156
+ coeffs = [S.Zero for i in range(a)] # coeffs[i] == coeff of (D^i f)(a) in D^k (f(a))
1157
+ coeffs[0] = S.One
1158
+ system = [coeffs]
1159
+ homogeneous = Matrix([[S.Zero for i in range(a)]]).transpose()
1160
+ while True:
1161
+ coeffs_next = [p.diff(self.x) for p in coeffs]
1162
+ for i in range(a - 1):
1163
+ coeffs_next[i + 1] += (coeffs[i] * diff)
1164
+ for i in range(a):
1165
+ coeffs_next[i] += (coeffs[-1] * subs[i] * diff)
1166
+ coeffs = coeffs_next
1167
+ # check for linear relations
1168
+ system.append(coeffs)
1169
+ sol, taus = (Matrix(system).transpose()
1170
+ ).gauss_jordan_solve(homogeneous)
1171
+ if sol.is_zero_matrix is not True:
1172
+ break
1173
+
1174
+ tau = list(taus)[0]
1175
+ sol = sol.subs(tau, 1)
1176
+ sol = _normalize(sol[0:], R, negative=False)
1177
+
1178
+ # if initial conditions are given for the resulting function
1179
+ if args:
1180
+ return HolonomicFunction(sol, self.x, args[0], args[1])
1181
+ return HolonomicFunction(sol, self.x)
1182
+
1183
+ def to_sequence(self, lb=True):
1184
+ r"""
1185
+ Finds recurrence relation for the coefficients in the series expansion
1186
+ of the function about :math:`x_0`, where :math:`x_0` is the point at
1187
+ which the initial condition is stored.
1188
+
1189
+ Explanation
1190
+ ===========
1191
+
1192
+ If the point :math:`x_0` is ordinary, solution of the form :math:`[(R, n_0)]`
1193
+ is returned. Where :math:`R` is the recurrence relation and :math:`n_0` is the
1194
+ smallest ``n`` for which the recurrence holds true.
1195
+
1196
+ If the point :math:`x_0` is regular singular, a list of solutions in
1197
+ the format :math:`(R, p, n_0)` is returned, i.e. `[(R, p, n_0), ... ]`.
1198
+ Each tuple in this vector represents a recurrence relation :math:`R`
1199
+ associated with a root of the indicial equation ``p``. Conditions of
1200
+ a different format can also be provided in this case, see the
1201
+ docstring of HolonomicFunction class.
1202
+
1203
+ If it's not possible to numerically compute an initial condition,
1204
+ it is returned as a symbol :math:`C_j`, denoting the coefficient of
1205
+ :math:`(x - x_0)^j` in the power series about :math:`x_0`.
1206
+
1207
+ Examples
1208
+ ========
1209
+
1210
+ >>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
1211
+ >>> from sympy import QQ
1212
+ >>> from sympy import symbols, S
1213
+ >>> x = symbols('x')
1214
+ >>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
1215
+ >>> HolonomicFunction(Dx - 1, x, 0, [1]).to_sequence()
1216
+ [(HolonomicSequence((-1) + (n + 1)Sn, n), u(0) = 1, 0)]
1217
+ >>> HolonomicFunction((1 + x)*Dx**2 + Dx, x, 0, [0, 1]).to_sequence()
1218
+ [(HolonomicSequence((n**2) + (n**2 + n)Sn, n), u(0) = 0, u(1) = 1, u(2) = -1/2, 2)]
1219
+ >>> HolonomicFunction(-S(1)/2 + x*Dx, x, 0, {S(1)/2: [1]}).to_sequence()
1220
+ [(HolonomicSequence((n), n), u(0) = 1, 1/2, 1)]
1221
+
1222
+ See Also
1223
+ ========
1224
+
1225
+ HolonomicFunction.series
1226
+
1227
+ References
1228
+ ==========
1229
+
1230
+ .. [1] https://hal.inria.fr/inria-00070025/document
1231
+ .. [2] https://www3.risc.jku.at/publications/download/risc_2244/DIPLFORM.pdf
1232
+
1233
+ """
1234
+
1235
+ if self.x0 != 0:
1236
+ return self.shift_x(self.x0).to_sequence()
1237
+
1238
+ # check whether a power series exists if the point is singular
1239
+ if self.annihilator.is_singular(self.x0):
1240
+ return self._frobenius(lb=lb)
1241
+
1242
+ dict1 = {}
1243
+ n = Symbol('n', integer=True)
1244
+ dom = self.annihilator.parent.base.dom
1245
+ R, _ = RecurrenceOperators(dom.old_poly_ring(n), 'Sn')
1246
+
1247
+ # substituting each term of the form `x^k Dx^j` in the
1248
+ # annihilator, according to the formula below:
1249
+ # x^k Dx^j = Sum(rf(n + 1 - k, j) * a(n + j - k) * x^n, (n, k, oo))
1250
+ # for explanation see [2].
1251
+ for i, j in enumerate(self.annihilator.listofpoly):
1252
+
1253
+ listofdmp = j.all_coeffs()
1254
+ degree = len(listofdmp) - 1
1255
+
1256
+ for k in range(degree + 1):
1257
+ coeff = listofdmp[degree - k]
1258
+
1259
+ if coeff == 0:
1260
+ continue
1261
+
1262
+ if (i - k, k) in dict1:
1263
+ dict1[(i - k, k)] += (dom.to_sympy(coeff) * rf(n - k + 1, i))
1264
+ else:
1265
+ dict1[(i - k, k)] = (dom.to_sympy(coeff) * rf(n - k + 1, i))
1266
+
1267
+
1268
+ sol = []
1269
+ keylist = [i[0] for i in dict1]
1270
+ lower = min(keylist)
1271
+ upper = max(keylist)
1272
+ degree = self.degree()
1273
+
1274
+ # the recurrence relation holds for all values of
1275
+ # n greater than smallest_n, i.e. n >= smallest_n
1276
+ smallest_n = lower + degree
1277
+ dummys = {}
1278
+ eqs = []
1279
+ unknowns = []
1280
+
1281
+ # an appropriate shift of the recurrence
1282
+ for j in range(lower, upper + 1):
1283
+ if j in keylist:
1284
+ temp = sum((v.subs(n, n - lower)
1285
+ for k, v in dict1.items() if k[0] == j),
1286
+ start=S.Zero)
1287
+ sol.append(temp)
1288
+ else:
1289
+ sol.append(S.Zero)
1290
+
1291
+ # the recurrence relation
1292
+ sol = RecurrenceOperator(sol, R)
1293
+
1294
+ # computing the initial conditions for recurrence
1295
+ order = sol.order
1296
+ all_roots = roots(R.base.to_sympy(sol.listofpoly[-1]), n, filter='Z')
1297
+ all_roots = all_roots.keys()
1298
+
1299
+ if all_roots:
1300
+ max_root = max(all_roots) + 1
1301
+ smallest_n = max(max_root, smallest_n)
1302
+ order += smallest_n
1303
+
1304
+ y0 = _extend_y0(self, order)
1305
+ # u(n) = y^n(0)/factorial(n)
1306
+ u0 = [j / factorial(i) for i, j in enumerate(y0)]
1307
+
1308
+ # if sufficient conditions can't be computed then
1309
+ # try to use the series method i.e.
1310
+ # equate the coefficients of x^k in the equation formed by
1311
+ # substituting the series in differential equation, to zero.
1312
+ if len(u0) < order:
1313
+
1314
+ for i in range(degree):
1315
+ eq = S.Zero
1316
+
1317
+ for j in dict1:
1318
+
1319
+ if i + j[0] < 0:
1320
+ dummys[i + j[0]] = S.Zero
1321
+
1322
+ elif i + j[0] < len(u0):
1323
+ dummys[i + j[0]] = u0[i + j[0]]
1324
+
1325
+ elif not i + j[0] in dummys:
1326
+ dummys[i + j[0]] = Symbol('C_%s' %(i + j[0]))
1327
+ unknowns.append(dummys[i + j[0]])
1328
+
1329
+ if j[1] <= i:
1330
+ eq += dict1[j].subs(n, i) * dummys[i + j[0]]
1331
+
1332
+ eqs.append(eq)
1333
+
1334
+ # solve the system of equations formed
1335
+ soleqs = solve(eqs, *unknowns)
1336
+
1337
+ if isinstance(soleqs, dict):
1338
+
1339
+ for i in range(len(u0), order):
1340
+
1341
+ if i not in dummys:
1342
+ dummys[i] = Symbol('C_%s' %i)
1343
+
1344
+ if dummys[i] in soleqs:
1345
+ u0.append(soleqs[dummys[i]])
1346
+
1347
+ else:
1348
+ u0.append(dummys[i])
1349
+
1350
+ if lb:
1351
+ return [(HolonomicSequence(sol, u0), smallest_n)]
1352
+ return [HolonomicSequence(sol, u0)]
1353
+
1354
+ for i in range(len(u0), order):
1355
+
1356
+ if i not in dummys:
1357
+ dummys[i] = Symbol('C_%s' %i)
1358
+
1359
+ s = False
1360
+ for j in soleqs:
1361
+ if dummys[i] in j:
1362
+ u0.append(j[dummys[i]])
1363
+ s = True
1364
+ if not s:
1365
+ u0.append(dummys[i])
1366
+
1367
+ if lb:
1368
+ return [(HolonomicSequence(sol, u0), smallest_n)]
1369
+
1370
+ return [HolonomicSequence(sol, u0)]
1371
+
1372
+ def _frobenius(self, lb=True):
1373
+ # compute the roots of indicial equation
1374
+ indicialroots = self._indicial()
1375
+
1376
+ reals = []
1377
+ compl = []
1378
+ for i in ordered(indicialroots.keys()):
1379
+ if i.is_real:
1380
+ reals.extend([i] * indicialroots[i])
1381
+ else:
1382
+ a, b = i.as_real_imag()
1383
+ compl.extend([(i, a, b)] * indicialroots[i])
1384
+
1385
+ # sort the roots for a fixed ordering of solution
1386
+ compl.sort(key=lambda x : x[1])
1387
+ compl.sort(key=lambda x : x[2])
1388
+ reals.sort()
1389
+
1390
+ # grouping the roots; roots that differ by an integer are put in the same group.
1391
+ grp = []
1392
+
1393
+ for i in reals:
1394
+ if len(grp) == 0:
1395
+ grp.append([i])
1396
+ continue
1397
+ for j in grp:
1398
+ if int_valued(j[0] - i):
1399
+ j.append(i)
1400
+ break
1401
+ else:
1402
+ grp.append([i])
1403
+
1404
+ # True if none of the roots differ by an integer i.e.
1405
+ # each group has only one member
1406
+ independent = all(len(i) == 1 for i in grp)
1407
+
1408
+ allpos = all(i >= 0 for i in reals)
1409
+ allint = all(int_valued(i) for i in reals)
1410
+
1411
+ # if initial conditions are provided
1412
+ # then use them.
1413
+ if self.is_singularics() == True:
1414
+ rootstoconsider = []
1415
+ for i in ordered(self.y0.keys()):
1416
+ for j in ordered(indicialroots.keys()):
1417
+ if equal_valued(j, i):
1418
+ rootstoconsider.append(i)
1419
+
1420
+ elif allpos and allint:
1421
+ rootstoconsider = [min(reals)]
1422
+
1423
+ elif independent:
1424
+ rootstoconsider = [i[0] for i in grp] + [j[0] for j in compl]
1425
+
1426
+ elif not allint:
1427
+ rootstoconsider = [i for i in reals if not int(i) == i]
1428
+
1429
+ elif not allpos:
1430
+
1431
+ if not self._have_init_cond() or S(self.y0[0]).is_finite == False:
1432
+ rootstoconsider = [min(reals)]
1433
+
1434
+ else:
1435
+ posroots = [i for i in reals if i >= 0]
1436
+ rootstoconsider = [min(posroots)]
1437
+
1438
+ n = Symbol('n', integer=True)
1439
+ dom = self.annihilator.parent.base.dom
1440
+ R, _ = RecurrenceOperators(dom.old_poly_ring(n), 'Sn')
1441
+
1442
+ finalsol = []
1443
+ char = ord('C')
1444
+
1445
+ for p in rootstoconsider:
1446
+ dict1 = {}
1447
+
1448
+ for i, j in enumerate(self.annihilator.listofpoly):
1449
+
1450
+ listofdmp = j.all_coeffs()
1451
+ degree = len(listofdmp) - 1
1452
+
1453
+ for k in range(degree + 1):
1454
+ coeff = listofdmp[degree - k]
1455
+
1456
+ if coeff == 0:
1457
+ continue
1458
+
1459
+ if (i - k, k - i) in dict1:
1460
+ dict1[(i - k, k - i)] += (dom.to_sympy(coeff) * rf(n - k + 1 + p, i))
1461
+ else:
1462
+ dict1[(i - k, k - i)] = (dom.to_sympy(coeff) * rf(n - k + 1 + p, i))
1463
+
1464
+ sol = []
1465
+ keylist = [i[0] for i in dict1]
1466
+ lower = min(keylist)
1467
+ upper = max(keylist)
1468
+ degree = max(i[1] for i in dict1)
1469
+ degree2 = min(i[1] for i in dict1)
1470
+
1471
+ smallest_n = lower + degree
1472
+ dummys = {}
1473
+ eqs = []
1474
+ unknowns = []
1475
+
1476
+ for j in range(lower, upper + 1):
1477
+ if j in keylist:
1478
+ temp = sum((v.subs(n, n - lower)
1479
+ for k, v in dict1.items() if k[0] == j),
1480
+ start=S.Zero)
1481
+ sol.append(temp)
1482
+ else:
1483
+ sol.append(S.Zero)
1484
+
1485
+ # the recurrence relation
1486
+ sol = RecurrenceOperator(sol, R)
1487
+
1488
+ # computing the initial conditions for recurrence
1489
+ order = sol.order
1490
+ all_roots = roots(R.base.to_sympy(sol.listofpoly[-1]), n, filter='Z')
1491
+ all_roots = all_roots.keys()
1492
+
1493
+ if all_roots:
1494
+ max_root = max(all_roots) + 1
1495
+ smallest_n = max(max_root, smallest_n)
1496
+ order += smallest_n
1497
+
1498
+ u0 = []
1499
+
1500
+ if self.is_singularics() == True:
1501
+ u0 = self.y0[p]
1502
+
1503
+ elif self.is_singularics() == False and p >= 0 and int(p) == p and len(rootstoconsider) == 1:
1504
+ y0 = _extend_y0(self, order + int(p))
1505
+ # u(n) = y^n(0)/factorial(n)
1506
+ if len(y0) > int(p):
1507
+ u0 = [y0[i] / factorial(i) for i in range(int(p), len(y0))]
1508
+
1509
+ if len(u0) < order:
1510
+
1511
+ for i in range(degree2, degree):
1512
+ eq = S.Zero
1513
+
1514
+ for j in dict1:
1515
+ if i + j[0] < 0:
1516
+ dummys[i + j[0]] = S.Zero
1517
+
1518
+ elif i + j[0] < len(u0):
1519
+ dummys[i + j[0]] = u0[i + j[0]]
1520
+
1521
+ elif not i + j[0] in dummys:
1522
+ letter = chr(char) + '_%s' %(i + j[0])
1523
+ dummys[i + j[0]] = Symbol(letter)
1524
+ unknowns.append(dummys[i + j[0]])
1525
+
1526
+ if j[1] <= i:
1527
+ eq += dict1[j].subs(n, i) * dummys[i + j[0]]
1528
+
1529
+ eqs.append(eq)
1530
+
1531
+ # solve the system of equations formed
1532
+ soleqs = solve(eqs, *unknowns)
1533
+
1534
+ if isinstance(soleqs, dict):
1535
+
1536
+ for i in range(len(u0), order):
1537
+
1538
+ if i not in dummys:
1539
+ letter = chr(char) + '_%s' %i
1540
+ dummys[i] = Symbol(letter)
1541
+
1542
+ if dummys[i] in soleqs:
1543
+ u0.append(soleqs[dummys[i]])
1544
+
1545
+ else:
1546
+ u0.append(dummys[i])
1547
+
1548
+ if lb:
1549
+ finalsol.append((HolonomicSequence(sol, u0), p, smallest_n))
1550
+ continue
1551
+ else:
1552
+ finalsol.append((HolonomicSequence(sol, u0), p))
1553
+ continue
1554
+
1555
+ for i in range(len(u0), order):
1556
+
1557
+ if i not in dummys:
1558
+ letter = chr(char) + '_%s' %i
1559
+ dummys[i] = Symbol(letter)
1560
+
1561
+ s = False
1562
+ for j in soleqs:
1563
+ if dummys[i] in j:
1564
+ u0.append(j[dummys[i]])
1565
+ s = True
1566
+ if not s:
1567
+ u0.append(dummys[i])
1568
+ if lb:
1569
+ finalsol.append((HolonomicSequence(sol, u0), p, smallest_n))
1570
+
1571
+ else:
1572
+ finalsol.append((HolonomicSequence(sol, u0), p))
1573
+ char += 1
1574
+ return finalsol
1575
+
1576
+ def series(self, n=6, coefficient=False, order=True, _recur=None):
1577
+ r"""
1578
+ Finds the power series expansion of given holonomic function about :math:`x_0`.
1579
+
1580
+ Explanation
1581
+ ===========
1582
+
1583
+ A list of series might be returned if :math:`x_0` is a regular point with
1584
+ multiple roots of the indicial equation.
1585
+
1586
+ Examples
1587
+ ========
1588
+
1589
+ >>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
1590
+ >>> from sympy import QQ
1591
+ >>> from sympy import symbols
1592
+ >>> x = symbols('x')
1593
+ >>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
1594
+ >>> HolonomicFunction(Dx - 1, x, 0, [1]).series() # e^x
1595
+ 1 + x + x**2/2 + x**3/6 + x**4/24 + x**5/120 + O(x**6)
1596
+ >>> HolonomicFunction(Dx**2 + 1, x, 0, [0, 1]).series(n=8) # sin(x)
1597
+ x - x**3/6 + x**5/120 - x**7/5040 + O(x**8)
1598
+
1599
+ See Also
1600
+ ========
1601
+
1602
+ HolonomicFunction.to_sequence
1603
+ """
1604
+
1605
+ if _recur is None:
1606
+ recurrence = self.to_sequence()
1607
+ else:
1608
+ recurrence = _recur
1609
+
1610
+ if isinstance(recurrence, tuple) and len(recurrence) == 2:
1611
+ recurrence = recurrence[0]
1612
+ constantpower = 0
1613
+ elif isinstance(recurrence, tuple) and len(recurrence) == 3:
1614
+ constantpower = recurrence[1]
1615
+ recurrence = recurrence[0]
1616
+
1617
+ elif len(recurrence) == 1 and len(recurrence[0]) == 2:
1618
+ recurrence = recurrence[0][0]
1619
+ constantpower = 0
1620
+ elif len(recurrence) == 1 and len(recurrence[0]) == 3:
1621
+ constantpower = recurrence[0][1]
1622
+ recurrence = recurrence[0][0]
1623
+ else:
1624
+ return [self.series(_recur=i) for i in recurrence]
1625
+
1626
+ n = n - int(constantpower)
1627
+ l = len(recurrence.u0) - 1
1628
+ k = recurrence.recurrence.order
1629
+ x = self.x
1630
+ x0 = self.x0
1631
+ seq_dmp = recurrence.recurrence.listofpoly
1632
+ R = recurrence.recurrence.parent.base
1633
+ K = R.get_field()
1634
+ seq = [K.new(j.to_list()) for j in seq_dmp]
1635
+ sub = [-seq[i] / seq[k] for i in range(k)]
1636
+ sol = list(recurrence.u0)
1637
+
1638
+ if l + 1 < n:
1639
+ # use the initial conditions to find the next term
1640
+ for i in range(l + 1 - k, n - k):
1641
+ coeff = sum((DMFsubs(sub[j], i) * sol[i + j]
1642
+ for j in range(k) if i + j >= 0), start=S.Zero)
1643
+ sol.append(coeff)
1644
+
1645
+ if coefficient:
1646
+ return sol
1647
+
1648
+ ser = sum((x**(i + constantpower) * j for i, j in enumerate(sol)),
1649
+ start=S.Zero)
1650
+ if order:
1651
+ ser += Order(x**(n + int(constantpower)), x)
1652
+ if x0 != 0:
1653
+ return ser.subs(x, x - x0)
1654
+ return ser
1655
+
1656
+ def _indicial(self):
1657
+ """
1658
+ Computes roots of the Indicial equation.
1659
+ """
1660
+
1661
+ if self.x0 != 0:
1662
+ return self.shift_x(self.x0)._indicial()
1663
+
1664
+ list_coeff = self.annihilator.listofpoly
1665
+ R = self.annihilator.parent.base
1666
+ x = self.x
1667
+ s = R.zero
1668
+ y = R.one
1669
+
1670
+ def _pole_degree(poly):
1671
+ root_all = roots(R.to_sympy(poly), x, filter='Z')
1672
+ if 0 in root_all.keys():
1673
+ return root_all[0]
1674
+ else:
1675
+ return 0
1676
+
1677
+ degree = max(j.degree() for j in list_coeff)
1678
+ inf = 10 * (max(1, degree) + max(1, self.annihilator.order))
1679
+
1680
+ deg = lambda q: inf if q.is_zero else _pole_degree(q)
1681
+ b = min(deg(q) - j for j, q in enumerate(list_coeff))
1682
+
1683
+ for i, j in enumerate(list_coeff):
1684
+ listofdmp = j.all_coeffs()
1685
+ degree = len(listofdmp) - 1
1686
+ if 0 <= i + b <= degree:
1687
+ s = s + listofdmp[degree - i - b] * y
1688
+ y *= R.from_sympy(x - i)
1689
+
1690
+ return roots(R.to_sympy(s), x)
1691
+
1692
+ def evalf(self, points, method='RK4', h=0.05, derivatives=False):
1693
+ r"""
1694
+ Finds numerical value of a holonomic function using numerical methods.
1695
+ (RK4 by default). A set of points (real or complex) must be provided
1696
+ which will be the path for the numerical integration.
1697
+
1698
+ Explanation
1699
+ ===========
1700
+
1701
+ The path should be given as a list :math:`[x_1, x_2, \dots x_n]`. The numerical
1702
+ values will be computed at each point in this order
1703
+ :math:`x_1 \rightarrow x_2 \rightarrow x_3 \dots \rightarrow x_n`.
1704
+
1705
+ Returns values of the function at :math:`x_1, x_2, \dots x_n` in a list.
1706
+
1707
+ Examples
1708
+ ========
1709
+
1710
+ >>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
1711
+ >>> from sympy import QQ
1712
+ >>> from sympy import symbols
1713
+ >>> x = symbols('x')
1714
+ >>> R, Dx = DifferentialOperators(QQ.old_poly_ring(x),'Dx')
1715
+
1716
+ A straight line on the real axis from (0 to 1)
1717
+
1718
+ >>> r = [0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1]
1719
+
1720
+ Runge-Kutta 4th order on e^x from 0.1 to 1.
1721
+ Exact solution at 1 is 2.71828182845905
1722
+
1723
+ >>> HolonomicFunction(Dx - 1, x, 0, [1]).evalf(r)
1724
+ [1.10517083333333, 1.22140257085069, 1.34985849706254, 1.49182424008069,
1725
+ 1.64872063859684, 1.82211796209193, 2.01375162659678, 2.22553956329232,
1726
+ 2.45960141378007, 2.71827974413517]
1727
+
1728
+ Euler's method for the same
1729
+
1730
+ >>> HolonomicFunction(Dx - 1, x, 0, [1]).evalf(r, method='Euler')
1731
+ [1.1, 1.21, 1.331, 1.4641, 1.61051, 1.771561, 1.9487171, 2.14358881,
1732
+ 2.357947691, 2.5937424601]
1733
+
1734
+ One can also observe that the value obtained using Runge-Kutta 4th order
1735
+ is much more accurate than Euler's method.
1736
+ """
1737
+
1738
+ from sympy.holonomic.numerical import _evalf
1739
+ lp = False
1740
+
1741
+ # if a point `b` is given instead of a mesh
1742
+ if not hasattr(points, "__iter__"):
1743
+ lp = True
1744
+ b = S(points)
1745
+ if self.x0 == b:
1746
+ return _evalf(self, [b], method=method, derivatives=derivatives)[-1]
1747
+
1748
+ if not b.is_Number:
1749
+ raise NotImplementedError
1750
+
1751
+ a = self.x0
1752
+ if a > b:
1753
+ h = -h
1754
+ n = int((b - a) / h)
1755
+ points = [a + h]
1756
+ for i in range(n - 1):
1757
+ points.append(points[-1] + h)
1758
+
1759
+ for i in roots(self.annihilator.parent.base.to_sympy(self.annihilator.listofpoly[-1]), self.x):
1760
+ if i == self.x0 or i in points:
1761
+ raise SingularityError(self, i)
1762
+
1763
+ if lp:
1764
+ return _evalf(self, points, method=method, derivatives=derivatives)[-1]
1765
+ return _evalf(self, points, method=method, derivatives=derivatives)
1766
+
1767
+ def change_x(self, z):
1768
+ """
1769
+ Changes only the variable of Holonomic Function, for internal
1770
+ purposes. For composition use HolonomicFunction.composition()
1771
+ """
1772
+
1773
+ dom = self.annihilator.parent.base.dom
1774
+ R = dom.old_poly_ring(z)
1775
+ parent, _ = DifferentialOperators(R, 'Dx')
1776
+ sol = [R(j.to_list()) for j in self.annihilator.listofpoly]
1777
+ sol = DifferentialOperator(sol, parent)
1778
+ return HolonomicFunction(sol, z, self.x0, self.y0)
1779
+
1780
+ def shift_x(self, a):
1781
+ """
1782
+ Substitute `x + a` for `x`.
1783
+ """
1784
+
1785
+ x = self.x
1786
+ listaftershift = self.annihilator.listofpoly
1787
+ base = self.annihilator.parent.base
1788
+
1789
+ sol = [base.from_sympy(base.to_sympy(i).subs(x, x + a)) for i in listaftershift]
1790
+ sol = DifferentialOperator(sol, self.annihilator.parent)
1791
+ x0 = self.x0 - a
1792
+ if not self._have_init_cond():
1793
+ return HolonomicFunction(sol, x)
1794
+ return HolonomicFunction(sol, x, x0, self.y0)
1795
+
1796
+ def to_hyper(self, as_list=False, _recur=None):
1797
+ r"""
1798
+ Returns a hypergeometric function (or linear combination of them)
1799
+ representing the given holonomic function.
1800
+
1801
+ Explanation
1802
+ ===========
1803
+
1804
+ Returns an answer of the form:
1805
+ `a_1 \cdot x^{b_1} \cdot{hyper()} + a_2 \cdot x^{b_2} \cdot{hyper()} \dots`
1806
+
1807
+ This is very useful as one can now use ``hyperexpand`` to find the
1808
+ symbolic expressions/functions.
1809
+
1810
+ Examples
1811
+ ========
1812
+
1813
+ >>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
1814
+ >>> from sympy import ZZ
1815
+ >>> from sympy import symbols
1816
+ >>> x = symbols('x')
1817
+ >>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x),'Dx')
1818
+ >>> # sin(x)
1819
+ >>> HolonomicFunction(Dx**2 + 1, x, 0, [0, 1]).to_hyper()
1820
+ x*hyper((), (3/2,), -x**2/4)
1821
+ >>> # exp(x)
1822
+ >>> HolonomicFunction(Dx - 1, x, 0, [1]).to_hyper()
1823
+ hyper((), (), x)
1824
+
1825
+ See Also
1826
+ ========
1827
+
1828
+ from_hyper, from_meijerg
1829
+ """
1830
+
1831
+ if _recur is None:
1832
+ recurrence = self.to_sequence()
1833
+ else:
1834
+ recurrence = _recur
1835
+
1836
+ if isinstance(recurrence, tuple) and len(recurrence) == 2:
1837
+ smallest_n = recurrence[1]
1838
+ recurrence = recurrence[0]
1839
+ constantpower = 0
1840
+ elif isinstance(recurrence, tuple) and len(recurrence) == 3:
1841
+ smallest_n = recurrence[2]
1842
+ constantpower = recurrence[1]
1843
+ recurrence = recurrence[0]
1844
+ elif len(recurrence) == 1 and len(recurrence[0]) == 2:
1845
+ smallest_n = recurrence[0][1]
1846
+ recurrence = recurrence[0][0]
1847
+ constantpower = 0
1848
+ elif len(recurrence) == 1 and len(recurrence[0]) == 3:
1849
+ smallest_n = recurrence[0][2]
1850
+ constantpower = recurrence[0][1]
1851
+ recurrence = recurrence[0][0]
1852
+ else:
1853
+ sol = self.to_hyper(as_list=as_list, _recur=recurrence[0])
1854
+ for i in recurrence[1:]:
1855
+ sol += self.to_hyper(as_list=as_list, _recur=i)
1856
+ return sol
1857
+
1858
+ u0 = recurrence.u0
1859
+ r = recurrence.recurrence
1860
+ x = self.x
1861
+ x0 = self.x0
1862
+
1863
+ # order of the recurrence relation
1864
+ m = r.order
1865
+
1866
+ # when no recurrence exists, and the power series have finite terms
1867
+ if m == 0:
1868
+ nonzeroterms = roots(r.parent.base.to_sympy(r.listofpoly[0]), recurrence.n, filter='R')
1869
+
1870
+ sol = S.Zero
1871
+ for j, i in enumerate(nonzeroterms):
1872
+
1873
+ if i < 0 or not int_valued(i):
1874
+ continue
1875
+
1876
+ i = int(i)
1877
+ if i < len(u0):
1878
+ if isinstance(u0[i], (PolyElement, FracElement)):
1879
+ u0[i] = u0[i].as_expr()
1880
+ sol += u0[i] * x**i
1881
+
1882
+ else:
1883
+ sol += Symbol('C_%s' %j) * x**i
1884
+
1885
+ if isinstance(sol, (PolyElement, FracElement)):
1886
+ sol = sol.as_expr() * x**constantpower
1887
+ else:
1888
+ sol = sol * x**constantpower
1889
+ if as_list:
1890
+ if x0 != 0:
1891
+ return [(sol.subs(x, x - x0), )]
1892
+ return [(sol, )]
1893
+ if x0 != 0:
1894
+ return sol.subs(x, x - x0)
1895
+ return sol
1896
+
1897
+ if smallest_n + m > len(u0):
1898
+ raise NotImplementedError("Can't compute sufficient Initial Conditions")
1899
+
1900
+ # check if the recurrence represents a hypergeometric series
1901
+ if any(i != r.parent.base.zero for i in r.listofpoly[1:-1]):
1902
+ raise NotHyperSeriesError(self, self.x0)
1903
+
1904
+ a = r.listofpoly[0]
1905
+ b = r.listofpoly[-1]
1906
+
1907
+ # the constant multiple of argument of hypergeometric function
1908
+ if isinstance(a.LC(), (PolyElement, FracElement)):
1909
+ c = - (S(a.LC().as_expr()) * m**(a.degree())) / (S(b.LC().as_expr()) * m**(b.degree()))
1910
+ else:
1911
+ c = - (S(a.LC()) * m**(a.degree())) / (S(b.LC()) * m**(b.degree()))
1912
+
1913
+ sol = 0
1914
+
1915
+ arg1 = roots(r.parent.base.to_sympy(a), recurrence.n)
1916
+ arg2 = roots(r.parent.base.to_sympy(b), recurrence.n)
1917
+
1918
+ # iterate through the initial conditions to find
1919
+ # the hypergeometric representation of the given
1920
+ # function.
1921
+ # The answer will be a linear combination
1922
+ # of different hypergeometric series which satisfies
1923
+ # the recurrence.
1924
+ if as_list:
1925
+ listofsol = []
1926
+ for i in range(smallest_n + m):
1927
+
1928
+ # if the recurrence relation doesn't hold for `n = i`,
1929
+ # then a Hypergeometric representation doesn't exist.
1930
+ # add the algebraic term a * x**i to the solution,
1931
+ # where a is u0[i]
1932
+ if i < smallest_n:
1933
+ if as_list:
1934
+ listofsol.append(((S(u0[i]) * x**(i+constantpower)).subs(x, x-x0), ))
1935
+ else:
1936
+ sol += S(u0[i]) * x**i
1937
+ continue
1938
+
1939
+ # if the coefficient u0[i] is zero, then the
1940
+ # independent hypergeomtric series starting with
1941
+ # x**i is not a part of the answer.
1942
+ if S(u0[i]) == 0:
1943
+ continue
1944
+
1945
+ ap = []
1946
+ bq = []
1947
+
1948
+ # substitute m * n + i for n
1949
+ for k in ordered(arg1.keys()):
1950
+ ap.extend([nsimplify((i - k) / m)] * arg1[k])
1951
+
1952
+ for k in ordered(arg2.keys()):
1953
+ bq.extend([nsimplify((i - k) / m)] * arg2[k])
1954
+
1955
+ # convention of (k + 1) in the denominator
1956
+ if 1 in bq:
1957
+ bq.remove(1)
1958
+ else:
1959
+ ap.append(1)
1960
+ if as_list:
1961
+ listofsol.append(((S(u0[i])*x**(i+constantpower)).subs(x, x-x0), (hyper(ap, bq, c*x**m)).subs(x, x-x0)))
1962
+ else:
1963
+ sol += S(u0[i]) * hyper(ap, bq, c * x**m) * x**i
1964
+ if as_list:
1965
+ return listofsol
1966
+ sol = sol * x**constantpower
1967
+ if x0 != 0:
1968
+ return sol.subs(x, x - x0)
1969
+
1970
+ return sol
1971
+
1972
+ def to_expr(self):
1973
+ """
1974
+ Converts a Holonomic Function back to elementary functions.
1975
+
1976
+ Examples
1977
+ ========
1978
+
1979
+ >>> from sympy.holonomic.holonomic import HolonomicFunction, DifferentialOperators
1980
+ >>> from sympy import ZZ
1981
+ >>> from sympy import symbols, S
1982
+ >>> x = symbols('x')
1983
+ >>> R, Dx = DifferentialOperators(ZZ.old_poly_ring(x),'Dx')
1984
+ >>> HolonomicFunction(x**2*Dx**2 + x*Dx + (x**2 - 1), x, 0, [0, S(1)/2]).to_expr()
1985
+ besselj(1, x)
1986
+ >>> HolonomicFunction((1 + x)*Dx**3 + Dx**2, x, 0, [1, 1, 1]).to_expr()
1987
+ x*log(x + 1) + log(x + 1) + 1
1988
+
1989
+ """
1990
+
1991
+ return hyperexpand(self.to_hyper()).simplify()
1992
+
1993
+ def change_ics(self, b, lenics=None):
1994
+ """
1995
+ Changes the point `x0` to ``b`` for initial conditions.
1996
+
1997
+ Examples
1998
+ ========
1999
+
2000
+ >>> from sympy.holonomic import expr_to_holonomic
2001
+ >>> from sympy import symbols, sin, exp
2002
+ >>> x = symbols('x')
2003
+
2004
+ >>> expr_to_holonomic(sin(x)).change_ics(1)
2005
+ HolonomicFunction((1) + (1)*Dx**2, x, 1, [sin(1), cos(1)])
2006
+
2007
+ >>> expr_to_holonomic(exp(x)).change_ics(2)
2008
+ HolonomicFunction((-1) + (1)*Dx, x, 2, [exp(2)])
2009
+ """
2010
+
2011
+ symbolic = True
2012
+
2013
+ if lenics is None and len(self.y0) > self.annihilator.order:
2014
+ lenics = len(self.y0)
2015
+ dom = self.annihilator.parent.base.domain
2016
+
2017
+ try:
2018
+ sol = expr_to_holonomic(self.to_expr(), x=self.x, x0=b, lenics=lenics, domain=dom)
2019
+ except (NotPowerSeriesError, NotHyperSeriesError):
2020
+ symbolic = False
2021
+
2022
+ if symbolic and sol.x0 == b:
2023
+ return sol
2024
+
2025
+ y0 = self.evalf(b, derivatives=True)
2026
+ return HolonomicFunction(self.annihilator, self.x, b, y0)
2027
+
2028
+ def to_meijerg(self):
2029
+ """
2030
+ Returns a linear combination of Meijer G-functions.
2031
+
2032
+ Examples
2033
+ ========
2034
+
2035
+ >>> from sympy.holonomic import expr_to_holonomic
2036
+ >>> from sympy import sin, cos, hyperexpand, log, symbols
2037
+ >>> x = symbols('x')
2038
+ >>> hyperexpand(expr_to_holonomic(cos(x) + sin(x)).to_meijerg())
2039
+ sin(x) + cos(x)
2040
+ >>> hyperexpand(expr_to_holonomic(log(x)).to_meijerg()).simplify()
2041
+ log(x)
2042
+
2043
+ See Also
2044
+ ========
2045
+
2046
+ to_hyper
2047
+ """
2048
+
2049
+ # convert to hypergeometric first
2050
+ rep = self.to_hyper(as_list=True)
2051
+ sol = S.Zero
2052
+
2053
+ for i in rep:
2054
+ if len(i) == 1:
2055
+ sol += i[0]
2056
+
2057
+ elif len(i) == 2:
2058
+ sol += i[0] * _hyper_to_meijerg(i[1])
2059
+
2060
+ return sol
2061
+
2062
+
2063
+ def from_hyper(func, x0=0, evalf=False):
2064
+ r"""
2065
+ Converts a hypergeometric function to holonomic.
2066
+ ``func`` is the Hypergeometric Function and ``x0`` is the point at
2067
+ which initial conditions are required.
2068
+
2069
+ Examples
2070
+ ========
2071
+
2072
+ >>> from sympy.holonomic.holonomic import from_hyper
2073
+ >>> from sympy import symbols, hyper, S
2074
+ >>> x = symbols('x')
2075
+ >>> from_hyper(hyper([], [S(3)/2], x**2/4))
2076
+ HolonomicFunction((-x) + (2)*Dx + (x)*Dx**2, x, 1, [sinh(1), -sinh(1) + cosh(1)])
2077
+ """
2078
+
2079
+ a = func.ap
2080
+ b = func.bq
2081
+ z = func.args[2]
2082
+ x = z.atoms(Symbol).pop()
2083
+ R, Dx = DifferentialOperators(QQ.old_poly_ring(x), 'Dx')
2084
+
2085
+ # generalized hypergeometric differential equation
2086
+ xDx = x*Dx
2087
+ r1 = 1
2088
+ for ai in a: # XXX gives sympify error if Mul is used with list of all factors
2089
+ r1 *= xDx + ai
2090
+ xDx_1 = xDx - 1
2091
+ # r2 = Mul(*([Dx] + [xDx_1 + bi for bi in b])) # XXX gives sympify error
2092
+ r2 = Dx
2093
+ for bi in b:
2094
+ r2 *= xDx_1 + bi
2095
+ sol = r1 - r2
2096
+
2097
+ simp = hyperexpand(func)
2098
+
2099
+ if simp in (Infinity, NegativeInfinity):
2100
+ return HolonomicFunction(sol, x).composition(z)
2101
+
2102
+ def _find_conditions(simp, x, x0, order, evalf=False):
2103
+ y0 = []
2104
+ for i in range(order):
2105
+ if evalf:
2106
+ val = simp.subs(x, x0).evalf()
2107
+ else:
2108
+ val = simp.subs(x, x0)
2109
+ # return None if it is Infinite or NaN
2110
+ if val.is_finite is False or isinstance(val, NaN):
2111
+ return None
2112
+ y0.append(val)
2113
+ simp = simp.diff(x)
2114
+ return y0
2115
+
2116
+ # if the function is known symbolically
2117
+ if not isinstance(simp, hyper):
2118
+ y0 = _find_conditions(simp, x, x0, sol.order)
2119
+ while not y0:
2120
+ # if values don't exist at 0, then try to find initial
2121
+ # conditions at 1. If it doesn't exist at 1 too then
2122
+ # try 2 and so on.
2123
+ x0 += 1
2124
+ y0 = _find_conditions(simp, x, x0, sol.order)
2125
+
2126
+ return HolonomicFunction(sol, x).composition(z, x0, y0)
2127
+
2128
+ if isinstance(simp, hyper):
2129
+ x0 = 1
2130
+ # use evalf if the function can't be simplified
2131
+ y0 = _find_conditions(simp, x, x0, sol.order, evalf)
2132
+ while not y0:
2133
+ x0 += 1
2134
+ y0 = _find_conditions(simp, x, x0, sol.order, evalf)
2135
+ return HolonomicFunction(sol, x).composition(z, x0, y0)
2136
+
2137
+ return HolonomicFunction(sol, x).composition(z)
2138
+
2139
+
2140
+ def from_meijerg(func, x0=0, evalf=False, initcond=True, domain=QQ):
2141
+ """
2142
+ Converts a Meijer G-function to Holonomic.
2143
+ ``func`` is the G-Function and ``x0`` is the point at
2144
+ which initial conditions are required.
2145
+
2146
+ Examples
2147
+ ========
2148
+
2149
+ >>> from sympy.holonomic.holonomic import from_meijerg
2150
+ >>> from sympy import symbols, meijerg, S
2151
+ >>> x = symbols('x')
2152
+ >>> from_meijerg(meijerg(([], []), ([S(1)/2], [0]), x**2/4))
2153
+ HolonomicFunction((1) + (1)*Dx**2, x, 0, [0, 1/sqrt(pi)])
2154
+ """
2155
+
2156
+ a = func.ap
2157
+ b = func.bq
2158
+ n = len(func.an)
2159
+ m = len(func.bm)
2160
+ p = len(a)
2161
+ z = func.args[2]
2162
+ x = z.atoms(Symbol).pop()
2163
+ R, Dx = DifferentialOperators(domain.old_poly_ring(x), 'Dx')
2164
+
2165
+ # compute the differential equation satisfied by the
2166
+ # Meijer G-function.
2167
+ xDx = x*Dx
2168
+ xDx1 = xDx + 1
2169
+ r1 = x*(-1)**(m + n - p)
2170
+ for ai in a: # XXX gives sympify error if args given in list
2171
+ r1 *= xDx1 - ai
2172
+ # r2 = Mul(*[xDx - bi for bi in b]) # gives sympify error
2173
+ r2 = 1
2174
+ for bi in b:
2175
+ r2 *= xDx - bi
2176
+ sol = r1 - r2
2177
+
2178
+ if not initcond:
2179
+ return HolonomicFunction(sol, x).composition(z)
2180
+
2181
+ simp = hyperexpand(func)
2182
+
2183
+ if simp in (Infinity, NegativeInfinity):
2184
+ return HolonomicFunction(sol, x).composition(z)
2185
+
2186
+ def _find_conditions(simp, x, x0, order, evalf=False):
2187
+ y0 = []
2188
+ for i in range(order):
2189
+ if evalf:
2190
+ val = simp.subs(x, x0).evalf()
2191
+ else:
2192
+ val = simp.subs(x, x0)
2193
+ if val.is_finite is False or isinstance(val, NaN):
2194
+ return None
2195
+ y0.append(val)
2196
+ simp = simp.diff(x)
2197
+ return y0
2198
+
2199
+ # computing initial conditions
2200
+ if not isinstance(simp, meijerg):
2201
+ y0 = _find_conditions(simp, x, x0, sol.order)
2202
+ while not y0:
2203
+ x0 += 1
2204
+ y0 = _find_conditions(simp, x, x0, sol.order)
2205
+
2206
+ return HolonomicFunction(sol, x).composition(z, x0, y0)
2207
+
2208
+ if isinstance(simp, meijerg):
2209
+ x0 = 1
2210
+ y0 = _find_conditions(simp, x, x0, sol.order, evalf)
2211
+ while not y0:
2212
+ x0 += 1
2213
+ y0 = _find_conditions(simp, x, x0, sol.order, evalf)
2214
+
2215
+ return HolonomicFunction(sol, x).composition(z, x0, y0)
2216
+
2217
+ return HolonomicFunction(sol, x).composition(z)
2218
+
2219
+
2220
+ x_1 = Dummy('x_1')
2221
+ _lookup_table = None
2222
+ domain_for_table = None
2223
+ from sympy.integrals.meijerint import _mytype
2224
+
2225
+
2226
+ def expr_to_holonomic(func, x=None, x0=0, y0=None, lenics=None, domain=None, initcond=True):
2227
+ """
2228
+ Converts a function or an expression to a holonomic function.
2229
+
2230
+ Parameters
2231
+ ==========
2232
+
2233
+ func:
2234
+ The expression to be converted.
2235
+ x:
2236
+ variable for the function.
2237
+ x0:
2238
+ point at which initial condition must be computed.
2239
+ y0:
2240
+ One can optionally provide initial condition if the method
2241
+ is not able to do it automatically.
2242
+ lenics:
2243
+ Number of terms in the initial condition. By default it is
2244
+ equal to the order of the annihilator.
2245
+ domain:
2246
+ Ground domain for the polynomials in ``x`` appearing as coefficients
2247
+ in the annihilator.
2248
+ initcond:
2249
+ Set it false if you do not want the initial conditions to be computed.
2250
+
2251
+ Examples
2252
+ ========
2253
+
2254
+ >>> from sympy.holonomic.holonomic import expr_to_holonomic
2255
+ >>> from sympy import sin, exp, symbols
2256
+ >>> x = symbols('x')
2257
+ >>> expr_to_holonomic(sin(x))
2258
+ HolonomicFunction((1) + (1)*Dx**2, x, 0, [0, 1])
2259
+ >>> expr_to_holonomic(exp(x))
2260
+ HolonomicFunction((-1) + (1)*Dx, x, 0, [1])
2261
+
2262
+ See Also
2263
+ ========
2264
+
2265
+ sympy.integrals.meijerint._rewrite1, _convert_poly_rat_alg, _create_table
2266
+ """
2267
+ func = sympify(func)
2268
+ syms = func.free_symbols
2269
+
2270
+ if not x:
2271
+ if len(syms) == 1:
2272
+ x= syms.pop()
2273
+ else:
2274
+ raise ValueError("Specify the variable for the function")
2275
+ elif x in syms:
2276
+ syms.remove(x)
2277
+
2278
+ extra_syms = list(syms)
2279
+
2280
+ if domain is None:
2281
+ if func.has(Float):
2282
+ domain = RR
2283
+ else:
2284
+ domain = QQ
2285
+ if len(extra_syms) != 0:
2286
+ domain = domain[extra_syms].get_field()
2287
+
2288
+ # try to convert if the function is polynomial or rational
2289
+ solpoly = _convert_poly_rat_alg(func, x, x0=x0, y0=y0, lenics=lenics, domain=domain, initcond=initcond)
2290
+ if solpoly:
2291
+ return solpoly
2292
+
2293
+ # create the lookup table
2294
+ global _lookup_table, domain_for_table
2295
+ if not _lookup_table:
2296
+ domain_for_table = domain
2297
+ _lookup_table = {}
2298
+ _create_table(_lookup_table, domain=domain)
2299
+ elif domain != domain_for_table:
2300
+ domain_for_table = domain
2301
+ _lookup_table = {}
2302
+ _create_table(_lookup_table, domain=domain)
2303
+
2304
+ # use the table directly to convert to Holonomic
2305
+ if func.is_Function:
2306
+ f = func.subs(x, x_1)
2307
+ t = _mytype(f, x_1)
2308
+ if t in _lookup_table:
2309
+ l = _lookup_table[t]
2310
+ sol = l[0][1].change_x(x)
2311
+ else:
2312
+ sol = _convert_meijerint(func, x, initcond=False, domain=domain)
2313
+ if not sol:
2314
+ raise NotImplementedError
2315
+ if y0:
2316
+ sol.y0 = y0
2317
+ if y0 or not initcond:
2318
+ sol.x0 = x0
2319
+ return sol
2320
+ if not lenics:
2321
+ lenics = sol.annihilator.order
2322
+ _y0 = _find_conditions(func, x, x0, lenics)
2323
+ while not _y0:
2324
+ x0 += 1
2325
+ _y0 = _find_conditions(func, x, x0, lenics)
2326
+ return HolonomicFunction(sol.annihilator, x, x0, _y0)
2327
+
2328
+ if y0 or not initcond:
2329
+ sol = sol.composition(func.args[0])
2330
+ if y0:
2331
+ sol.y0 = y0
2332
+ sol.x0 = x0
2333
+ return sol
2334
+ if not lenics:
2335
+ lenics = sol.annihilator.order
2336
+
2337
+ _y0 = _find_conditions(func, x, x0, lenics)
2338
+ while not _y0:
2339
+ x0 += 1
2340
+ _y0 = _find_conditions(func, x, x0, lenics)
2341
+ return sol.composition(func.args[0], x0, _y0)
2342
+
2343
+ # iterate through the expression recursively
2344
+ args = func.args
2345
+ f = func.func
2346
+ sol = expr_to_holonomic(args[0], x=x, initcond=False, domain=domain)
2347
+
2348
+ if f is Add:
2349
+ for i in range(1, len(args)):
2350
+ sol += expr_to_holonomic(args[i], x=x, initcond=False, domain=domain)
2351
+
2352
+ elif f is Mul:
2353
+ for i in range(1, len(args)):
2354
+ sol *= expr_to_holonomic(args[i], x=x, initcond=False, domain=domain)
2355
+
2356
+ elif f is Pow:
2357
+ sol = sol**args[1]
2358
+ sol.x0 = x0
2359
+ if not sol:
2360
+ raise NotImplementedError
2361
+ if y0:
2362
+ sol.y0 = y0
2363
+ if y0 or not initcond:
2364
+ return sol
2365
+ if sol.y0:
2366
+ return sol
2367
+ if not lenics:
2368
+ lenics = sol.annihilator.order
2369
+ if sol.annihilator.is_singular(x0):
2370
+ r = sol._indicial()
2371
+ l = list(r)
2372
+ if len(r) == 1 and r[l[0]] == S.One:
2373
+ r = l[0]
2374
+ g = func / (x - x0)**r
2375
+ singular_ics = _find_conditions(g, x, x0, lenics)
2376
+ singular_ics = [j / factorial(i) for i, j in enumerate(singular_ics)]
2377
+ y0 = {r:singular_ics}
2378
+ return HolonomicFunction(sol.annihilator, x, x0, y0)
2379
+
2380
+ _y0 = _find_conditions(func, x, x0, lenics)
2381
+ while not _y0:
2382
+ x0 += 1
2383
+ _y0 = _find_conditions(func, x, x0, lenics)
2384
+
2385
+ return HolonomicFunction(sol.annihilator, x, x0, _y0)
2386
+
2387
+
2388
+ ## Some helper functions ##
2389
+
2390
+ def _normalize(list_of, parent, negative=True):
2391
+ """
2392
+ Normalize a given annihilator
2393
+ """
2394
+
2395
+ num = []
2396
+ denom = []
2397
+ base = parent.base
2398
+ K = base.get_field()
2399
+ lcm_denom = base.from_sympy(S.One)
2400
+ list_of_coeff = []
2401
+
2402
+ # convert polynomials to the elements of associated
2403
+ # fraction field
2404
+ for i, j in enumerate(list_of):
2405
+ if isinstance(j, base.dtype):
2406
+ list_of_coeff.append(K.new(j.to_list()))
2407
+ elif not isinstance(j, K.dtype):
2408
+ list_of_coeff.append(K.from_sympy(sympify(j)))
2409
+ else:
2410
+ list_of_coeff.append(j)
2411
+
2412
+ # corresponding numerators of the sequence of polynomials
2413
+ num.append(list_of_coeff[i].numer())
2414
+
2415
+ # corresponding denominators
2416
+ denom.append(list_of_coeff[i].denom())
2417
+
2418
+ # lcm of denominators in the coefficients
2419
+ for i in denom:
2420
+ lcm_denom = i.lcm(lcm_denom)
2421
+
2422
+ if negative:
2423
+ lcm_denom = -lcm_denom
2424
+
2425
+ lcm_denom = K.new(lcm_denom.to_list())
2426
+
2427
+ # multiply the coefficients with lcm
2428
+ for i, j in enumerate(list_of_coeff):
2429
+ list_of_coeff[i] = j * lcm_denom
2430
+
2431
+ gcd_numer = base((list_of_coeff[-1].numer() / list_of_coeff[-1].denom()).to_list())
2432
+
2433
+ # gcd of numerators in the coefficients
2434
+ for i in num:
2435
+ gcd_numer = i.gcd(gcd_numer)
2436
+
2437
+ gcd_numer = K.new(gcd_numer.to_list())
2438
+
2439
+ # divide all the coefficients by the gcd
2440
+ for i, j in enumerate(list_of_coeff):
2441
+ frac_ans = j / gcd_numer
2442
+ list_of_coeff[i] = base((frac_ans.numer() / frac_ans.denom()).to_list())
2443
+
2444
+ return DifferentialOperator(list_of_coeff, parent)
2445
+
2446
+
2447
+ def _derivate_diff_eq(listofpoly, K):
2448
+ """
2449
+ Let a differential equation a0(x)y(x) + a1(x)y'(x) + ... = 0
2450
+ where a0, a1,... are polynomials or rational functions. The function
2451
+ returns b0, b1, b2... such that the differential equation
2452
+ b0(x)y(x) + b1(x)y'(x) +... = 0 is formed after differentiating the
2453
+ former equation.
2454
+ """
2455
+
2456
+ sol = []
2457
+ a = len(listofpoly) - 1
2458
+ sol.append(DMFdiff(listofpoly[0], K))
2459
+
2460
+ for i, j in enumerate(listofpoly[1:]):
2461
+ sol.append(DMFdiff(j, K) + listofpoly[i])
2462
+
2463
+ sol.append(listofpoly[a])
2464
+ return sol
2465
+
2466
+
2467
+ def _hyper_to_meijerg(func):
2468
+ """
2469
+ Converts a `hyper` to meijerg.
2470
+ """
2471
+ ap = func.ap
2472
+ bq = func.bq
2473
+
2474
+ if any(i <= 0 and int(i) == i for i in ap):
2475
+ return hyperexpand(func)
2476
+
2477
+ z = func.args[2]
2478
+
2479
+ # parameters of the `meijerg` function.
2480
+ an = (1 - i for i in ap)
2481
+ anp = ()
2482
+ bm = (S.Zero, )
2483
+ bmq = (1 - i for i in bq)
2484
+
2485
+ k = S.One
2486
+
2487
+ for i in bq:
2488
+ k = k * gamma(i)
2489
+
2490
+ for i in ap:
2491
+ k = k / gamma(i)
2492
+
2493
+ return k * meijerg(an, anp, bm, bmq, -z)
2494
+
2495
+
2496
+ def _add_lists(list1, list2):
2497
+ """Takes polynomial sequences of two annihilators a and b and returns
2498
+ the list of polynomials of sum of a and b.
2499
+ """
2500
+ if len(list1) <= len(list2):
2501
+ sol = [a + b for a, b in zip(list1, list2)] + list2[len(list1):]
2502
+ else:
2503
+ sol = [a + b for a, b in zip(list1, list2)] + list1[len(list2):]
2504
+ return sol
2505
+
2506
+
2507
+ def _extend_y0(Holonomic, n):
2508
+ """
2509
+ Tries to find more initial conditions by substituting the initial
2510
+ value point in the differential equation.
2511
+ """
2512
+
2513
+ if Holonomic.annihilator.is_singular(Holonomic.x0) or Holonomic.is_singularics() == True:
2514
+ return Holonomic.y0
2515
+
2516
+ annihilator = Holonomic.annihilator
2517
+ a = annihilator.order
2518
+
2519
+ listofpoly = []
2520
+
2521
+ y0 = Holonomic.y0
2522
+ R = annihilator.parent.base
2523
+ K = R.get_field()
2524
+
2525
+ for j in annihilator.listofpoly:
2526
+ if isinstance(j, annihilator.parent.base.dtype):
2527
+ listofpoly.append(K.new(j.to_list()))
2528
+
2529
+ if len(y0) < a or n <= len(y0):
2530
+ return y0
2531
+ list_red = [-listofpoly[i] / listofpoly[a]
2532
+ for i in range(a)]
2533
+ y1 = y0[:min(len(y0), a)]
2534
+ for _ in range(n - a):
2535
+ sol = 0
2536
+ for a, b in zip(y1, list_red):
2537
+ r = DMFsubs(b, Holonomic.x0)
2538
+ if not getattr(r, 'is_finite', True):
2539
+ return y0
2540
+ if isinstance(r, (PolyElement, FracElement)):
2541
+ r = r.as_expr()
2542
+ sol += a * r
2543
+ y1.append(sol)
2544
+ list_red = _derivate_diff_eq(list_red, K)
2545
+ return y0 + y1[len(y0):]
2546
+
2547
+
2548
+ def DMFdiff(frac, K):
2549
+ # differentiate a DMF object represented as p/q
2550
+ if not isinstance(frac, DMF):
2551
+ return frac.diff()
2552
+
2553
+ p = K.numer(frac)
2554
+ q = K.denom(frac)
2555
+ sol_num = - p * q.diff() + q * p.diff()
2556
+ sol_denom = q**2
2557
+ return K((sol_num.to_list(), sol_denom.to_list()))
2558
+
2559
+
2560
+ def DMFsubs(frac, x0, mpm=False):
2561
+ # substitute the point x0 in DMF object of the form p/q
2562
+ if not isinstance(frac, DMF):
2563
+ return frac
2564
+
2565
+ p = frac.num
2566
+ q = frac.den
2567
+ sol_p = S.Zero
2568
+ sol_q = S.Zero
2569
+
2570
+ if mpm:
2571
+ from mpmath import mp
2572
+
2573
+ for i, j in enumerate(reversed(p)):
2574
+ if mpm:
2575
+ j = sympify(j)._to_mpmath(mp.prec)
2576
+ sol_p += j * x0**i
2577
+
2578
+ for i, j in enumerate(reversed(q)):
2579
+ if mpm:
2580
+ j = sympify(j)._to_mpmath(mp.prec)
2581
+ sol_q += j * x0**i
2582
+
2583
+ if isinstance(sol_p, (PolyElement, FracElement)):
2584
+ sol_p = sol_p.as_expr()
2585
+ if isinstance(sol_q, (PolyElement, FracElement)):
2586
+ sol_q = sol_q.as_expr()
2587
+
2588
+ return sol_p / sol_q
2589
+
2590
+
2591
+ def _convert_poly_rat_alg(func, x, x0=0, y0=None, lenics=None, domain=QQ, initcond=True):
2592
+ """
2593
+ Converts polynomials, rationals and algebraic functions to holonomic.
2594
+ """
2595
+
2596
+ ispoly = func.is_polynomial()
2597
+ if not ispoly:
2598
+ israt = func.is_rational_function()
2599
+ else:
2600
+ israt = True
2601
+
2602
+ if not (ispoly or israt):
2603
+ basepoly, ratexp = func.as_base_exp()
2604
+ if basepoly.is_polynomial() and ratexp.is_Number:
2605
+ if isinstance(ratexp, Float):
2606
+ ratexp = nsimplify(ratexp)
2607
+ m, n = ratexp.p, ratexp.q
2608
+ is_alg = True
2609
+ else:
2610
+ is_alg = False
2611
+ else:
2612
+ is_alg = True
2613
+
2614
+ if not (ispoly or israt or is_alg):
2615
+ return None
2616
+
2617
+ R = domain.old_poly_ring(x)
2618
+ _, Dx = DifferentialOperators(R, 'Dx')
2619
+
2620
+ # if the function is constant
2621
+ if not func.has(x):
2622
+ return HolonomicFunction(Dx, x, 0, [func])
2623
+
2624
+ if ispoly:
2625
+ # differential equation satisfied by polynomial
2626
+ sol = func * Dx - func.diff(x)
2627
+ sol = _normalize(sol.listofpoly, sol.parent, negative=False)
2628
+ is_singular = sol.is_singular(x0)
2629
+
2630
+ # try to compute the conditions for singular points
2631
+ if y0 is None and x0 == 0 and is_singular:
2632
+ rep = R.from_sympy(func).to_list()
2633
+ for i, j in enumerate(reversed(rep)):
2634
+ if j == 0:
2635
+ continue
2636
+ coeff = list(reversed(rep))[i:]
2637
+ indicial = i
2638
+ break
2639
+ for i, j in enumerate(coeff):
2640
+ if isinstance(j, (PolyElement, FracElement)):
2641
+ coeff[i] = j.as_expr()
2642
+ y0 = {indicial: S(coeff)}
2643
+
2644
+ elif israt:
2645
+ p, q = func.as_numer_denom()
2646
+ # differential equation satisfied by rational
2647
+ sol = p * q * Dx + p * q.diff(x) - q * p.diff(x)
2648
+ sol = _normalize(sol.listofpoly, sol.parent, negative=False)
2649
+
2650
+ elif is_alg:
2651
+ sol = n * (x / m) * Dx - 1
2652
+ sol = HolonomicFunction(sol, x).composition(basepoly).annihilator
2653
+ is_singular = sol.is_singular(x0)
2654
+
2655
+ # try to compute the conditions for singular points
2656
+ if y0 is None and x0 == 0 and is_singular and \
2657
+ (lenics is None or lenics <= 1):
2658
+ rep = R.from_sympy(basepoly).to_list()
2659
+ for i, j in enumerate(reversed(rep)):
2660
+ if j == 0:
2661
+ continue
2662
+ if isinstance(j, (PolyElement, FracElement)):
2663
+ j = j.as_expr()
2664
+
2665
+ coeff = S(j)**ratexp
2666
+ indicial = S(i) * ratexp
2667
+ break
2668
+ if isinstance(coeff, (PolyElement, FracElement)):
2669
+ coeff = coeff.as_expr()
2670
+ y0 = {indicial: S([coeff])}
2671
+
2672
+ if y0 or not initcond:
2673
+ return HolonomicFunction(sol, x, x0, y0)
2674
+
2675
+ if not lenics:
2676
+ lenics = sol.order
2677
+
2678
+ if sol.is_singular(x0):
2679
+ r = HolonomicFunction(sol, x, x0)._indicial()
2680
+ l = list(r)
2681
+ if len(r) == 1 and r[l[0]] == S.One:
2682
+ r = l[0]
2683
+ g = func / (x - x0)**r
2684
+ singular_ics = _find_conditions(g, x, x0, lenics)
2685
+ singular_ics = [j / factorial(i) for i, j in enumerate(singular_ics)]
2686
+ y0 = {r:singular_ics}
2687
+ return HolonomicFunction(sol, x, x0, y0)
2688
+
2689
+ y0 = _find_conditions(func, x, x0, lenics)
2690
+ while not y0:
2691
+ x0 += 1
2692
+ y0 = _find_conditions(func, x, x0, lenics)
2693
+
2694
+ return HolonomicFunction(sol, x, x0, y0)
2695
+
2696
+
2697
+ def _convert_meijerint(func, x, initcond=True, domain=QQ):
2698
+ args = meijerint._rewrite1(func, x)
2699
+
2700
+ if args:
2701
+ fac, po, g, _ = args
2702
+ else:
2703
+ return None
2704
+
2705
+ # lists for sum of meijerg functions
2706
+ fac_list = [fac * i[0] for i in g]
2707
+ t = po.as_base_exp()
2708
+ s = t[1] if t[0] == x else S.Zero
2709
+ po_list = [s + i[1] for i in g]
2710
+ G_list = [i[2] for i in g]
2711
+
2712
+ # finds meijerg representation of x**s * meijerg(a1 ... ap, b1 ... bq, z)
2713
+ def _shift(func, s):
2714
+ z = func.args[-1]
2715
+ if z.has(I):
2716
+ z = z.subs(exp_polar, exp)
2717
+
2718
+ d = z.collect(x, evaluate=False)
2719
+ b = list(d)[0]
2720
+ a = d[b]
2721
+
2722
+ t = b.as_base_exp()
2723
+ b = t[1] if t[0] == x else S.Zero
2724
+ r = s / b
2725
+ an = (i + r for i in func.args[0][0])
2726
+ ap = (i + r for i in func.args[0][1])
2727
+ bm = (i + r for i in func.args[1][0])
2728
+ bq = (i + r for i in func.args[1][1])
2729
+
2730
+ return a**-r, meijerg((an, ap), (bm, bq), z)
2731
+
2732
+ coeff, m = _shift(G_list[0], po_list[0])
2733
+ sol = fac_list[0] * coeff * from_meijerg(m, initcond=initcond, domain=domain)
2734
+
2735
+ # add all the meijerg functions after converting to holonomic
2736
+ for i in range(1, len(G_list)):
2737
+ coeff, m = _shift(G_list[i], po_list[i])
2738
+ sol += fac_list[i] * coeff * from_meijerg(m, initcond=initcond, domain=domain)
2739
+
2740
+ return sol
2741
+
2742
+
2743
+ def _create_table(table, domain=QQ):
2744
+ """
2745
+ Creates the look-up table. For a similar implementation
2746
+ see meijerint._create_lookup_table.
2747
+ """
2748
+
2749
+ def add(formula, annihilator, arg, x0=0, y0=()):
2750
+ """
2751
+ Adds a formula in the dictionary
2752
+ """
2753
+ table.setdefault(_mytype(formula, x_1), []).append((formula,
2754
+ HolonomicFunction(annihilator, arg, x0, y0)))
2755
+
2756
+ R = domain.old_poly_ring(x_1)
2757
+ _, Dx = DifferentialOperators(R, 'Dx')
2758
+
2759
+ # add some basic functions
2760
+ add(sin(x_1), Dx**2 + 1, x_1, 0, [0, 1])
2761
+ add(cos(x_1), Dx**2 + 1, x_1, 0, [1, 0])
2762
+ add(exp(x_1), Dx - 1, x_1, 0, 1)
2763
+ add(log(x_1), Dx + x_1*Dx**2, x_1, 1, [0, 1])
2764
+
2765
+ add(erf(x_1), 2*x_1*Dx + Dx**2, x_1, 0, [0, 2/sqrt(pi)])
2766
+ add(erfc(x_1), 2*x_1*Dx + Dx**2, x_1, 0, [1, -2/sqrt(pi)])
2767
+ add(erfi(x_1), -2*x_1*Dx + Dx**2, x_1, 0, [0, 2/sqrt(pi)])
2768
+
2769
+ add(sinh(x_1), Dx**2 - 1, x_1, 0, [0, 1])
2770
+ add(cosh(x_1), Dx**2 - 1, x_1, 0, [1, 0])
2771
+
2772
+ add(sinc(x_1), x_1 + 2*Dx + x_1*Dx**2, x_1)
2773
+
2774
+ add(Si(x_1), x_1*Dx + 2*Dx**2 + x_1*Dx**3, x_1)
2775
+ add(Ci(x_1), x_1*Dx + 2*Dx**2 + x_1*Dx**3, x_1)
2776
+
2777
+ add(Shi(x_1), -x_1*Dx + 2*Dx**2 + x_1*Dx**3, x_1)
2778
+
2779
+
2780
+ def _find_conditions(func, x, x0, order):
2781
+ y0 = []
2782
+ for i in range(order):
2783
+ val = func.subs(x, x0)
2784
+ if isinstance(val, NaN):
2785
+ val = limit(func, x, x0)
2786
+ if val.is_finite is False or isinstance(val, NaN):
2787
+ return None
2788
+ y0.append(val)
2789
+ func = func.diff(x)
2790
+ return y0
parrot/lib/python3.10/site-packages/sympy/holonomic/holonomicerrors.py ADDED
@@ -0,0 +1,49 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Common Exceptions for `holonomic` module. """
2
+
3
+ class BaseHolonomicError(Exception):
4
+
5
+ def new(self, *args):
6
+ raise NotImplementedError("abstract base class")
7
+
8
+ class NotPowerSeriesError(BaseHolonomicError):
9
+
10
+ def __init__(self, holonomic, x0):
11
+ self.holonomic = holonomic
12
+ self.x0 = x0
13
+
14
+ def __str__(self):
15
+ s = 'A Power Series does not exists for '
16
+ s += str(self.holonomic)
17
+ s += ' about %s.' %self.x0
18
+ return s
19
+
20
+ class NotHolonomicError(BaseHolonomicError):
21
+
22
+ def __init__(self, m):
23
+ self.m = m
24
+
25
+ def __str__(self):
26
+ return self.m
27
+
28
+ class SingularityError(BaseHolonomicError):
29
+
30
+ def __init__(self, holonomic, x0):
31
+ self.holonomic = holonomic
32
+ self.x0 = x0
33
+
34
+ def __str__(self):
35
+ s = str(self.holonomic)
36
+ s += ' has a singularity at %s.' %self.x0
37
+ return s
38
+
39
+ class NotHyperSeriesError(BaseHolonomicError):
40
+
41
+ def __init__(self, holonomic, x0):
42
+ self.holonomic = holonomic
43
+ self.x0 = x0
44
+
45
+ def __str__(self):
46
+ s = 'Power series expansion of '
47
+ s += str(self.holonomic)
48
+ s += ' about %s is not hypergeometric' %self.x0
49
+ return s
parrot/lib/python3.10/site-packages/sympy/holonomic/numerical.py ADDED
@@ -0,0 +1,109 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Numerical Methods for Holonomic Functions"""
2
+
3
+ from sympy.core.sympify import sympify
4
+ from sympy.holonomic.holonomic import DMFsubs
5
+
6
+ from mpmath import mp
7
+
8
+
9
+ def _evalf(func, points, derivatives=False, method='RK4'):
10
+ """
11
+ Numerical methods for numerical integration along a given set of
12
+ points in the complex plane.
13
+ """
14
+
15
+ ann = func.annihilator
16
+ a = ann.order
17
+ R = ann.parent.base
18
+ K = R.get_field()
19
+
20
+ if method == 'Euler':
21
+ meth = _euler
22
+ else:
23
+ meth = _rk4
24
+
25
+ dmf = []
26
+ for j in ann.listofpoly:
27
+ dmf.append(K.new(j.to_list()))
28
+
29
+ red = [-dmf[i] / dmf[a] for i in range(a)]
30
+
31
+ y0 = func.y0
32
+ if len(y0) < a:
33
+ raise TypeError("Not Enough Initial Conditions")
34
+ x0 = func.x0
35
+ sol = [meth(red, x0, points[0], y0, a)]
36
+
37
+ for i, j in enumerate(points[1:]):
38
+ sol.append(meth(red, points[i], j, sol[-1], a))
39
+
40
+ if not derivatives:
41
+ return [sympify(i[0]) for i in sol]
42
+ else:
43
+ return sympify(sol)
44
+
45
+
46
+ def _euler(red, x0, x1, y0, a):
47
+ """
48
+ Euler's method for numerical integration.
49
+ From x0 to x1 with initial values given at x0 as vector y0.
50
+ """
51
+
52
+ A = sympify(x0)._to_mpmath(mp.prec)
53
+ B = sympify(x1)._to_mpmath(mp.prec)
54
+ y_0 = [sympify(i)._to_mpmath(mp.prec) for i in y0]
55
+ h = B - A
56
+ f_0 = y_0[1:]
57
+ f_0_n = 0
58
+
59
+ for i in range(a):
60
+ f_0_n += sympify(DMFsubs(red[i], A, mpm=True))._to_mpmath(mp.prec) * y_0[i]
61
+ f_0.append(f_0_n)
62
+
63
+ sol = []
64
+ for i in range(a):
65
+ sol.append(y_0[i] + h * f_0[i])
66
+
67
+ return sol
68
+
69
+
70
+ def _rk4(red, x0, x1, y0, a):
71
+ """
72
+ Runge-Kutta 4th order numerical method.
73
+ """
74
+
75
+ A = sympify(x0)._to_mpmath(mp.prec)
76
+ B = sympify(x1)._to_mpmath(mp.prec)
77
+ y_0 = [sympify(i)._to_mpmath(mp.prec) for i in y0]
78
+ h = B - A
79
+
80
+ f_0_n = 0
81
+ f_1_n = 0
82
+ f_2_n = 0
83
+ f_3_n = 0
84
+
85
+ f_0 = y_0[1:]
86
+ for i in range(a):
87
+ f_0_n += sympify(DMFsubs(red[i], A, mpm=True))._to_mpmath(mp.prec) * y_0[i]
88
+ f_0.append(f_0_n)
89
+
90
+ f_1 = [y_0[i] + f_0[i]*h/2 for i in range(1, a)]
91
+ for i in range(a):
92
+ f_1_n += sympify(DMFsubs(red[i], A + h/2, mpm=True))._to_mpmath(mp.prec) * (y_0[i] + f_0[i]*h/2)
93
+ f_1.append(f_1_n)
94
+
95
+ f_2 = [y_0[i] + f_1[i]*h/2 for i in range(1, a)]
96
+ for i in range(a):
97
+ f_2_n += sympify(DMFsubs(red[i], A + h/2, mpm=True))._to_mpmath(mp.prec) * (y_0[i] + f_1[i]*h/2)
98
+ f_2.append(f_2_n)
99
+
100
+ f_3 = [y_0[i] + f_2[i]*h for i in range(1, a)]
101
+ for i in range(a):
102
+ f_3_n += sympify(DMFsubs(red[i], A + h, mpm=True))._to_mpmath(mp.prec) * (y_0[i] + f_2[i]*h)
103
+ f_3.append(f_3_n)
104
+
105
+ sol = []
106
+ for i in range(a):
107
+ sol.append(y_0[i] + h * (f_0[i]+2*f_1[i]+2*f_2[i]+f_3[i])/6)
108
+
109
+ return sol
parrot/lib/python3.10/site-packages/sympy/holonomic/recurrence.py ADDED
@@ -0,0 +1,365 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Recurrence Operators"""
2
+
3
+ from sympy.core.singleton import S
4
+ from sympy.core.symbol import (Symbol, symbols)
5
+ from sympy.printing import sstr
6
+ from sympy.core.sympify import sympify
7
+
8
+
9
+ def RecurrenceOperators(base, generator):
10
+ """
11
+ Returns an Algebra of Recurrence Operators and the operator for
12
+ shifting i.e. the `Sn` operator.
13
+ The first argument needs to be the base polynomial ring for the algebra
14
+ and the second argument must be a generator which can be either a
15
+ noncommutative Symbol or a string.
16
+
17
+ Examples
18
+ ========
19
+
20
+ >>> from sympy import ZZ
21
+ >>> from sympy import symbols
22
+ >>> from sympy.holonomic.recurrence import RecurrenceOperators
23
+ >>> n = symbols('n', integer=True)
24
+ >>> R, Sn = RecurrenceOperators(ZZ.old_poly_ring(n), 'Sn')
25
+ """
26
+
27
+ ring = RecurrenceOperatorAlgebra(base, generator)
28
+ return (ring, ring.shift_operator)
29
+
30
+
31
+ class RecurrenceOperatorAlgebra:
32
+ """
33
+ A Recurrence Operator Algebra is a set of noncommutative polynomials
34
+ in intermediate `Sn` and coefficients in a base ring A. It follows the
35
+ commutation rule:
36
+ Sn * a(n) = a(n + 1) * Sn
37
+
38
+ This class represents a Recurrence Operator Algebra and serves as the parent ring
39
+ for Recurrence Operators.
40
+
41
+ Examples
42
+ ========
43
+
44
+ >>> from sympy import ZZ
45
+ >>> from sympy import symbols
46
+ >>> from sympy.holonomic.recurrence import RecurrenceOperators
47
+ >>> n = symbols('n', integer=True)
48
+ >>> R, Sn = RecurrenceOperators(ZZ.old_poly_ring(n), 'Sn')
49
+ >>> R
50
+ Univariate Recurrence Operator Algebra in intermediate Sn over the base ring
51
+ ZZ[n]
52
+
53
+ See Also
54
+ ========
55
+
56
+ RecurrenceOperator
57
+ """
58
+
59
+ def __init__(self, base, generator):
60
+ # the base ring for the algebra
61
+ self.base = base
62
+ # the operator representing shift i.e. `Sn`
63
+ self.shift_operator = RecurrenceOperator(
64
+ [base.zero, base.one], self)
65
+
66
+ if generator is None:
67
+ self.gen_symbol = symbols('Sn', commutative=False)
68
+ else:
69
+ if isinstance(generator, str):
70
+ self.gen_symbol = symbols(generator, commutative=False)
71
+ elif isinstance(generator, Symbol):
72
+ self.gen_symbol = generator
73
+
74
+ def __str__(self):
75
+ string = 'Univariate Recurrence Operator Algebra in intermediate '\
76
+ + sstr(self.gen_symbol) + ' over the base ring ' + \
77
+ (self.base).__str__()
78
+
79
+ return string
80
+
81
+ __repr__ = __str__
82
+
83
+ def __eq__(self, other):
84
+ if self.base == other.base and self.gen_symbol == other.gen_symbol:
85
+ return True
86
+ else:
87
+ return False
88
+
89
+
90
+ def _add_lists(list1, list2):
91
+ if len(list1) <= len(list2):
92
+ sol = [a + b for a, b in zip(list1, list2)] + list2[len(list1):]
93
+ else:
94
+ sol = [a + b for a, b in zip(list1, list2)] + list1[len(list2):]
95
+ return sol
96
+
97
+
98
+ class RecurrenceOperator:
99
+ """
100
+ The Recurrence Operators are defined by a list of polynomials
101
+ in the base ring and the parent ring of the Operator.
102
+
103
+ Explanation
104
+ ===========
105
+
106
+ Takes a list of polynomials for each power of Sn and the
107
+ parent ring which must be an instance of RecurrenceOperatorAlgebra.
108
+
109
+ A Recurrence Operator can be created easily using
110
+ the operator `Sn`. See examples below.
111
+
112
+ Examples
113
+ ========
114
+
115
+ >>> from sympy.holonomic.recurrence import RecurrenceOperator, RecurrenceOperators
116
+ >>> from sympy import ZZ
117
+ >>> from sympy import symbols
118
+ >>> n = symbols('n', integer=True)
119
+ >>> R, Sn = RecurrenceOperators(ZZ.old_poly_ring(n),'Sn')
120
+
121
+ >>> RecurrenceOperator([0, 1, n**2], R)
122
+ (1)Sn + (n**2)Sn**2
123
+
124
+ >>> Sn*n
125
+ (n + 1)Sn
126
+
127
+ >>> n*Sn*n + 1 - Sn**2*n
128
+ (1) + (n**2 + n)Sn + (-n - 2)Sn**2
129
+
130
+ See Also
131
+ ========
132
+
133
+ DifferentialOperatorAlgebra
134
+ """
135
+
136
+ _op_priority = 20
137
+
138
+ def __init__(self, list_of_poly, parent):
139
+ # the parent ring for this operator
140
+ # must be an RecurrenceOperatorAlgebra object
141
+ self.parent = parent
142
+ # sequence of polynomials in n for each power of Sn
143
+ # represents the operator
144
+ # convert the expressions into ring elements using from_sympy
145
+ if isinstance(list_of_poly, list):
146
+ for i, j in enumerate(list_of_poly):
147
+ if isinstance(j, int):
148
+ list_of_poly[i] = self.parent.base.from_sympy(S(j))
149
+ elif not isinstance(j, self.parent.base.dtype):
150
+ list_of_poly[i] = self.parent.base.from_sympy(j)
151
+
152
+ self.listofpoly = list_of_poly
153
+ self.order = len(self.listofpoly) - 1
154
+
155
+ def __mul__(self, other):
156
+ """
157
+ Multiplies two Operators and returns another
158
+ RecurrenceOperator instance using the commutation rule
159
+ Sn * a(n) = a(n + 1) * Sn
160
+ """
161
+
162
+ listofself = self.listofpoly
163
+ base = self.parent.base
164
+
165
+ if not isinstance(other, RecurrenceOperator):
166
+ if not isinstance(other, self.parent.base.dtype):
167
+ listofother = [self.parent.base.from_sympy(sympify(other))]
168
+
169
+ else:
170
+ listofother = [other]
171
+ else:
172
+ listofother = other.listofpoly
173
+ # multiply a polynomial `b` with a list of polynomials
174
+
175
+ def _mul_dmp_diffop(b, listofother):
176
+ if isinstance(listofother, list):
177
+ sol = []
178
+ for i in listofother:
179
+ sol.append(i * b)
180
+ return sol
181
+ else:
182
+ return [b * listofother]
183
+
184
+ sol = _mul_dmp_diffop(listofself[0], listofother)
185
+
186
+ # compute Sn^i * b
187
+ def _mul_Sni_b(b):
188
+ sol = [base.zero]
189
+
190
+ if isinstance(b, list):
191
+ for i in b:
192
+ j = base.to_sympy(i).subs(base.gens[0], base.gens[0] + S.One)
193
+ sol.append(base.from_sympy(j))
194
+
195
+ else:
196
+ j = b.subs(base.gens[0], base.gens[0] + S.One)
197
+ sol.append(base.from_sympy(j))
198
+
199
+ return sol
200
+
201
+ for i in range(1, len(listofself)):
202
+ # find Sn^i * b in ith iteration
203
+ listofother = _mul_Sni_b(listofother)
204
+ # solution = solution + listofself[i] * (Sn^i * b)
205
+ sol = _add_lists(sol, _mul_dmp_diffop(listofself[i], listofother))
206
+
207
+ return RecurrenceOperator(sol, self.parent)
208
+
209
+ def __rmul__(self, other):
210
+ if not isinstance(other, RecurrenceOperator):
211
+
212
+ if isinstance(other, int):
213
+ other = S(other)
214
+
215
+ if not isinstance(other, self.parent.base.dtype):
216
+ other = (self.parent.base).from_sympy(other)
217
+
218
+ sol = []
219
+ for j in self.listofpoly:
220
+ sol.append(other * j)
221
+
222
+ return RecurrenceOperator(sol, self.parent)
223
+
224
+ def __add__(self, other):
225
+ if isinstance(other, RecurrenceOperator):
226
+
227
+ sol = _add_lists(self.listofpoly, other.listofpoly)
228
+ return RecurrenceOperator(sol, self.parent)
229
+
230
+ else:
231
+
232
+ if isinstance(other, int):
233
+ other = S(other)
234
+ list_self = self.listofpoly
235
+ if not isinstance(other, self.parent.base.dtype):
236
+ list_other = [((self.parent).base).from_sympy(other)]
237
+ else:
238
+ list_other = [other]
239
+ sol = []
240
+ sol.append(list_self[0] + list_other[0])
241
+ sol += list_self[1:]
242
+
243
+ return RecurrenceOperator(sol, self.parent)
244
+
245
+ __radd__ = __add__
246
+
247
+ def __sub__(self, other):
248
+ return self + (-1) * other
249
+
250
+ def __rsub__(self, other):
251
+ return (-1) * self + other
252
+
253
+ def __pow__(self, n):
254
+ if n == 1:
255
+ return self
256
+ result = RecurrenceOperator([self.parent.base.one], self.parent)
257
+ if n == 0:
258
+ return result
259
+ # if self is `Sn`
260
+ if self.listofpoly == self.parent.shift_operator.listofpoly:
261
+ sol = [self.parent.base.zero] * n + [self.parent.base.one]
262
+ return RecurrenceOperator(sol, self.parent)
263
+ x = self
264
+ while True:
265
+ if n % 2:
266
+ result *= x
267
+ n >>= 1
268
+ if not n:
269
+ break
270
+ x *= x
271
+ return result
272
+
273
+ def __str__(self):
274
+ listofpoly = self.listofpoly
275
+ print_str = ''
276
+
277
+ for i, j in enumerate(listofpoly):
278
+ if j == self.parent.base.zero:
279
+ continue
280
+
281
+ j = self.parent.base.to_sympy(j)
282
+
283
+ if i == 0:
284
+ print_str += '(' + sstr(j) + ')'
285
+ continue
286
+
287
+ if print_str:
288
+ print_str += ' + '
289
+
290
+ if i == 1:
291
+ print_str += '(' + sstr(j) + ')Sn'
292
+ continue
293
+
294
+ print_str += '(' + sstr(j) + ')' + 'Sn**' + sstr(i)
295
+
296
+ return print_str
297
+
298
+ __repr__ = __str__
299
+
300
+ def __eq__(self, other):
301
+ if isinstance(other, RecurrenceOperator):
302
+ if self.listofpoly == other.listofpoly and self.parent == other.parent:
303
+ return True
304
+ else:
305
+ return False
306
+ else:
307
+ if self.listofpoly[0] == other:
308
+ for i in self.listofpoly[1:]:
309
+ if i is not self.parent.base.zero:
310
+ return False
311
+ return True
312
+ else:
313
+ return False
314
+
315
+
316
+ class HolonomicSequence:
317
+ """
318
+ A Holonomic Sequence is a type of sequence satisfying a linear homogeneous
319
+ recurrence relation with Polynomial coefficients. Alternatively, A sequence
320
+ is Holonomic if and only if its generating function is a Holonomic Function.
321
+ """
322
+
323
+ def __init__(self, recurrence, u0=[]):
324
+ self.recurrence = recurrence
325
+ if not isinstance(u0, list):
326
+ self.u0 = [u0]
327
+ else:
328
+ self.u0 = u0
329
+
330
+ if len(self.u0) == 0:
331
+ self._have_init_cond = False
332
+ else:
333
+ self._have_init_cond = True
334
+ self.n = recurrence.parent.base.gens[0]
335
+
336
+ def __repr__(self):
337
+ str_sol = 'HolonomicSequence(%s, %s)' % ((self.recurrence).__repr__(), sstr(self.n))
338
+ if not self._have_init_cond:
339
+ return str_sol
340
+ else:
341
+ cond_str = ''
342
+ seq_str = 0
343
+ for i in self.u0:
344
+ cond_str += ', u(%s) = %s' % (sstr(seq_str), sstr(i))
345
+ seq_str += 1
346
+
347
+ sol = str_sol + cond_str
348
+ return sol
349
+
350
+ __str__ = __repr__
351
+
352
+ def __eq__(self, other):
353
+ if self.recurrence == other.recurrence:
354
+ if self.n == other.n:
355
+ if self._have_init_cond and other._have_init_cond:
356
+ if self.u0 == other.u0:
357
+ return True
358
+ else:
359
+ return False
360
+ else:
361
+ return True
362
+ else:
363
+ return False
364
+ else:
365
+ return False
parrot/lib/python3.10/site-packages/sympy/holonomic/tests/__init__.py ADDED
File without changes
parrot/lib/python3.10/site-packages/sympy/holonomic/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (173 Bytes). View file
 
parrot/lib/python3.10/site-packages/sympy/holonomic/tests/__pycache__/test_holonomic.cpython-310.pyc ADDED
Binary file (32 kB). View file
 
parrot/lib/python3.10/site-packages/sympy/holonomic/tests/test_recurrence.py ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from sympy.holonomic.recurrence import RecurrenceOperators, RecurrenceOperator
2
+ from sympy.core.symbol import symbols
3
+ from sympy.polys.domains.rationalfield import QQ
4
+
5
+
6
+ def test_RecurrenceOperator():
7
+ n = symbols('n', integer=True)
8
+ R, Sn = RecurrenceOperators(QQ.old_poly_ring(n), 'Sn')
9
+ assert Sn*n == (n + 1)*Sn
10
+ assert Sn*n**2 == (n**2+1+2*n)*Sn
11
+ assert Sn**2*n**2 == (n**2 + 4*n + 4)*Sn**2
12
+ p = (Sn**3*n**2 + Sn*n)**2
13
+ q = (n**2 + 3*n + 2)*Sn**2 + (2*n**3 + 19*n**2 + 57*n + 52)*Sn**4 + (n**4 + 18*n**3 + \
14
+ 117*n**2 + 324*n + 324)*Sn**6
15
+ assert p == q
16
+
17
+
18
+ def test_RecurrenceOperatorEqPoly():
19
+ n = symbols('n', integer=True)
20
+ R, Sn = RecurrenceOperators(QQ.old_poly_ring(n), 'Sn')
21
+ rr = RecurrenceOperator([n**2, 0, 0], R)
22
+ rr2 = RecurrenceOperator([n**2, 1, n], R)
23
+ assert not rr == rr2
24
+
25
+ # polynomial comparison issue, see https://github.com/sympy/sympy/pull/15799
26
+ # should work once that is solved
27
+ # d = rr.listofpoly[0]
28
+ # assert rr == d
29
+
30
+ d2 = rr2.listofpoly[0]
31
+ assert not rr2 == d2
32
+
33
+
34
+ def test_RecurrenceOperatorPow():
35
+ n = symbols('n', integer=True)
36
+ R, _ = RecurrenceOperators(QQ.old_poly_ring(n), 'Sn')
37
+ rr = RecurrenceOperator([n**2, 0, 0], R)
38
+ a = RecurrenceOperator([R.base.one], R)
39
+ for m in range(10):
40
+ assert a == rr**m
41
+ a *= rr
parrot/lib/python3.10/site-packages/sympy/stats/__init__.py ADDED
@@ -0,0 +1,202 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ SymPy statistics module
3
+
4
+ Introduces a random variable type into the SymPy language.
5
+
6
+ Random variables may be declared using prebuilt functions such as
7
+ Normal, Exponential, Coin, Die, etc... or built with functions like FiniteRV.
8
+
9
+ Queries on random expressions can be made using the functions
10
+
11
+ ========================= =============================
12
+ Expression Meaning
13
+ ------------------------- -----------------------------
14
+ ``P(condition)`` Probability
15
+ ``E(expression)`` Expected value
16
+ ``H(expression)`` Entropy
17
+ ``variance(expression)`` Variance
18
+ ``density(expression)`` Probability Density Function
19
+ ``sample(expression)`` Produce a realization
20
+ ``where(condition)`` Where the condition is true
21
+ ========================= =============================
22
+
23
+ Examples
24
+ ========
25
+
26
+ >>> from sympy.stats import P, E, variance, Die, Normal
27
+ >>> from sympy import simplify
28
+ >>> X, Y = Die('X', 6), Die('Y', 6) # Define two six sided dice
29
+ >>> Z = Normal('Z', 0, 1) # Declare a Normal random variable with mean 0, std 1
30
+ >>> P(X>3) # Probability X is greater than 3
31
+ 1/2
32
+ >>> E(X+Y) # Expectation of the sum of two dice
33
+ 7
34
+ >>> variance(X+Y) # Variance of the sum of two dice
35
+ 35/6
36
+ >>> simplify(P(Z>1)) # Probability of Z being greater than 1
37
+ 1/2 - erf(sqrt(2)/2)/2
38
+
39
+
40
+ One could also create custom distribution and define custom random variables
41
+ as follows:
42
+
43
+ 1. If you want to create a Continuous Random Variable:
44
+
45
+ >>> from sympy.stats import ContinuousRV, P, E
46
+ >>> from sympy import exp, Symbol, Interval, oo
47
+ >>> x = Symbol('x')
48
+ >>> pdf = exp(-x) # pdf of the Continuous Distribution
49
+ >>> Z = ContinuousRV(x, pdf, set=Interval(0, oo))
50
+ >>> E(Z)
51
+ 1
52
+ >>> P(Z > 5)
53
+ exp(-5)
54
+
55
+ 1.1 To create an instance of Continuous Distribution:
56
+
57
+ >>> from sympy.stats import ContinuousDistributionHandmade
58
+ >>> from sympy import Lambda
59
+ >>> dist = ContinuousDistributionHandmade(Lambda(x, pdf), set=Interval(0, oo))
60
+ >>> dist.pdf(x)
61
+ exp(-x)
62
+
63
+ 2. If you want to create a Discrete Random Variable:
64
+
65
+ >>> from sympy.stats import DiscreteRV, P, E
66
+ >>> from sympy import Symbol, S
67
+ >>> p = S(1)/2
68
+ >>> x = Symbol('x', integer=True, positive=True)
69
+ >>> pdf = p*(1 - p)**(x - 1)
70
+ >>> D = DiscreteRV(x, pdf, set=S.Naturals)
71
+ >>> E(D)
72
+ 2
73
+ >>> P(D > 3)
74
+ 1/8
75
+
76
+ 2.1 To create an instance of Discrete Distribution:
77
+
78
+ >>> from sympy.stats import DiscreteDistributionHandmade
79
+ >>> from sympy import Lambda
80
+ >>> dist = DiscreteDistributionHandmade(Lambda(x, pdf), set=S.Naturals)
81
+ >>> dist.pdf(x)
82
+ 2**(1 - x)/2
83
+
84
+ 3. If you want to create a Finite Random Variable:
85
+
86
+ >>> from sympy.stats import FiniteRV, P, E
87
+ >>> from sympy import Rational, Eq
88
+ >>> pmf = {1: Rational(1, 3), 2: Rational(1, 6), 3: Rational(1, 4), 4: Rational(1, 4)}
89
+ >>> X = FiniteRV('X', pmf)
90
+ >>> E(X)
91
+ 29/12
92
+ >>> P(X > 3)
93
+ 1/4
94
+
95
+ 3.1 To create an instance of Finite Distribution:
96
+
97
+ >>> from sympy.stats import FiniteDistributionHandmade
98
+ >>> dist = FiniteDistributionHandmade(pmf)
99
+ >>> dist.pmf(x)
100
+ Lambda(x, Piecewise((1/3, Eq(x, 1)), (1/6, Eq(x, 2)), (1/4, Eq(x, 3) | Eq(x, 4)), (0, True)))
101
+ """
102
+
103
+ __all__ = [
104
+ 'P', 'E', 'H', 'density', 'where', 'given', 'sample', 'cdf','median',
105
+ 'characteristic_function', 'pspace', 'sample_iter', 'variance', 'std',
106
+ 'skewness', 'kurtosis', 'covariance', 'dependent', 'entropy', 'independent',
107
+ 'random_symbols', 'correlation', 'factorial_moment', 'moment', 'cmoment',
108
+ 'sampling_density', 'moment_generating_function', 'smoment', 'quantile',
109
+ 'coskewness', 'sample_stochastic_process',
110
+
111
+ 'FiniteRV', 'DiscreteUniform', 'Die', 'Bernoulli', 'Coin', 'Binomial',
112
+ 'BetaBinomial', 'Hypergeometric', 'Rademacher', 'IdealSoliton', 'RobustSoliton',
113
+ 'FiniteDistributionHandmade',
114
+
115
+ 'ContinuousRV', 'Arcsin', 'Benini', 'Beta', 'BetaNoncentral', 'BetaPrime',
116
+ 'BoundedPareto', 'Cauchy', 'Chi', 'ChiNoncentral', 'ChiSquared', 'Dagum', 'Davis', 'Erlang',
117
+ 'ExGaussian', 'Exponential', 'ExponentialPower', 'FDistribution',
118
+ 'FisherZ', 'Frechet', 'Gamma', 'GammaInverse', 'Gompertz', 'Gumbel',
119
+ 'Kumaraswamy', 'Laplace', 'Levy', 'Logistic','LogCauchy', 'LogLogistic', 'LogitNormal', 'LogNormal', 'Lomax',
120
+ 'Moyal', 'Maxwell', 'Nakagami', 'Normal', 'GaussianInverse', 'Pareto', 'PowerFunction',
121
+ 'QuadraticU', 'RaisedCosine', 'Rayleigh','Reciprocal', 'StudentT', 'ShiftedGompertz',
122
+ 'Trapezoidal', 'Triangular', 'Uniform', 'UniformSum', 'VonMises', 'Wald',
123
+ 'Weibull', 'WignerSemicircle', 'ContinuousDistributionHandmade',
124
+
125
+ 'FlorySchulz', 'Geometric','Hermite', 'Logarithmic', 'NegativeBinomial', 'Poisson', 'Skellam',
126
+ 'YuleSimon', 'Zeta', 'DiscreteRV', 'DiscreteDistributionHandmade',
127
+
128
+ 'JointRV', 'Dirichlet', 'GeneralizedMultivariateLogGamma',
129
+ 'GeneralizedMultivariateLogGammaOmega', 'Multinomial', 'MultivariateBeta',
130
+ 'MultivariateEwens', 'MultivariateT', 'NegativeMultinomial',
131
+ 'NormalGamma', 'MultivariateNormal', 'MultivariateLaplace', 'marginal_distribution',
132
+
133
+ 'StochasticProcess', 'DiscreteTimeStochasticProcess',
134
+ 'DiscreteMarkovChain', 'TransitionMatrixOf', 'StochasticStateSpaceOf',
135
+ 'GeneratorMatrixOf', 'ContinuousMarkovChain', 'BernoulliProcess',
136
+ 'PoissonProcess', 'WienerProcess', 'GammaProcess',
137
+
138
+ 'CircularEnsemble', 'CircularUnitaryEnsemble',
139
+ 'CircularOrthogonalEnsemble', 'CircularSymplecticEnsemble',
140
+ 'GaussianEnsemble', 'GaussianUnitaryEnsemble',
141
+ 'GaussianOrthogonalEnsemble', 'GaussianSymplecticEnsemble',
142
+ 'joint_eigen_distribution', 'JointEigenDistribution',
143
+ 'level_spacing_distribution',
144
+
145
+ 'MatrixGamma', 'Wishart', 'MatrixNormal', 'MatrixStudentT',
146
+
147
+ 'Probability', 'Expectation', 'Variance', 'Covariance', 'Moment',
148
+ 'CentralMoment',
149
+
150
+ 'ExpectationMatrix', 'VarianceMatrix', 'CrossCovarianceMatrix'
151
+
152
+ ]
153
+ from .rv_interface import (P, E, H, density, where, given, sample, cdf, median,
154
+ characteristic_function, pspace, sample_iter, variance, std, skewness,
155
+ kurtosis, covariance, dependent, entropy, independent, random_symbols,
156
+ correlation, factorial_moment, moment, cmoment, sampling_density,
157
+ moment_generating_function, smoment, quantile, coskewness,
158
+ sample_stochastic_process)
159
+
160
+ from .frv_types import (FiniteRV, DiscreteUniform, Die, Bernoulli, Coin,
161
+ Binomial, BetaBinomial, Hypergeometric, Rademacher,
162
+ FiniteDistributionHandmade, IdealSoliton, RobustSoliton)
163
+
164
+ from .crv_types import (ContinuousRV, Arcsin, Benini, Beta, BetaNoncentral,
165
+ BetaPrime, BoundedPareto, Cauchy, Chi, ChiNoncentral, ChiSquared,
166
+ Dagum, Davis, Erlang, ExGaussian, Exponential, ExponentialPower,
167
+ FDistribution, FisherZ, Frechet, Gamma, GammaInverse, GaussianInverse,
168
+ Gompertz, Gumbel, Kumaraswamy, Laplace, Levy, Logistic, LogCauchy,
169
+ LogLogistic, LogitNormal, LogNormal, Lomax, Maxwell, Moyal, Nakagami,
170
+ Normal, Pareto, QuadraticU, RaisedCosine, Rayleigh, Reciprocal,
171
+ StudentT, PowerFunction, ShiftedGompertz, Trapezoidal, Triangular,
172
+ Uniform, UniformSum, VonMises, Wald, Weibull, WignerSemicircle,
173
+ ContinuousDistributionHandmade)
174
+
175
+ from .drv_types import (FlorySchulz, Geometric, Hermite, Logarithmic, NegativeBinomial, Poisson,
176
+ Skellam, YuleSimon, Zeta, DiscreteRV, DiscreteDistributionHandmade)
177
+
178
+ from .joint_rv_types import (JointRV, Dirichlet,
179
+ GeneralizedMultivariateLogGamma, GeneralizedMultivariateLogGammaOmega,
180
+ Multinomial, MultivariateBeta, MultivariateEwens, MultivariateT,
181
+ NegativeMultinomial, NormalGamma, MultivariateNormal, MultivariateLaplace,
182
+ marginal_distribution)
183
+
184
+ from .stochastic_process_types import (StochasticProcess,
185
+ DiscreteTimeStochasticProcess, DiscreteMarkovChain,
186
+ TransitionMatrixOf, StochasticStateSpaceOf, GeneratorMatrixOf,
187
+ ContinuousMarkovChain, BernoulliProcess, PoissonProcess, WienerProcess,
188
+ GammaProcess)
189
+
190
+ from .random_matrix_models import (CircularEnsemble, CircularUnitaryEnsemble,
191
+ CircularOrthogonalEnsemble, CircularSymplecticEnsemble,
192
+ GaussianEnsemble, GaussianUnitaryEnsemble, GaussianOrthogonalEnsemble,
193
+ GaussianSymplecticEnsemble, joint_eigen_distribution,
194
+ JointEigenDistribution, level_spacing_distribution)
195
+
196
+ from .matrix_distributions import MatrixGamma, Wishart, MatrixNormal, MatrixStudentT
197
+
198
+ from .symbolic_probability import (Probability, Expectation, Variance,
199
+ Covariance, Moment, CentralMoment)
200
+
201
+ from .symbolic_multivariate_probability import (ExpectationMatrix, VarianceMatrix,
202
+ CrossCovarianceMatrix)
parrot/lib/python3.10/site-packages/sympy/stats/__pycache__/crv.cpython-310.pyc ADDED
Binary file (20.5 kB). View file
 
parrot/lib/python3.10/site-packages/sympy/stats/__pycache__/frv_types.cpython-310.pyc ADDED
Binary file (27.4 kB). View file
 
parrot/lib/python3.10/site-packages/sympy/stats/__pycache__/joint_rv.cpython-310.pyc ADDED
Binary file (18.7 kB). View file
 
parrot/lib/python3.10/site-packages/sympy/stats/__pycache__/random_matrix_models.cpython-310.pyc ADDED
Binary file (16.9 kB). View file
 
parrot/lib/python3.10/site-packages/sympy/stats/__pycache__/rv_interface.cpython-310.pyc ADDED
Binary file (14 kB). View file
 
parrot/lib/python3.10/site-packages/sympy/stats/__pycache__/stochastic_process.cpython-310.pyc ADDED
Binary file (2.71 kB). View file
 
parrot/lib/python3.10/site-packages/sympy/stats/__pycache__/symbolic_probability.cpython-310.pyc ADDED
Binary file (21.3 kB). View file
 
parrot/lib/python3.10/site-packages/sympy/stats/crv.py ADDED
@@ -0,0 +1,570 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Continuous Random Variables Module
3
+
4
+ See Also
5
+ ========
6
+ sympy.stats.crv_types
7
+ sympy.stats.rv
8
+ sympy.stats.frv
9
+ """
10
+
11
+
12
+ from sympy.core.basic import Basic
13
+ from sympy.core.cache import cacheit
14
+ from sympy.core.function import Lambda, PoleError
15
+ from sympy.core.numbers import (I, nan, oo)
16
+ from sympy.core.relational import (Eq, Ne)
17
+ from sympy.core.singleton import S
18
+ from sympy.core.symbol import (Dummy, symbols)
19
+ from sympy.core.sympify import _sympify, sympify
20
+ from sympy.functions.combinatorial.factorials import factorial
21
+ from sympy.functions.elementary.exponential import exp
22
+ from sympy.functions.elementary.piecewise import Piecewise
23
+ from sympy.functions.special.delta_functions import DiracDelta
24
+ from sympy.integrals.integrals import (Integral, integrate)
25
+ from sympy.logic.boolalg import (And, Or)
26
+ from sympy.polys.polyerrors import PolynomialError
27
+ from sympy.polys.polytools import poly
28
+ from sympy.series.series import series
29
+ from sympy.sets.sets import (FiniteSet, Intersection, Interval, Union)
30
+ from sympy.solvers.solveset import solveset
31
+ from sympy.solvers.inequalities import reduce_rational_inequalities
32
+ from sympy.stats.rv import (RandomDomain, SingleDomain, ConditionalDomain, is_random,
33
+ ProductDomain, PSpace, SinglePSpace, random_symbols, NamedArgsMixin, Distribution)
34
+
35
+
36
+ class ContinuousDomain(RandomDomain):
37
+ """
38
+ A domain with continuous support
39
+
40
+ Represented using symbols and Intervals.
41
+ """
42
+ is_Continuous = True
43
+
44
+ def as_boolean(self):
45
+ raise NotImplementedError("Not Implemented for generic Domains")
46
+
47
+
48
+ class SingleContinuousDomain(ContinuousDomain, SingleDomain):
49
+ """
50
+ A univariate domain with continuous support
51
+
52
+ Represented using a single symbol and interval.
53
+ """
54
+ def compute_expectation(self, expr, variables=None, **kwargs):
55
+ if variables is None:
56
+ variables = self.symbols
57
+ if not variables:
58
+ return expr
59
+ if frozenset(variables) != frozenset(self.symbols):
60
+ raise ValueError("Values should be equal")
61
+ # assumes only intervals
62
+ return Integral(expr, (self.symbol, self.set), **kwargs)
63
+
64
+ def as_boolean(self):
65
+ return self.set.as_relational(self.symbol)
66
+
67
+
68
class ProductContinuousDomain(ProductDomain, ContinuousDomain):
    """
    A collection of independent domains with continuous support.

    Expectations are computed component-wise: each sub-domain
    integrates out only the variables it owns.
    """

    def compute_expectation(self, expr, variables=None, **kwargs):
        """Integrate ``expr`` over the requested variables, one
        component domain at a time."""
        requested = frozenset(self.symbols if variables is None else variables)
        for component in self.domains:
            owned = requested & frozenset(component.symbols)
            if owned:
                expr = component.compute_expectation(expr, owned, **kwargs)
        return expr

    def as_boolean(self):
        # The product domain holds iff every component domain holds.
        component_conditions = (component.as_boolean() for component in self.domains)
        return And(*component_conditions)
84
+
85
+
86
class ConditionalContinuousDomain(ContinuousDomain, ConditionalDomain):
    """
    A domain with continuous support that has been further restricted by a
    condition such as $x > 3$.
    """

    def compute_expectation(self, expr, variables=None, **kwargs):
        """Integrate ``expr`` over the restricted domain.

        Builds the unrestricted integral from ``self.fulldomain``, then
        walks the condition tree, either multiplying DiracDeltas into the
        integrand (for equalities) or shrinking integration limits (for
        univariate inequalities).  Returns an unevaluated ``Integral``.
        """
        if variables is None:
            variables = self.symbols
        if not variables:
            return expr
        # Extract the full integral
        fullintgrl = self.fulldomain.compute_expectation(expr, variables)
        # separate into integrand and limits
        integrand, limits = fullintgrl.function, list(fullintgrl.limits)

        # Worklist of conditions still to be folded into the integral;
        # And-conditions are flattened back onto the stack.
        conditions = [self.condition]
        while conditions:
            cond = conditions.pop()
            if cond.is_Boolean:
                if isinstance(cond, And):
                    conditions.extend(cond.args)
                elif isinstance(cond, Or):
                    raise NotImplementedError("Or not implemented here")
            elif cond.is_Relational:
                if cond.is_Equality:
                    # Add the appropriate Delta to the integrand
                    integrand *= DiracDelta(cond.lhs - cond.rhs)
                else:
                    symbols = cond.free_symbols & set(self.symbols)
                    if len(symbols) != 1:  # Can't handle x > y
                        raise NotImplementedError(
                            "Multivariate Inequalities not yet implemented")
                    # Can handle x > 0
                    symbol = symbols.pop()
                    # Find the limit with x, such as (x, -oo, oo)
                    for i, limit in enumerate(limits):
                        if limit[0] == symbol:
                            # Make condition into an Interval like [0, oo]
                            cintvl = reduce_rational_inequalities_wrap(
                                cond, symbol)
                            # Make limit into an Interval like [-oo, oo]
                            lintvl = Interval(limit[1], limit[2])
                            # Intersect them to get [0, oo]
                            intvl = cintvl.intersect(lintvl)
                            # Put back into limits list
                            limits[i] = (symbol, intvl.left, intvl.right)
            else:
                raise TypeError(
                    "Condition %s is not a relational or Boolean" % cond)

        return Integral(integrand, *limits, **kwargs)

    def as_boolean(self):
        # Restricted domain = unrestricted domain AND the extra condition.
        return And(self.fulldomain.as_boolean(), self.condition)

    @property
    def set(self):
        # Only the univariate case has a well-defined point set: the
        # base interval intersected with the condition's interval.
        if len(self.symbols) == 1:
            return (self.fulldomain.set & reduce_rational_inequalities_wrap(
                self.condition, tuple(self.symbols)[0]))
        else:
            raise NotImplementedError(
                "Set of Conditional Domain not Implemented")
150
+
151
+
152
class ContinuousDistribution(Distribution):
    """Base class for continuous distributions.

    Calling a distribution instance evaluates its pdf, so a
    distribution can be used directly as a density function.
    """
    def __call__(self, *args):
        return self.pdf(*args)
155
+
156
+
157
class SingleContinuousDistribution(ContinuousDistribution, NamedArgsMixin):
    """ Continuous distribution of a single variable.

    Explanation
    ===========

    Serves as superclass for Normal/Exponential/UniformDistribution etc....

    Represented by parameters for each of the specific classes.  E.g
    NormalDistribution is represented by a mean and standard deviation.

    Provides methods for pdf, cdf, and sampling.

    See Also
    ========

    sympy.stats.crv_types.*
    """

    # Default support is the whole real line; subclasses override.
    set = Interval(-oo, oo)

    def __new__(cls, *args):
        # Sympify all parameters so they are stored as SymPy objects.
        args = list(map(sympify, args))
        return Basic.__new__(cls, *args)

    @staticmethod
    def check(*args):
        # Parameter validation hook; subclasses override to raise on
        # invalid parameters.  The base class accepts anything.
        pass

    @cacheit
    def compute_cdf(self, **kwargs):
        """ Compute the CDF from the PDF.

        Returns a Lambda.
        """
        x, z = symbols('x, z', real=True, cls=Dummy)
        left_bound = self.set.start

        # CDF is integral of PDF from left bound to z
        pdf = self.pdf(x)
        cdf = integrate(pdf.doit(), (x, left_bound, z), **kwargs)
        # CDF Ensure that CDF left of left_bound is zero
        cdf = Piecewise((cdf, z >= left_bound), (0, True))
        return Lambda(z, cdf)

    def _cdf(self, x):
        # Closed-form CDF hook; subclasses override.  None means
        # "not available, compute by integration".
        return None

    def cdf(self, x, **kwargs):
        """ Cumulative density function """
        # Prefer the closed-form CDF when available and no integration
        # options were requested.
        if len(kwargs) == 0:
            cdf = self._cdf(x)
            if cdf is not None:
                return cdf
        return self.compute_cdf(**kwargs)(x)

    @cacheit
    def compute_characteristic_function(self, **kwargs):
        """ Compute the characteristic function from the PDF.

        Returns a Lambda.
        """
        x, t = symbols('x, t', real=True, cls=Dummy)
        pdf = self.pdf(x)
        cf = integrate(exp(I*t*x)*pdf, (x, self.set))
        return Lambda(t, cf)

    def _characteristic_function(self, t):
        # Closed-form characteristic function hook; subclasses override.
        return None

    def characteristic_function(self, t, **kwargs):
        """ Characteristic function """
        if len(kwargs) == 0:
            cf = self._characteristic_function(t)
            if cf is not None:
                return cf
        return self.compute_characteristic_function(**kwargs)(t)

    @cacheit
    def compute_moment_generating_function(self, **kwargs):
        """ Compute the moment generating function from the PDF.

        Returns a Lambda.
        """
        x, t = symbols('x, t', real=True, cls=Dummy)
        pdf = self.pdf(x)
        mgf = integrate(exp(t * x) * pdf, (x, self.set))
        return Lambda(t, mgf)

    def _moment_generating_function(self, t):
        # Closed-form MGF hook; subclasses override.
        return None

    def moment_generating_function(self, t, **kwargs):
        """ Moment generating function """
        if not kwargs:
            mgf = self._moment_generating_function(t)
            if mgf is not None:
                return mgf
        return self.compute_moment_generating_function(**kwargs)(t)

    def expectation(self, expr, var, evaluate=True, **kwargs):
        """ Expectation of expression over distribution """
        if evaluate:
            try:
                # Fast path: if expr is polynomial in var, read moments
                # off the MGF's Taylor expansion (k-th moment is
                # k! * coefficient of t**k) instead of integrating.
                p = poly(expr, var)
                if p.is_zero:
                    return S.Zero
                t = Dummy('t', real=True)
                mgf = self._moment_generating_function(t)
                if mgf is None:
                    # No closed-form MGF: fall back to direct integration.
                    return integrate(expr * self.pdf(var), (var, self.set), **kwargs)
                deg = p.degree()
                taylor = poly(series(mgf, t, 0, deg + 1).removeO(), t)
                result = 0
                for k in range(deg + 1):
                    result += p.coeff_monomial(var ** k) * taylor.coeff_monomial(t ** k) * factorial(k)
                return result
            except PolynomialError:
                # expr is not polynomial in var: integrate directly.
                return integrate(expr * self.pdf(var), (var, self.set), **kwargs)
        else:
            # Leave the expectation unevaluated.
            return Integral(expr * self.pdf(var), (var, self.set), **kwargs)

    @cacheit
    def compute_quantile(self, **kwargs):
        """ Compute the Quantile from the PDF.

        Returns a Lambda.
        """
        x, p = symbols('x, p', real=True, cls=Dummy)
        left_bound = self.set.start

        pdf = self.pdf(x)
        # CDF evaluated at x (dummy reuse as both integration variable
        # and upper bound is safe for SymPy's Integral).
        cdf = integrate(pdf, (x, left_bound, x), **kwargs)
        # Invert the CDF; the quantile is undefined outside [0, 1].
        quantile = solveset(cdf - p, x, self.set)
        return Lambda(p, Piecewise((quantile, (p >= 0) & (p <= 1)), (nan, True)))

    def _quantile(self, x):
        # Closed-form quantile hook; subclasses override.
        return None

    def quantile(self, x, **kwargs):
        """ Quantile (inverse CDF) function """
        if len(kwargs) == 0:
            quantile = self._quantile(x)
            if quantile is not None:
                return quantile
        return self.compute_quantile(**kwargs)(x)
303
+
304
+
305
class ContinuousPSpace(PSpace):
    """ Continuous Probability Space

    Represents the likelihood of an event space defined over a continuum.

    Represented with a ContinuousDomain and a PDF (Lambda-Like)
    """

    is_Continuous = True
    is_real = True

    @property
    def pdf(self):
        # Joint density expressed in the domain's symbols.
        return self.density(*self.domain.symbols)

    def compute_expectation(self, expr, rvs=None, evaluate=False, **kwargs):
        """Expectation of ``expr`` with respect to the random symbols
        ``rvs`` (all of this space's values by default)."""
        if rvs is None:
            rvs = self.values
        else:
            rvs = frozenset(rvs)

        # Replace random symbols with their underlying plain symbols
        # before handing off to the domain's integrator.
        expr = expr.xreplace({rv: rv.symbol for rv in rvs})

        domain_symbols = frozenset(rv.symbol for rv in rvs)

        return self.domain.compute_expectation(self.pdf * expr,
                domain_symbols, **kwargs)

    def compute_density(self, expr, **kwargs):
        """Density of ``expr`` as a Lambda of one real variable."""
        # Common case Density(X) where X in self.values
        if expr in self.values:
            # Marginalize all other random symbols out of the density
            randomsymbols = tuple(set(self.values) - frozenset([expr]))
            symbols = tuple(rs.symbol for rs in randomsymbols)
            pdf = self.domain.compute_expectation(self.pdf, symbols, **kwargs)
            return Lambda(expr.symbol, pdf)

        # General case: density of f(X) via E[DiracDelta(f(X) - z)].
        z = Dummy('z', real=True)
        return Lambda(z, self.compute_expectation(DiracDelta(expr - z), **kwargs))

    @cacheit
    def compute_cdf(self, expr, **kwargs):
        """CDF of ``expr`` as a Lambda; only defined on interval domains."""
        if not self.domain.set.is_Interval:
            raise ValueError(
                "CDF not well defined on multivariate expressions")

        d = self.compute_density(expr, **kwargs)
        x, z = symbols('x, z', real=True, cls=Dummy)
        left_bound = self.domain.set.start

        # CDF is integral of PDF from left bound to z
        cdf = integrate(d(x), (x, left_bound, z), **kwargs)
        # CDF Ensure that CDF left of left_bound is zero
        cdf = Piecewise((cdf, z >= left_bound), (0, True))
        return Lambda(z, cdf)

    @cacheit
    def compute_characteristic_function(self, expr, **kwargs):
        """Characteristic function of ``expr`` as a Lambda."""
        if not self.domain.set.is_Interval:
            raise NotImplementedError("Characteristic function of multivariate expressions not implemented")

        d = self.compute_density(expr, **kwargs)
        x, t = symbols('x, t', real=True, cls=Dummy)
        cf = integrate(exp(I*t*x)*d(x), (x, -oo, oo), **kwargs)
        return Lambda(t, cf)

    @cacheit
    def compute_moment_generating_function(self, expr, **kwargs):
        """Moment generating function of ``expr`` as a Lambda."""
        if not self.domain.set.is_Interval:
            raise NotImplementedError("Moment generating function of multivariate expressions not implemented")

        d = self.compute_density(expr, **kwargs)
        x, t = symbols('x, t', real=True, cls=Dummy)
        mgf = integrate(exp(t * x) * d(x), (x, -oo, oo), **kwargs)
        return Lambda(t, mgf)

    @cacheit
    def compute_quantile(self, expr, **kwargs):
        """Quantile (inverse CDF) of ``expr`` as a Lambda."""
        if not self.domain.set.is_Interval:
            raise ValueError(
                "Quantile not well defined on multivariate expressions")

        d = self.compute_cdf(expr, **kwargs)
        x = Dummy('x', real=True)
        p = Dummy('p', positive=True)

        quantile = solveset(d(x) - p, x, self.set)

        return Lambda(p, quantile)

    def probability(self, condition, **kwargs):
        """Probability of ``condition`` holding over this space."""
        z = Dummy('z', real=True)
        # Ne(a, b) is handled as 1 - P(Eq(a, b)).
        cond_inv = False
        if isinstance(condition, Ne):
            condition = Eq(condition.args[0], condition.args[1])
            cond_inv = True
        # Univariate case can be handled by where
        try:
            domain = self.where(condition)
            rv = [rv for rv in self.values if rv.symbol == domain.symbol][0]
            # Integrate out all other random variables
            pdf = self.compute_density(rv, **kwargs)
            # return S.Zero if `domain` is empty set
            if domain.set is S.EmptySet or isinstance(domain.set, FiniteSet):
                return S.Zero if not cond_inv else S.One
            if isinstance(domain.set, Union):
                # Sum probability mass over each interval piece.
                return sum(
                     Integral(pdf(z), (z, subset), **kwargs) for subset in
                     domain.set.args if isinstance(subset, Interval))
            # Integrate out the last variable over the special domain
            return Integral(pdf(z), (z, domain.set), **kwargs)

        # Other cases can be turned into univariate case
        # by computing a density handled by density computation
        except NotImplementedError:
            from sympy.stats.rv import density
            # Rewrite the condition as expr <op> comp with expr random.
            expr = condition.lhs - condition.rhs
            if not is_random(expr):
                dens = self.density
                comp = condition.rhs
            else:
                dens = density(expr, **kwargs)
                comp = 0
            if not isinstance(dens, ContinuousDistribution):
                from sympy.stats.crv_types import ContinuousDistributionHandmade
                dens = ContinuousDistributionHandmade(dens, set=self.domain.set)
            # Turn problem into univariate case
            space = SingleContinuousPSpace(z, dens)
            result = space.probability(condition.__class__(space.value, comp))
            return result if not cond_inv else S.One - result

    def where(self, condition):
        """Domain on which ``condition`` holds; univariate only."""
        rvs = frozenset(random_symbols(condition))
        if not (len(rvs) == 1 and rvs.issubset(self.values)):
            raise NotImplementedError(
                "Multiple continuous random variables not supported")
        rv = tuple(rvs)[0]
        interval = reduce_rational_inequalities_wrap(condition, rv)
        interval = interval.intersect(self.domain.set)
        return SingleContinuousDomain(rv.symbol, interval)

    def conditional_space(self, condition, normalize=True, **kwargs):
        """Probability space conditioned on ``condition``; optionally
        renormalizes the density so it integrates to one."""
        condition = condition.xreplace({rv: rv.symbol for rv in self.values})
        domain = ConditionalContinuousDomain(self.domain, condition)
        if normalize:
            # create a clone of the variable to
            # make sure that variables in nested integrals are different
            # from the variables outside the integral
            # this makes sure that they are evaluated separately
            # and in the correct order
            replacement = {rv: Dummy(str(rv)) for rv in self.symbols}
            norm = domain.compute_expectation(self.pdf, **kwargs)
            pdf = self.pdf / norm.xreplace(replacement)
            # XXX: Converting set to tuple. The order matters to Lambda though
            # so we shouldn't be starting with a set here...
            density = Lambda(tuple(domain.symbols), pdf)

        return ContinuousPSpace(domain, density)
463
+
464
+
465
class SingleContinuousPSpace(ContinuousPSpace, SinglePSpace):
    """
    A continuous probability space over a single univariate variable.

    These consist of a Symbol and a SingleContinuousDistribution

    This class is normally accessed through the various random variable
    functions, Normal, Exponential, Uniform, etc....
    """

    @property
    def set(self):
        # Support of the underlying distribution.
        return self.distribution.set

    @property
    def domain(self):
        return SingleContinuousDomain(sympify(self.symbol), self.set)

    def sample(self, size=(), library='scipy', seed=None):
        """
        Internal sample method.

        Returns dictionary mapping RandomSymbol to realization value.
        """
        return {self.value: self.distribution.sample(size, library=library, seed=seed)}

    def compute_expectation(self, expr, rvs=None, evaluate=False, **kwargs):
        """Expectation of ``expr``, delegated to the distribution when
        this space's value is among ``rvs``."""
        rvs = rvs or (self.value,)
        if self.value not in rvs:
            return expr

        expr = _sympify(expr)
        expr = expr.xreplace({rv: rv.symbol for rv in rvs})

        x = self.value.symbol
        try:
            return self.distribution.expectation(expr, x, evaluate=evaluate, **kwargs)
        except PoleError:
            # Series-based expectation failed: leave as an Integral.
            return Integral(expr * self.pdf, (x, self.set), **kwargs)

    def compute_cdf(self, expr, **kwargs):
        # Fast path: use the distribution's own CDF for the raw variable.
        if expr == self.value:
            z = Dummy("z", real=True)
            return Lambda(z, self.distribution.cdf(z, **kwargs))
        else:
            return ContinuousPSpace.compute_cdf(self, expr, **kwargs)

    def compute_characteristic_function(self, expr, **kwargs):
        if expr == self.value:
            t = Dummy("t", real=True)
            return Lambda(t, self.distribution.characteristic_function(t, **kwargs))
        else:
            return ContinuousPSpace.compute_characteristic_function(self, expr, **kwargs)

    def compute_moment_generating_function(self, expr, **kwargs):
        if expr == self.value:
            t = Dummy("t", real=True)
            return Lambda(t, self.distribution.moment_generating_function(t, **kwargs))
        else:
            return ContinuousPSpace.compute_moment_generating_function(self, expr, **kwargs)

    def compute_density(self, expr, **kwargs):
        # https://en.wikipedia.org/wiki/Random_variable#Functions_of_random_variables
        if expr == self.value:
            return self.density
        y = Dummy('y', real=True)

        # Invert y = expr(X) for X; each preimage branch contributes
        # f_X(g(y)) * |g'(y)| to the density of y.
        gs = solveset(expr - y, self.value, S.Reals)

        if isinstance(gs, Intersection):
            if len(gs.args) == 2 and gs.args[0] is S.Reals:
                gs = gs.args[1]
        if not gs.is_FiniteSet:
            raise ValueError("Can not solve %s for %s" % (expr, self.value))
        fx = self.compute_density(self.value)
        fy = sum(fx(g) * abs(g.diff(y)) for g in gs)
        return Lambda(y, fy)

    def compute_quantile(self, expr, **kwargs):
        # Fast path: use the distribution's own quantile for the raw variable.
        if expr == self.value:
            p = Dummy("p", real=True)
            return Lambda(p, self.distribution.quantile(p, **kwargs))
        else:
            return ContinuousPSpace.compute_quantile(self, expr, **kwargs)
550
+
551
def _reduce_inequalities(conditions, var, **kwargs):
    """Reduce rational inequalities, translating ``PolynomialError``
    into a ``ValueError`` with the offending condition.

    Thin wrapper around :func:`reduce_rational_inequalities` used by
    :func:`reduce_rational_inequalities_wrap` below.
    """
    try:
        return reduce_rational_inequalities(conditions, var, **kwargs)
    except PolynomialError as exc:
        # Chain the original PolynomialError so the root cause is
        # preserved in the traceback (PEP 3134).
        raise ValueError("Reduction of condition failed %s\n" % conditions[0]) from exc
556
+
557
+
558
def reduce_rational_inequalities_wrap(condition, var):
    """Convert a relational or And/Or combination of relationals over
    ``var`` into the SymPy set on which it holds.

    ``Or`` becomes a union of the pieces; ``And`` an intersection.
    """
    if condition.is_Relational:
        return _reduce_inequalities([[condition]], var, relational=False)
    if isinstance(condition, Or):
        return Union(*[_reduce_inequalities([[arg]], var, relational=False)
            for arg in condition.args])
    if isinstance(condition, And):
        intervals = [_reduce_inequalities([[arg]], var, relational=False)
            for arg in condition.args]
        # NOTE: the accumulator was previously named ``I``, shadowing
        # SymPy's imaginary unit imported at module level; also avoid
        # the redundant self-intersection of the first interval.
        result = intervals[0]
        for intvl in intervals[1:]:
            result = result.intersect(intvl)
        return result
parrot/lib/python3.10/site-packages/sympy/stats/drv_types.py ADDED
@@ -0,0 +1,835 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+
3
+ Contains
4
+ ========
5
+ FlorySchulz
6
+ Geometric
7
+ Hermite
8
+ Logarithmic
9
+ NegativeBinomial
10
+ Poisson
11
+ Skellam
12
+ YuleSimon
13
+ Zeta
14
+ """
15
+
16
+
17
+
18
+ from sympy.concrete.summations import Sum
19
+ from sympy.core.basic import Basic
20
+ from sympy.core.function import Lambda
21
+ from sympy.core.numbers import I
22
+ from sympy.core.relational import Eq
23
+ from sympy.core.singleton import S
24
+ from sympy.core.symbol import Dummy
25
+ from sympy.core.sympify import sympify
26
+ from sympy.functions.combinatorial.factorials import (binomial, factorial)
27
+ from sympy.functions.elementary.exponential import (exp, log)
28
+ from sympy.functions.elementary.integers import floor
29
+ from sympy.functions.elementary.miscellaneous import sqrt
30
+ from sympy.functions.elementary.piecewise import Piecewise
31
+ from sympy.functions.special.bessel import besseli
32
+ from sympy.functions.special.beta_functions import beta
33
+ from sympy.functions.special.hyper import hyper
34
+ from sympy.functions.special.zeta_functions import (polylog, zeta)
35
+ from sympy.stats.drv import SingleDiscreteDistribution, SingleDiscretePSpace
36
+ from sympy.stats.rv import _value_check, is_random
37
+
38
+
39
+ __all__ = ['FlorySchulz',
40
+ 'Geometric',
41
+ 'Hermite',
42
+ 'Logarithmic',
43
+ 'NegativeBinomial',
44
+ 'Poisson',
45
+ 'Skellam',
46
+ 'YuleSimon',
47
+ 'Zeta'
48
+ ]
49
+
50
+
51
def rv(symbol, cls, *args, **kwargs):
    """Construct a discrete random variable of distribution class ``cls``.

    Parameters are sympified, optionally validated via ``cls.check``
    (keyword ``check``, default True), and wrapped in a discrete pspace;
    random parameters yield a compound pspace instead.
    """
    sym_args = [sympify(arg) for arg in args]
    dist = cls(*sym_args)
    if kwargs.pop('check', True):
        dist.check(*sym_args)
    pspace = SingleDiscretePSpace(symbol, dist)
    if any(is_random(arg) for arg in sym_args):
        from sympy.stats.compound_rv import CompoundPSpace, CompoundDistribution
        pspace = CompoundPSpace(symbol, CompoundDistribution(dist))
    return pspace.value
61
+
62
+
63
class DiscreteDistributionHandmade(SingleDiscreteDistribution):
    """Discrete distribution defined directly by a user-supplied pdf
    (a Lambda) over a given support set."""
    _argnames = ('pdf',)

    def __new__(cls, pdf, set=S.Integers):
        # Store both the pdf and its support as constructor args.
        return Basic.__new__(cls, pdf, set)

    @property
    def set(self):
        # Support set is the second constructor argument.
        return self.args[1]

    @staticmethod
    def check(pdf, set):
        # Verify the pdf sums to 1 over the support (unless SymPy
        # cannot decide, in which case the check passes).
        x = Dummy('x')
        val = Sum(pdf(x), (x, set._inf, set._sup)).doit()
        _value_check(Eq(val, 1) != S.false, "The pdf is incorrect on the given set.")
78
+
79
+
80
+
81
def DiscreteRV(symbol, density, set=S.Integers, **kwargs):
    """
    Create a Discrete Random Variable given the following:

    Parameters
    ==========

    symbol : Symbol
        Represents name of the random variable.
    density : Expression containing symbol
        Represents probability density function.
    set : set
        Represents the region where the pdf is valid; by default it is
        the set of integers.
    check : bool
        If True, it will check whether the given density
        sums to 1 over the given set. If False, it
        will not perform this check. Default is False.

    Examples
    ========

    >>> from sympy.stats import DiscreteRV, P, E
    >>> from sympy import Rational, Symbol
    >>> x = Symbol('x')
    >>> n = 10
    >>> density = Rational(1, 10)
    >>> X = DiscreteRV(x, density, set=set(range(n)))
    >>> E(X)
    9/2
    >>> P(X>3)
    3/5

    Returns
    =======

    RandomSymbol

    """
    set = sympify(set)
    # Zero the density outside the support so the pdf is total.
    pdf = Piecewise((density, set.as_relational(symbol)), (0, True))
    pdf = Lambda(symbol, pdf)
    # have a default of False while `rv` should have a default of True
    kwargs['check'] = kwargs.pop('check', False)
    return rv(symbol.name, DiscreteDistributionHandmade, pdf, set, **kwargs)
125
+
126
+
127
+ #-------------------------------------------------------------------------------
128
+ # Flory-Schulz distribution ------------------------------------------------------------
129
+
130
class FlorySchulzDistribution(SingleDiscreteDistribution):
    """Flory-Schulz distribution with shape parameter ``a``;
    pmf f(k) = a**2 * k * (1 - a)**(k - 1) on the positive integers."""
    _argnames = ('a',)
    set = S.Naturals

    @staticmethod
    def check(a):
        _value_check((0 < a, a < 1), "a must be between 0 and 1")

    def pdf(self, k):
        a = self.a
        return a**2 * k * (1 - a)**(k - 1)

    def _characteristic_function(self, t):
        a = self.a
        w = exp(I*t)
        return a**2*w/(1 + (a - 1)*w)**2

    def _moment_generating_function(self, t):
        a = self.a
        w = exp(t)
        return a**2*w/(1 + (a - 1)*w)**2
149
+
150
+
151
def FlorySchulz(name, a):
    r"""
    Create a discrete random variable with a FlorySchulz distribution.

    The density of the FlorySchulz distribution is given by

    .. math::
        f(k) := (a^2) k (1 - a)^{k-1}

    Parameters
    ==========

    a : A real number between 0 and 1

    Returns
    =======

    RandomSymbol

    Examples
    ========

    >>> from sympy.stats import density, E, variance, FlorySchulz
    >>> from sympy import Symbol, S

    >>> a = S.One / 5
    >>> z = Symbol("z")

    >>> X = FlorySchulz("x", a)

    >>> density(X)(z)
    (5/4)**(1 - z)*z/25

    >>> E(X)
    9

    >>> variance(X)
    40

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Flory%E2%80%93Schulz_distribution
    """
    # Delegate construction and parameter validation to the shared helper.
    return rv(name, FlorySchulzDistribution, a)
196
+
197
+
198
+ #-------------------------------------------------------------------------------
199
+ # Geometric distribution ------------------------------------------------------------
200
+
201
class GeometricDistribution(SingleDiscreteDistribution):
    """Geometric distribution with success probability ``p``;
    pmf f(k) = p * (1 - p)**(k - 1) on the positive integers."""
    _argnames = ('p',)
    set = S.Naturals

    @staticmethod
    def check(p):
        _value_check((0 < p, p <= 1), "p must be between 0 and 1")

    def pdf(self, k):
        p = self.p
        return p * (1 - p)**(k - 1)

    def _characteristic_function(self, t):
        p = self.p
        w = exp(I*t)
        return p * w / (1 - (1 - p) * w)

    def _moment_generating_function(self, t):
        p = self.p
        w = exp(t)
        return p * w / (1 - (1 - p) * w)
219
+
220
+
221
def Geometric(name, p):
    r"""
    Create a discrete random variable with a Geometric distribution.

    Explanation
    ===========

    The density of the Geometric distribution is given by

    .. math::
        f(k) := p (1 - p)^{k - 1}

    Parameters
    ==========

    p : A probability between 0 and 1

    Returns
    =======

    RandomSymbol

    Examples
    ========

    >>> from sympy.stats import Geometric, density, E, variance
    >>> from sympy import Symbol, S

    >>> p = S.One / 5
    >>> z = Symbol("z")

    >>> X = Geometric("x", p)

    >>> density(X)(z)
    (5/4)**(1 - z)/5

    >>> E(X)
    5

    >>> variance(X)
    20

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Geometric_distribution
    .. [2] https://mathworld.wolfram.com/GeometricDistribution.html

    """
    # Delegate construction and parameter validation to the shared helper.
    return rv(name, GeometricDistribution, p)
271
+
272
+
273
+ #-------------------------------------------------------------------------------
274
+ # Hermite distribution ---------------------------------------------------------
275
+
276
+
277
class HermiteDistribution(SingleDiscreteDistribution):
    """Hermite distribution with rate parameters ``a1`` and ``a2``,
    supported on the non-negative integers."""
    _argnames = ('a1', 'a2')
    set = S.Naturals0

    @staticmethod
    def check(a1, a2):
        _value_check(a1.is_nonnegative, 'Parameter a1 must be >= 0.')
        _value_check(a2.is_nonnegative, 'Parameter a2 must be >= 0.')

    def pdf(self, k):
        a1, a2 = self.a1, self.a2
        term1 = exp(-(a1 + a2))
        j = Dummy("j", integer=True)
        num = a1**(k - 2*j) * a2**j
        den = factorial(k - 2*j) * factorial(j)
        # k//2 on a symbolic k yields floor(k/2), the standard upper
        # summation bound for the Hermite pmf.
        return term1 * Sum(num/den, (j, 0, k//2)).doit()

    def _moment_generating_function(self, t):
        a1, a2 = self.a1, self.a2
        term1 = a1 * (exp(t) - 1)
        term2 = a2 * (exp(2*t) - 1)
        return exp(term1 + term2)

    def _characteristic_function(self, t):
        a1, a2 = self.a1, self.a2
        term1 = a1 * (exp(I*t) - 1)
        term2 = a2 * (exp(2*I*t) - 1)
        return exp(term1 + term2)
305
+
306
def Hermite(name, a1, a2):
    r"""
    Create a discrete random variable with a Hermite distribution.

    Explanation
    ===========

    The density of the Hermite distribution is given by

    .. math::
        f(x):= e^{-a_1 -a_2}\sum_{j=0}^{\left \lfloor x/2 \right \rfloor}
                    \frac{a_{1}^{x-2j}a_{2}^{j}}{(x-2j)!j!}

    Parameters
    ==========

    a1 : A non-negative number.
    a2 : A non-negative number.

    Returns
    =======

    RandomSymbol

    Examples
    ========

    >>> from sympy.stats import Hermite, density, E, variance
    >>> from sympy import Symbol

    >>> a1 = Symbol("a1", positive=True)
    >>> a2 = Symbol("a2", positive=True)
    >>> x = Symbol("x")

    >>> H = Hermite("H", a1=5, a2=4)

    >>> density(H)(2)
    33*exp(-9)/2

    >>> E(H)
    13

    >>> variance(H)
    21

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Hermite_distribution

    """
    # Delegate construction and parameter validation to the shared helper.
    return rv(name, HermiteDistribution, a1, a2)
359
+
360
+
361
+ #-------------------------------------------------------------------------------
362
+ # Logarithmic distribution ------------------------------------------------------------
363
+
364
class LogarithmicDistribution(SingleDiscreteDistribution):
    """Logarithmic (log-series) distribution with parameter ``p``;
    pmf f(k) = -p**k / (k * log(1 - p)) on the positive integers."""
    _argnames = ('p',)

    set = S.Naturals

    @staticmethod
    def check(p):
        _value_check((p > 0, p < 1), "p should be between 0 and 1")

    def pdf(self, k):
        p = self.p
        return -p**k / (k * log(1 - p))

    def _characteristic_function(self, t):
        p = self.p
        return log(1 - p * exp(I*t)) / log(1 - p)

    def _moment_generating_function(self, t):
        p = self.p
        return log(1 - p * exp(t)) / log(1 - p)
384
+
385
+
386
def Logarithmic(name, p):
    r"""
    Create a discrete random variable with a Logarithmic distribution.

    Explanation
    ===========

    The density of the Logarithmic distribution is given by

    .. math::
        f(k) := \frac{-p^k}{k \ln{(1 - p)}}

    Parameters
    ==========

    p : A value between 0 and 1

    Returns
    =======

    RandomSymbol

    Examples
    ========

    >>> from sympy.stats import Logarithmic, density, E, variance
    >>> from sympy import Symbol, S

    >>> p = S.One / 5
    >>> z = Symbol("z")

    >>> X = Logarithmic("x", p)

    >>> density(X)(z)
    -1/(5**z*z*log(4/5))

    >>> E(X)
    -1/(-4*log(5) + 8*log(2))

    >>> variance(X)
    -1/((-4*log(5) + 8*log(2))*(-2*log(5) + 4*log(2))) + 1/(-64*log(2)*log(5) + 64*log(2)**2 + 16*log(5)**2) - 10/(-32*log(5) + 64*log(2))

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Logarithmic_distribution
    .. [2] https://mathworld.wolfram.com/LogarithmicDistribution.html

    """
    # Delegate construction and parameter validation to the shared helper.
    return rv(name, LogarithmicDistribution, p)
436
+
437
+
438
+ #-------------------------------------------------------------------------------
439
+ # Negative binomial distribution ------------------------------------------------------------
440
+
441
class NegativeBinomialDistribution(SingleDiscreteDistribution):
    """Negative binomial distribution with parameters ``r`` and ``p``;
    pmf f(k) = binomial(k + r - 1, k) * (1 - p)**r * p**k on the
    non-negative integers."""
    _argnames = ('r', 'p')
    set = S.Naturals0

    @staticmethod
    def check(r, p):
        _value_check(r > 0, 'r should be positive')
        _value_check((p > 0, p < 1), 'p should be between 0 and 1')

    def pdf(self, k):
        r, p = self.r, self.p
        return binomial(k + r - 1, k) * (1 - p)**r * p**k

    def _characteristic_function(self, t):
        r, p = self.r, self.p
        return ((1 - p) / (1 - p * exp(I*t)))**r

    def _moment_generating_function(self, t):
        r, p = self.r, self.p
        return ((1 - p) / (1 - p * exp(t)))**r
467
+
468
def NegativeBinomial(name, r, p):
    r"""
    Create a discrete random variable with a Negative Binomial distribution.

    Explanation
    ===========

    The density of the Negative Binomial distribution is given by

    .. math::
        f(k) := \binom{k + r - 1}{k} (1 - p)^r p^k

    Parameters
    ==========

    r : A positive value
    p : A value between 0 and 1

    Returns
    =======

    RandomSymbol

    Examples
    ========

    >>> from sympy.stats import NegativeBinomial, density, E, variance
    >>> from sympy import Symbol, S

    >>> r = 5
    >>> p = S.One / 5
    >>> z = Symbol("z")

    >>> X = NegativeBinomial("x", r, p)

    >>> density(X)(z)
    1024*binomial(z + 4, z)/(3125*5**z)

    >>> E(X)
    5/4

    >>> variance(X)
    25/16

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Negative_binomial_distribution
    .. [2] https://mathworld.wolfram.com/NegativeBinomialDistribution.html

    """
    # Delegate construction and parameter validation to the shared helper.
    return rv(name, NegativeBinomialDistribution, r, p)
520
+
521
+
522
+ #-------------------------------------------------------------------------------
523
+ # Poisson distribution ------------------------------------------------------------
524
+
525
class PoissonDistribution(SingleDiscreteDistribution):
    """Poisson distribution with rate parameter ``lamda`` on {0, 1, 2, ...}."""

    _argnames = ('lamda',)

    set = S.Naturals0

    @staticmethod
    def check(lamda):
        # The rate of a Poisson process must be strictly positive.
        _value_check(lamda > 0, "Lambda must be positive")

    def pdf(self, k):
        # pmf: lamda**k * e**(-lamda) / k!
        return self.lamda**k / factorial(k) * exp(-self.lamda)

    def _characteristic_function(self, t):
        # phi(t) = exp(lamda * (e^{it} - 1))
        return exp(self.lamda * (exp(I*t) - 1))

    def _moment_generating_function(self, t):
        # M(t) = exp(lamda * (e^{t} - 1))
        return exp(self.lamda * (exp(t) - 1))
542
+
543
+
544
def Poisson(name, lamda):
    r"""
    Create a discrete random variable with a Poisson distribution.

    Explanation
    ===========

    The density of the Poisson distribution is given by

    .. math::
        f(k) := \frac{\lambda^{k} e^{- \lambda}}{k!}

    Parameters
    ==========

    lamda : Positive number, the rate (expected number of events)

    Returns
    =======

    RandomSymbol

    Examples
    ========

    >>> from sympy.stats import Poisson, density, E, variance
    >>> from sympy import Symbol, simplify

    >>> rate = Symbol("lambda", positive=True)
    >>> z = Symbol("z")

    >>> X = Poisson("x", rate)

    >>> density(X)(z)
    lambda**z*exp(-lambda)/factorial(z)

    >>> E(X)
    lambda

    >>> simplify(variance(X))
    lambda

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Poisson_distribution
    .. [2] https://mathworld.wolfram.com/PoissonDistribution.html

    """
    return rv(name, PoissonDistribution, lamda)
594
+
595
+
596
+ # -----------------------------------------------------------------------------
597
+ # Skellam distribution --------------------------------------------------------
598
+
599
+
600
class SkellamDistribution(SingleDiscreteDistribution):
    """Skellam distribution: the law of N1 - N2 for independent Poisson
    variables N1 ~ Poisson(mu1) and N2 ~ Poisson(mu2). Supported on all
    integers (the difference can be negative)."""

    _argnames = ('mu1', 'mu2')
    set = S.Integers

    @staticmethod
    def check(mu1, mu2):
        # Both underlying Poisson rates must be non-negative.
        _value_check(mu1 >= 0, 'Parameter mu1 must be >= 0')
        _value_check(mu2 >= 0, 'Parameter mu2 must be >= 0')

    def pdf(self, k):
        mu1, mu2 = self.mu1, self.mu2
        # e^{-(mu1+mu2)} (mu1/mu2)^{k/2} I_k(2 sqrt(mu1 mu2)),
        # where I_k is the modified Bessel function of the first kind.
        prefactor = exp(-(mu1 + mu2)) * (mu1 / mu2) ** (k / 2)
        bessel_part = besseli(k, 2 * sqrt(mu1 * mu2))
        return prefactor * bessel_part

    def _cdf(self, x):
        # No closed form exists; callers fall back to summation.
        raise NotImplementedError(
            "Skellam doesn't have closed form for the CDF.")

    def _characteristic_function(self, t):
        mu1, mu2 = self.mu1, self.mu2
        return exp(-(mu1 + mu2) + mu1 * exp(I * t) + mu2 * exp(-I * t))

    def _moment_generating_function(self, t):
        mu1, mu2 = self.mu1, self.mu2
        return exp(-(mu1 + mu2) + mu1 * exp(t) + mu2 * exp(-t))
626
+
627
+
628
def Skellam(name, mu1, mu2):
    r"""
    Create a discrete random variable with a Skellam distribution.

    Explanation
    ===========

    The Skellam is the distribution of the difference N1 - N2
    of two statistically independent random variables N1 and N2
    each Poisson-distributed with respective expected values mu1 and mu2.

    The density of the Skellam distribution is given by

    .. math::
        f(k) := e^{-(\mu_1+\mu_2)}(\frac{\mu_1}{\mu_2})^{k/2}I_k(2\sqrt{\mu_1\mu_2})

    Parameters
    ==========

    mu1 : A non-negative value
    mu2 : A non-negative value

    Returns
    =======

    RandomSymbol

    Examples
    ========

    >>> from sympy.stats import Skellam, density, E, variance
    >>> from sympy import Symbol, pprint

    >>> z = Symbol("z", integer=True)
    >>> mu1 = Symbol("mu1", positive=True)
    >>> mu2 = Symbol("mu2", positive=True)
    >>> X = Skellam("x", mu1, mu2)

    >>> pprint(density(X)(z), use_unicode=False)
         z
         -
         2
    /mu1\   -mu1 - mu2        /       _____   _____\
    |---| *e          *besseli\z, 2*\/ mu1 *\/ mu2 /
    \mu2/
    >>> E(X)
    mu1 - mu2
    >>> variance(X).expand()
    mu1 + mu2

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Skellam_distribution

    """
    return rv(name, SkellamDistribution, mu1, mu2)
685
+
686
+
687
+ #-------------------------------------------------------------------------------
688
+ # Yule-Simon distribution ------------------------------------------------------------
689
+
690
class YuleSimonDistribution(SingleDiscreteDistribution):
    """Yule-Simon distribution on the positive integers with shape ``rho``,
    pmf ``rho * B(k, rho + 1)`` where ``B`` is the beta function."""

    _argnames = ('rho',)
    set = S.Naturals

    @staticmethod
    def check(rho):
        # The shape parameter must be strictly positive.
        _value_check(rho > 0, 'rho should be positive')

    def pdf(self, k):
        rho = self.rho
        return rho * beta(k, rho + 1)

    def _cdf(self, x):
        # CDF is 1 - floor(x) * B(floor(x), rho + 1) for x >= 1, else 0.
        return Piecewise((1 - floor(x) * beta(floor(x), self.rho + 1), x >= 1), (0, True))

    def _characteristic_function(self, t):
        rho = self.rho
        # Expressed through the Gauss hypergeometric function 2F1.
        return rho * hyper((1, 1), (rho + 2,), exp(I*t)) * exp(I*t) / (rho + 1)

    def _moment_generating_function(self, t):
        rho = self.rho
        return rho * hyper((1, 1), (rho + 2,), exp(t)) * exp(t) / (rho + 1)
712
+
713
+
714
def YuleSimon(name, rho):
    r"""
    Create a discrete random variable with a Yule-Simon distribution.

    Explanation
    ===========

    The density of the Yule-Simon distribution is given by

    .. math::
        f(k) := \rho B(k, \rho + 1)

    Parameters
    ==========

    rho : A positive value, the shape parameter

    Returns
    =======

    RandomSymbol

    Examples
    ========

    >>> from sympy.stats import YuleSimon, density, E, variance
    >>> from sympy import Symbol, simplify

    >>> p = 5
    >>> z = Symbol("z")

    >>> X = YuleSimon("x", p)

    >>> density(X)(z)
    5*beta(z, 6)

    >>> simplify(E(X))
    5/4

    >>> simplify(variance(X))
    25/48

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Yule%E2%80%93Simon_distribution

    """
    return rv(name, YuleSimonDistribution, rho)
763
+
764
+
765
+ #-------------------------------------------------------------------------------
766
+ # Zeta distribution ------------------------------------------------------------
767
+
768
class ZetaDistribution(SingleDiscreteDistribution):
    """Zeta (Zipf) distribution on the positive integers with exponent ``s``,
    pmf ``1 / (k**s * zeta(s))``."""

    _argnames = ('s',)
    set = S.Naturals

    @staticmethod
    def check(s):
        # zeta(s) only converges (and the pmf normalizes) for s > 1.
        _value_check(s > 1, 's should be greater than 1')

    def pdf(self, k):
        s = self.s
        return 1 / (k**s * zeta(s))

    def _characteristic_function(self, t):
        # phi(t) = Li_s(e^{it}) / zeta(s), Li_s being the polylogarithm.
        return polylog(self.s, exp(I*t)) / zeta(self.s)

    def _moment_generating_function(self, t):
        return polylog(self.s, exp(t)) / zeta(self.s)
785
+
786
+
787
def Zeta(name, s):
    r"""
    Create a discrete random variable with a Zeta distribution.

    Explanation
    ===========

    The density of the Zeta distribution is given by

    .. math::
        f(k) := \frac{1}{k^s \zeta{(s)}}

    Parameters
    ==========

    s : A value greater than 1, the exponent of the distribution

    Returns
    =======

    RandomSymbol

    Examples
    ========

    >>> from sympy.stats import Zeta, density, E, variance
    >>> from sympy import Symbol

    >>> s = 5
    >>> z = Symbol("z")

    >>> X = Zeta("x", s)

    >>> density(X)(z)
    1/(z**5*zeta(5))

    >>> E(X)
    pi**4/(90*zeta(5))

    >>> variance(X)
    -pi**8/(8100*zeta(5)**2) + zeta(3)/zeta(5)

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Zeta_distribution

    """
    return rv(name, ZetaDistribution, s)
parrot/lib/python3.10/site-packages/sympy/stats/joint_rv.py ADDED
@@ -0,0 +1,426 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Joint Random Variables Module
3
+
4
+ See Also
5
+ ========
6
+ sympy.stats.rv
7
+ sympy.stats.frv
8
+ sympy.stats.crv
9
+ sympy.stats.drv
10
+ """
11
+ from math import prod
12
+
13
+ from sympy.core.basic import Basic
14
+ from sympy.core.function import Lambda
15
+ from sympy.core.singleton import S
16
+ from sympy.core.symbol import (Dummy, Symbol)
17
+ from sympy.core.sympify import sympify
18
+ from sympy.sets.sets import ProductSet
19
+ from sympy.tensor.indexed import Indexed
20
+ from sympy.concrete.products import Product
21
+ from sympy.concrete.summations import Sum, summation
22
+ from sympy.core.containers import Tuple
23
+ from sympy.integrals.integrals import Integral, integrate
24
+ from sympy.matrices import ImmutableMatrix, matrix2numpy, list2numpy
25
+ from sympy.stats.crv import SingleContinuousDistribution, SingleContinuousPSpace
26
+ from sympy.stats.drv import SingleDiscreteDistribution, SingleDiscretePSpace
27
+ from sympy.stats.rv import (ProductPSpace, NamedArgsMixin, Distribution,
28
+ ProductDomain, RandomSymbol, random_symbols,
29
+ SingleDomain, _symbol_converter)
30
+ from sympy.utilities.iterables import iterable
31
+ from sympy.utilities.misc import filldedent
32
+ from sympy.external import import_module
33
+
34
+ # __all__ = ['marginal_distribution']
35
+
36
class JointPSpace(ProductPSpace):
    """
    Represents a joint probability space. Represented using symbols for
    each component and a distribution.
    """
    def __new__(cls, sym, dist):
        # One-dimensional "joint" distributions degenerate to the matching
        # single-variable probability space.
        if isinstance(dist, SingleContinuousDistribution):
            return SingleContinuousPSpace(sym, dist)
        if isinstance(dist, SingleDiscreteDistribution):
            return SingleDiscretePSpace(sym, dist)
        sym = _symbol_converter(sym)
        return Basic.__new__(cls, sym, dist)

    @property
    def set(self):
        # Support of the joint distribution (delegated to the domain).
        return self.domain.set

    @property
    def symbol(self):
        # The base symbol naming the joint random variable.
        return self.args[0]

    @property
    def distribution(self):
        return self.args[1]

    @property
    def value(self):
        # Indexable random symbol exposed to users (supports value[i]).
        return JointRandomSymbol(self.symbol, self)

    @property
    def component_count(self):
        # Number of scalar components; may be symbolic when the support is
        # a symbolic Product, otherwise counted from the ProductSet.
        _set = self.distribution.set
        if isinstance(_set, ProductSet):
            return S(len(_set.args))
        elif isinstance(_set, Product):
            return _set.limits[0][-1]
        return S.One

    @property
    def pdf(self):
        # Joint pdf evaluated at the Indexed components of this space's symbol.
        sym = [Indexed(self.symbol, i) for i in range(self.component_count)]
        return self.distribution(*sym)

    @property
    def domain(self):
        # A compound distribution (one whose parameters are themselves random)
        # inherits the product of its parameters' domains.
        rvs = random_symbols(self.distribution)
        if not rvs:
            return SingleDomain(self.symbol, self.distribution.set)
        return ProductDomain(*[rv.pspace.domain for rv in rvs])

    def component_domain(self, index):
        # Support of a single component of the joint variable.
        return self.set.args[index]

    def marginal_distribution(self, *indices):
        """Integrate/sum out every component not listed in ``indices`` and
        return a Lambda in the remaining (Indexed) components."""
        count = self.component_count
        if count.atoms(Symbol):
            raise ValueError("Marginal distributions cannot be computed "
                             "for symbolic dimensions. It is a work under progress.")
        orig = [Indexed(self.symbol, i) for i in range(count)]
        # Plain Symbols stand in for the Indexed components during
        # integration; replace_dict maps them back at the end.
        all_syms = [Symbol(str(i)) for i in orig]
        replace_dict = dict(zip(all_syms, orig))
        sym = tuple(Symbol(str(Indexed(self.symbol, i))) for i in indices)
        limits = [[i,] for i in all_syms if i not in sym]
        index = 0
        for i in range(count):
            if i not in indices:
                # Attach each marginalized variable's support as its limit.
                limits[index].append(self.distribution.set.args[i])
                limits[index] = tuple(limits[index])
                index += 1
        if self.distribution.is_Continuous:
            f = Lambda(sym, integrate(self.distribution(*all_syms), *limits))
        elif self.distribution.is_Discrete:
            f = Lambda(sym, summation(self.distribution(*all_syms), *limits))
        return f.xreplace(replace_dict)

    def compute_expectation(self, expr, rvs=None, evaluate=False, **kwargs):
        # Expectation of expr over the components listed in rvs (all of them
        # by default); returns an unevaluated Integral over the joint pdf.
        syms = tuple(self.value[i] for i in range(self.component_count))
        rvs = rvs or syms
        if not any(i in rvs for i in syms):
            # expr does not involve this space at all.
            return expr
        expr = expr*self.pdf
        for rv in rvs:
            # Swap random symbols for plain integration variables.
            if isinstance(rv, Indexed):
                expr = expr.xreplace({rv: Indexed(str(rv.base), rv.args[1])})
            elif isinstance(rv, RandomSymbol):
                expr = expr.xreplace({rv: rv.symbol})
        if self.value in random_symbols(expr):
            raise NotImplementedError(filldedent('''
            Expectations of expression with unindexed joint random symbols
            cannot be calculated yet.'''))
        limits = tuple((Indexed(str(rv.base),rv.args[1]),
            self.distribution.set.args[rv.args[1]]) for rv in syms)
        return Integral(expr, *limits)

    def where(self, condition):
        raise NotImplementedError()

    def compute_density(self, expr):
        raise NotImplementedError()

    def sample(self, size=(), library='scipy', seed=None):
        """
        Internal sample method

        Returns dictionary mapping RandomSymbol to realization value.
        """
        return {RandomSymbol(self.symbol, self): self.distribution.sample(size,
                    library=library, seed=seed)}

    def probability(self, condition):
        raise NotImplementedError()
147
+
148
+
149
class SampleJointScipy:
    """Returns the sample from scipy of the given distribution"""
    def __new__(cls, dist, size, seed=None):
        return cls._sample_scipy(dist, size, seed)

    @classmethod
    def _sample_scipy(cls, dist, size, seed):
        """Sample from SciPy."""

        import numpy
        # Integers/None seed a fresh Generator; anything else is assumed to
        # already be a random state object.
        if seed is None or isinstance(seed, int):
            rand_state = numpy.random.default_rng(seed=seed)
        else:
            rand_state = seed
        from scipy import stats as scipy_stats

        samplers = {
            'MultivariateNormalDistribution': lambda dist, size: scipy_stats.multivariate_normal.rvs(
                mean=matrix2numpy(dist.mu).flatten(),
                cov=matrix2numpy(dist.sigma), size=size, random_state=rand_state),
            'MultivariateBetaDistribution': lambda dist, size: scipy_stats.dirichlet.rvs(
                alpha=list2numpy(dist.alpha, float).flatten(), size=size, random_state=rand_state),
            'MultinomialDistribution': lambda dist, size: scipy_stats.multinomial.rvs(
                n=int(dist.n), p=list2numpy(dist.p, float).flatten(), size=size, random_state=rand_state)
        }

        # Per-draw shape of each supported distribution, used for the final
        # reshape so that the output has shape ``size + event_shape``.
        event_shapes = {
            'MultivariateNormalDistribution': lambda dist: matrix2numpy(dist.mu).flatten().shape,
            'MultivariateBetaDistribution': lambda dist: list2numpy(dist.alpha).flatten().shape,
            'MultinomialDistribution': lambda dist: list2numpy(dist.p).flatten().shape
        }

        dist_name = dist.__class__.__name__
        if dist_name not in samplers:
            # Caller falls back to another library (or raises).
            return None

        samples = samplers[dist_name](dist, size)
        return samples.reshape(size + event_shapes[dist_name](dist))
187
+
188
class SampleJointNumpy:
    """Returns the sample from numpy of the given distribution"""

    def __new__(cls, dist, size, seed=None):
        return cls._sample_numpy(dist, size, seed)

    @classmethod
    def _sample_numpy(cls, dist, size, seed):
        """Sample from NumPy."""

        import numpy
        # Integers/None seed a fresh Generator; anything else is assumed to
        # already be a random state object.
        if seed is None or isinstance(seed, int):
            rand_state = numpy.random.default_rng(seed=seed)
        else:
            rand_state = seed

        samplers = {
            'MultivariateNormalDistribution': lambda dist, size: rand_state.multivariate_normal(
                mean=matrix2numpy(dist.mu, float).flatten(),
                cov=matrix2numpy(dist.sigma, float), size=size),
            'MultivariateBetaDistribution': lambda dist, size: rand_state.dirichlet(
                alpha=list2numpy(dist.alpha, float).flatten(), size=size),
            'MultinomialDistribution': lambda dist, size: rand_state.multinomial(
                n=int(dist.n), pvals=list2numpy(dist.p, float).flatten(), size=size)
        }

        # Per-draw shape, used to reshape the flat draw back to size + event.
        event_shapes = {
            'MultivariateNormalDistribution': lambda dist: matrix2numpy(dist.mu).flatten().shape,
            'MultivariateBetaDistribution': lambda dist: list2numpy(dist.alpha).flatten().shape,
            'MultinomialDistribution': lambda dist: list2numpy(dist.p).flatten().shape
        }

        dist_name = dist.__class__.__name__
        if dist_name not in samplers:
            # Caller falls back to another library (or raises).
            return None

        # NumPy samplers are called with the total number of draws and the
        # result is reshaped to the requested (possibly multi-dim) size.
        samples = samplers[dist_name](dist, prod(size))
        return samples.reshape(size + event_shapes[dist_name](dist))
226
+
227
class SampleJointPymc:
    """Returns the sample from pymc of the given distribution"""

    def __new__(cls, dist, size, seed=None):
        return cls._sample_pymc(dist, size, seed)

    @classmethod
    def _sample_pymc(cls, dist, size, seed):
        """Sample from PyMC."""

        # Prefer the modern 'pymc' package; fall back to legacy 'pymc3'.
        try:
            import pymc
        except ImportError:
            import pymc3 as pymc
        # Builders registering one pymc random variable named 'X' per
        # supported SymPy joint distribution.
        pymc_rv_map = {
            'MultivariateNormalDistribution': lambda dist:
                pymc.MvNormal('X', mu=matrix2numpy(dist.mu, float).flatten(),
                cov=matrix2numpy(dist.sigma, float), shape=(1, dist.mu.shape[0])),
            'MultivariateBetaDistribution': lambda dist:
                pymc.Dirichlet('X', a=list2numpy(dist.alpha, float).flatten()),
            'MultinomialDistribution': lambda dist:
                pymc.Multinomial('X', n=int(dist.n),
                p=list2numpy(dist.p, float).flatten(), shape=(1, len(dist.p)))
        }

        # Per-draw shape of each distribution, used for the final reshape.
        sample_shape = {
            'MultivariateNormalDistribution': lambda dist: matrix2numpy(dist.mu).flatten().shape,
            'MultivariateBetaDistribution': lambda dist: list2numpy(dist.alpha).flatten().shape,
            'MultinomialDistribution': lambda dist: list2numpy(dist.p).flatten().shape
        }

        dist_list = pymc_rv_map.keys()

        if dist.__class__.__name__ not in dist_list:
            # Unsupported distribution: caller falls back or raises.
            return None

        # Silence pymc3's verbose sampler logging.
        import logging
        logging.getLogger("pymc3").setLevel(logging.ERROR)
        with pymc.Model():
            pymc_rv_map[dist.__class__.__name__](dist)
            samples = pymc.sample(draws=prod(size), chains=1, progressbar=False, random_seed=seed, return_inferencedata=False, compute_convergence_checks=False)[:]['X']
        return samples.reshape(size + sample_shape[dist.__class__.__name__](dist))
269
+
270
+
271
# Dispatch table mapping a sampling backend name to its sampler class;
# 'pymc3' is kept as an alias of 'pymc' for backwards compatibility.
_get_sample_class_jrv = {
    'scipy': SampleJointScipy,
    'pymc3': SampleJointPymc,
    'pymc': SampleJointPymc,
    'numpy': SampleJointNumpy
}
277
+
278
class JointDistribution(Distribution, NamedArgsMixin):
    """
    Represented by the random variables part of the joint distribution.
    Contains methods for PDF, CDF, sampling, marginal densities, etc.
    """

    _argnames = ('pdf', )

    def __new__(cls, *args):
        # Sympify all parameters; plain Python lists become immutable
        # matrices so the arguments are valid Basic args.
        args = list(map(sympify, args))
        for i in range(len(args)):
            if isinstance(args[i], list):
                args[i] = ImmutableMatrix(args[i])
        return Basic.__new__(cls, *args)

    @property
    def domain(self):
        return ProductDomain(self.symbols)

    @property
    def pdf(self):
        return self.density.args[1]

    def cdf(self, other):
        """Cumulative distribution, given a dict mapping each component
        random variable to its upper evaluation point."""
        if not isinstance(other, dict):
            raise ValueError("%s should be of type dict, got %s"%(other, type(other)))
        # BUG FIX: dict views are not subscriptable in Python 3, so the
        # original `rvs = other.keys()` raised TypeError at `rvs[i]`.
        rvs = list(other.keys())
        _set = self.domain.set.sets
        expr = self.pdf(tuple(i.args[0] for i in self.symbols))
        for i in range(len(other)):
            # Integrate (continuous) or sum (discrete) each component from
            # the infimum of its support up to the requested point.
            if rvs[i].is_Continuous:
                density = Integral(expr, (rvs[i], _set[i].inf,
                    other[rvs[i]]))
            elif rvs[i].is_Discrete:
                density = Sum(expr, (rvs[i], _set[i].inf,
                    other[rvs[i]]))
        return density

    def sample(self, size=(), library='scipy', seed=None):
        """ A random realization from the distribution """

        libraries = ('scipy', 'numpy', 'pymc3', 'pymc')
        if library not in libraries:
            raise NotImplementedError("Sampling from %s is not supported yet."
                                        % str(library))
        if not import_module(library):
            raise ValueError("Failed to import %s" % library)

        samps = _get_sample_class_jrv[library](self, size, seed=seed)

        if samps is not None:
            return samps
        raise NotImplementedError(
                "Sampling for %s is not currently implemented from %s"
                % (self.__class__.__name__, library)
                )

    def __call__(self, *args):
        return self.pdf(*args)
337
+
338
class JointRandomSymbol(RandomSymbol):
    """
    Representation of random symbols with joint probability distributions
    to allow indexing.
    """
    def __getitem__(self, key):
        # Only joint spaces are indexable; bounds-check against the number of
        # components when that count is concrete (the comparison may be
        # symbolic, hence the explicit `== True`).
        if isinstance(self.pspace, JointPSpace):
            if (self.pspace.component_count <= key) == True:
                raise ValueError("Index keys for %s can only up to %s." %
                                 (self.name, self.pspace.component_count - 1))
            return Indexed(self, key)
349
+
350
+
351
+
352
class MarginalDistribution(Distribution):
    """
    Represents the marginal distribution of a joint probability space.

    Initialised using a probability distribution and random variables(or
    their indexed components) which should be a part of the resultant
    distribution.
    """

    def __new__(cls, dist, *rvs):
        # Accept either a single iterable of rvs or rvs as varargs.
        if len(rvs) == 1 and iterable(rvs[0]):
            rvs = tuple(rvs[0])
        if not all(isinstance(rv, (Indexed, RandomSymbol)) for rv in rvs):
            raise ValueError(filldedent('''Marginal distribution can be
             intitialised only in terms of random variables or indexed random
             variables'''))
        rvs = Tuple.fromiter(rv for rv in rvs)
        # Nothing to marginalize over: return the distribution unchanged.
        if not isinstance(dist, JointDistribution) and len(random_symbols(dist)) == 0:
            return dist
        return Basic.__new__(cls, dist, rvs)

    def check(self):
        pass

    @property
    def set(self):
        # Support: product of the supports of the retained random variables.
        rvs = [i for i in self.args[1] if isinstance(i, RandomSymbol)]
        return ProductSet(*[rv.pspace.set for rv in rvs])

    @property
    def symbols(self):
        rvs = self.args[1]
        return {rv.pspace.symbol for rv in rvs}

    def pdf(self, *x):
        """Marginal pdf evaluated at the point(s) ``x``."""
        expr, rvs = self.args[0], self.args[1]
        marginalise_out = [i for i in random_symbols(expr) if i not in rvs]
        if isinstance(expr, JointDistribution):
            count = len(expr.domain.args)
            # BUG FIX: the original bound `x = Dummy('x')`, shadowing the
            # *x argument tuple used below, and iterated `for i in count`
            # over a plain int (TypeError). Use a distinct name and range().
            dummy = Dummy('x', real=True)
            syms = tuple(Indexed(dummy, i) for i in range(count))
            expr = expr.pdf(syms)
        else:
            syms = tuple(rv.pspace.symbol if isinstance(rv, RandomSymbol) else rv.args[0] for rv in rvs)
        return Lambda(syms, self.compute_pdf(expr, marginalise_out))(*x)

    def compute_pdf(self, expr, rvs):
        # Multiply in each variable's own pdf (for RandomSymbols) and
        # integrate/sum it out.
        for rv in rvs:
            lpdf = 1
            if isinstance(rv, RandomSymbol):
                lpdf = rv.pspace.pdf
            expr = self.marginalise_out(expr*lpdf, rv)
        return expr

    def marginalise_out(self, expr, rv):
        # Remove a single random variable from expr by integrating (continuous)
        # or summing (discrete) over its support.
        from sympy.concrete.summations import Sum
        if isinstance(rv, RandomSymbol):
            dom = rv.pspace.set
        elif isinstance(rv, Indexed):
            dom = rv.base.component_domain(
                rv.pspace.component_domain(rv.args[1]))
        expr = expr.xreplace({rv: rv.pspace.symbol})
        if rv.pspace.is_Continuous:
            #TODO: Modify to support integration
            #for all kinds of sets.
            expr = Integral(expr, (rv.pspace.symbol, dom))
        elif rv.pspace.is_Discrete:
            #incorporate this into `Sum`/`summation`
            if dom in (S.Integers, S.Naturals, S.Naturals0):
                dom = (dom.inf, dom.sup)
            expr = Sum(expr, (rv.pspace.symbol, dom))
        return expr

    def __call__(self, *args):
        return self.pdf(*args)
parrot/lib/python3.10/site-packages/sympy/stats/joint_rv_types.py ADDED
@@ -0,0 +1,945 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from sympy.concrete.products import Product
2
+ from sympy.concrete.summations import Sum
3
+ from sympy.core.add import Add
4
+ from sympy.core.function import Lambda
5
+ from sympy.core.mul import Mul
6
+ from sympy.core.numbers import (Integer, Rational, pi)
7
+ from sympy.core.power import Pow
8
+ from sympy.core.relational import Eq
9
+ from sympy.core.singleton import S
10
+ from sympy.core.symbol import (Symbol, symbols)
11
+ from sympy.core.sympify import sympify
12
+ from sympy.functions.combinatorial.factorials import (rf, factorial)
13
+ from sympy.functions.elementary.exponential import exp
14
+ from sympy.functions.elementary.miscellaneous import sqrt
15
+ from sympy.functions.elementary.piecewise import Piecewise
16
+ from sympy.functions.special.bessel import besselk
17
+ from sympy.functions.special.gamma_functions import gamma
18
+ from sympy.matrices.dense import (Matrix, ones)
19
+ from sympy.sets.fancysets import Range
20
+ from sympy.sets.sets import (Intersection, Interval)
21
+ from sympy.tensor.indexed import (Indexed, IndexedBase)
22
+ from sympy.matrices import ImmutableMatrix, MatrixSymbol
23
+ from sympy.matrices.expressions.determinant import det
24
+ from sympy.matrices.expressions.matexpr import MatrixElement
25
+ from sympy.stats.joint_rv import JointDistribution, JointPSpace, MarginalDistribution
26
+ from sympy.stats.rv import _value_check, random_symbols
27
+
28
+ __all__ = ['JointRV',
29
+ 'MultivariateNormal',
30
+ 'MultivariateLaplace',
31
+ 'Dirichlet',
32
+ 'GeneralizedMultivariateLogGamma',
33
+ 'GeneralizedMultivariateLogGammaOmega',
34
+ 'Multinomial',
35
+ 'MultivariateBeta',
36
+ 'MultivariateEwens',
37
+ 'MultivariateT',
38
+ 'NegativeMultinomial',
39
+ 'NormalGamma'
40
+ ]
41
+
42
def multivariate_rv(cls, sym, *args):
    """Build a joint random symbol for the distribution class ``cls``.

    The parameters are sympified, validated via the class's ``check``,
    and wrapped in a ``JointPSpace`` whose value is returned.
    """
    dist_args = [sympify(arg) for arg in args]
    dist = cls(*dist_args)
    # Validate using the (possibly normalized) args stored on the instance.
    dist.check(*dist.args)
    return JointPSpace(sym, dist).value
48
+
49
+
50
def marginal_distribution(rv, *indices):
    """
    Marginal distribution function of a joint random variable.

    Parameters
    ==========

    rv : A random variable with a joint probability distribution.
    indices : Component indices or the indexed random symbol
        for which the joint distribution is to be calculated

    Returns
    =======

    A Lambda expression in `sym`.

    Examples
    ========

    >>> from sympy.stats import MultivariateNormal, marginal_distribution
    >>> m = MultivariateNormal('X', [1, 2], [[2, 1], [1, 2]])
    >>> marginal_distribution(m, m[0])(1)
    1/(2*sqrt(pi))

    """
    indices = list(indices)
    # Accept indexed random symbols (e.g. m[0]) and reduce them to their
    # integer component index.
    for i in range(len(indices)):
        if isinstance(indices[i], Indexed):
            indices[i] = indices[i].args[1]
    prob_space = rv.pspace
    if not indices:
        raise ValueError(
            "At least one component for marginal density is needed.")
    # Prefer a distribution-specific closed form when one is implemented.
    if hasattr(prob_space.distribution, '_marginal_distribution'):
        return prob_space.distribution._marginal_distribution(indices, rv.symbol)
    return prob_space.marginal_distribution(*indices)
86
+
87
+
88
class JointDistributionHandmade(JointDistribution):
    """Joint distribution built directly from a user-supplied pdf.

    Constructed as ``JointDistributionHandmade(pdf, support_set)``; only
    ``pdf`` is a named arg, the support is stored as the second Basic arg.
    """

    _argnames = ('pdf',)
    is_Continuous = True

    @property
    def set(self):
        # The support set passed as the second constructor argument.
        return self.args[1]
96
+
97
+
98
def JointRV(symbol, pdf, _set=None):
    """
    Create a Joint Random Variable where each of its component is continuous,
    given the following:

    Parameters
    ==========

    symbol : Symbol
        Represents name of the random variable.
    pdf : A PDF in terms of indexed symbols of the symbol given
        as the first argument

    NOTE
    ====

    As of now, the set for each component for a ``JointRV`` is
    equal to the set of all integers, which cannot be changed.

    Examples
    ========

    >>> from sympy import exp, pi, Indexed, S
    >>> from sympy.stats import density, JointRV
    >>> x1, x2 = (Indexed('x', i) for i in (1, 2))
    >>> pdf = exp(-x1**2/2 + x1 - x2**2/2 - S(1)/2)/(2*pi)
    >>> N1 = JointRV('x', pdf) #Multivariate Normal distribution
    >>> density(N1)(1, 2)
    exp(-2)/(2*pi)

    Returns
    =======

    RandomSymbol

    """
    #TODO: Add support for sets provided by the user
    symbol = sympify(symbol)
    # Collect the Indexed components of `symbol` appearing in the pdf and
    # sort them by index so the Lambda arguments are in component order.
    syms = [i for i in pdf.free_symbols if isinstance(i, Indexed)
        and i.base == IndexedBase(symbol)]
    syms = tuple(sorted(syms, key = lambda index: index.args[1]))
    # NOTE(review): the `_set` parameter is ignored; the support is always
    # R**n regardless of what the caller passes — see the NOTE above.
    _set = S.Reals**len(syms)
    pdf = Lambda(syms, pdf)
    dist = JointDistributionHandmade(pdf, _set)
    jrv = JointPSpace(symbol, dist).value
    rvs = random_symbols(pdf)
    # A pdf containing other random symbols defines a compound density:
    # wrap it as a marginal over those inner variables.
    if len(rvs) != 0:
        dist = MarginalDistribution(dist, (jrv,))
        return JointPSpace(symbol, dist).value
    return jrv
148
+
149
+ #-------------------------------------------------------------------------------
150
+ # Multivariate Normal distribution ---------------------------------------------
151
+
152
class MultivariateNormalDistribution(JointDistribution):
    """Joint distribution of a multivariate normal random vector.

    Parametrized by a mean (column) vector ``mu`` and a covariance
    matrix ``sigma``.
    """
    _argnames = ('mu', 'sigma')

    is_Continuous = True

    @property
    def set(self):
        # Support is R^k, k being the length of the mean vector.
        k = self.mu.shape[0]
        return S.Reals**k

    @staticmethod
    def check(mu, sigma):
        _value_check(mu.shape[0] == sigma.shape[0],
                     "Size of the mean vector and covariance matrix are incorrect.")
        #check if covariance matrix is positive semi definite or not.
        # (skipped for a symbolic MatrixSymbol, where it cannot be decided)
        if not isinstance(sigma, MatrixSymbol):
            _value_check(sigma.is_positive_semidefinite,
                         "The covariance matrix must be positive semi definite. ")

    def pdf(self, *args):
        """Density at ``args``; accepts a single column matrix or scalars."""
        mu, sigma = self.mu, self.sigma
        k = mu.shape[0]
        if len(args) == 1 and args[0].is_Matrix:
            args = args[0]
        else:
            args = ImmutableMatrix(args)
        x = args - mu
        density = S.One/sqrt((2*pi)**(k)*det(sigma))*exp(
            Rational(-1, 2)*x.transpose()*(sigma.inv()*x))
        # The quadratic form yields a 1x1 matrix; extract the scalar entry.
        return MatrixElement(density, 0, 0)

    def _marginal_distribution(self, indices, sym):
        """Marginal density over the components listed in ``indices``."""
        sym = ImmutableMatrix([Indexed(sym, i) for i in indices])
        _mu, _sigma = self.mu, self.sigma
        k = self.mu.shape[0]
        # Delete rows/columns of the marginalized-out components.
        # Iterate from the highest index downwards: deleting row i shifts
        # every later row up by one, so ascending-order deletion (the
        # previous behavior) removed the wrong rows whenever a kept
        # component followed a removed one.
        for i in range(k - 1, -1, -1):
            if i not in indices:
                _mu = _mu.row_del(i)
                _sigma = _sigma.col_del(i)
                _sigma = _sigma.row_del(i)
        return Lambda(tuple(sym), S.One/sqrt((2*pi)**(len(_mu))*det(_sigma))*exp(
            Rational(-1, 2)*(_mu - sym).transpose()*(_sigma.inv()*
                (_mu - sym)))[0])
195
+
196
def MultivariateNormal(name, mu, sigma):
    r"""
    Creates a continuous random variable with Multivariate Normal
    Distribution.

    The density of the multivariate normal distribution can be found at [1].

    Parameters
    ==========

    mu : List representing the mean or the mean vector
    sigma : Positive semidefinite square matrix
        Represents covariance Matrix.
        If `\sigma` is noninvertible then only sampling is supported currently

    Returns
    =======

    RandomSymbol

    Examples
    ========

    >>> from sympy.stats import MultivariateNormal, density, marginal_distribution
    >>> from sympy import symbols, MatrixSymbol
    >>> X = MultivariateNormal('X', [3, 4], [[2, 1], [1, 2]])
    >>> y, z = symbols('y z')
    >>> density(X)(y, z)
    sqrt(3)*exp(-y**2/3 + y*z/3 + 2*y/3 - z**2/3 + 5*z/3 - 13/3)/(6*pi)
    >>> density(X)(1, 2)
    sqrt(3)*exp(-4/3)/(6*pi)
    >>> marginal_distribution(X, X[1])(y)
    exp(-(y - 4)**2/4)/(2*sqrt(pi))
    >>> marginal_distribution(X, X[0])(y)
    exp(-(y - 3)**2/4)/(2*sqrt(pi))

    The example below shows that it is also possible to use
    symbolic parameters to define the MultivariateNormal class.

    >>> n = symbols('n', integer=True, positive=True)
    >>> Sg = MatrixSymbol('Sg', n, n)
    >>> mu = MatrixSymbol('mu', n, 1)
    >>> obs = MatrixSymbol('obs', n, 1)
    >>> X = MultivariateNormal('X', mu, Sg)

    The density of a multivariate normal can be
    calculated using a matrix argument, as shown below.

    >>> density(X)(obs)
    (exp(((1/2)*mu.T - (1/2)*obs.T)*Sg**(-1)*(-mu + obs))/sqrt((2*pi)**n*Determinant(Sg)))[0, 0]

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Multivariate_normal_distribution

    """
    # Shape and positive-semidefiniteness validation is performed by
    # MultivariateNormalDistribution.check via multivariate_rv.
    return multivariate_rv(MultivariateNormalDistribution, name, mu, sigma)
254
+
255
+ #-------------------------------------------------------------------------------
256
+ # Multivariate Laplace distribution --------------------------------------------
257
+
258
class MultivariateLaplaceDistribution(JointDistribution):
    """Joint distribution of a multivariate Laplace random vector.

    Parametrized by a mean vector ``mu`` and a positive definite
    covariance matrix ``sigma``.
    """
    _argnames = ('mu', 'sigma')
    is_Continuous = True

    @property
    def set(self):
        # Support is R^k, k being the length of the mean vector.
        k = self.mu.shape[0]
        return S.Reals**k

    @staticmethod
    def check(mu, sigma):
        _value_check(mu.shape[0] == sigma.shape[0],
                     "Size of the mean vector and covariance matrix are incorrect.")
        # check if covariance matrix is positive definite or not.
        # (skipped for a symbolic MatrixSymbol, where it cannot be decided)
        if not isinstance(sigma, MatrixSymbol):
            _value_check(sigma.is_positive_definite,
                         "The covariance matrix must be positive definite. ")

    def pdf(self, *args):
        # Bessel-function form of the multivariate Laplace density;
        # ``x`` and ``y`` are the quadratic forms of the mean and the
        # evaluation point under the inverse covariance matrix.
        mu, sigma = self.mu, self.sigma
        mu_T = mu.transpose()
        k = S(mu.shape[0])
        sigma_inv = sigma.inv()
        args = ImmutableMatrix(args)
        args_T = args.transpose()
        x = (mu_T*sigma_inv*mu)[0]
        y = (args_T*sigma_inv*args)[0]
        v = 1 - k/2
        return (2 * (y/(2 + x))**(v/2) * besselk(v, sqrt((2 + x)*y)) *
                exp((args_T * sigma_inv * mu)[0]) /
                ((2 * pi)**(k/2) * sqrt(det(sigma))))
289
+
290
+
291
def MultivariateLaplace(name, mu, sigma):
    """
    Creates a continuous random variable with Multivariate Laplace
    Distribution.

    The density of the multivariate Laplace distribution can be found at [1].

    Parameters
    ==========

    mu : List representing the mean or the mean vector
    sigma : Positive definite square matrix
        Represents covariance Matrix

    Returns
    =======

    RandomSymbol

    Examples
    ========

    >>> from sympy.stats import MultivariateLaplace, density
    >>> from sympy import symbols
    >>> y, z = symbols('y z')
    >>> X = MultivariateLaplace('X', [2, 4], [[3, 1], [1, 3]])
    >>> density(X)(y, z)
    sqrt(2)*exp(y/4 + 5*z/4)*besselk(0, sqrt(15*y*(3*y/8 - z/8)/2 + 15*z*(-y/8 + 3*z/8)/2))/(4*pi)
    >>> density(X)(1, 2)
    sqrt(2)*exp(11/4)*besselk(0, sqrt(165)/4)/(4*pi)

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Multivariate_Laplace_distribution

    """
    # Dimension and positive-definiteness checks are done by
    # MultivariateLaplaceDistribution.check via multivariate_rv.
    return multivariate_rv(MultivariateLaplaceDistribution, name, mu, sigma)
329
+
330
+ #-------------------------------------------------------------------------------
331
+ # Multivariate StudentT distribution -------------------------------------------
332
+
333
class MultivariateTDistribution(JointDistribution):
    """Joint distribution of a multivariate Student's t random vector.

    Parametrized by a location vector ``mu``, a shape matrix
    ``shape_mat`` and degrees of freedom ``dof``.
    """
    _argnames = ('mu', 'shape_mat', 'dof')
    is_Continuous = True

    @property
    def set(self):
        # Support is R^k, k being the length of the location vector.
        k = self.mu.shape[0]
        return S.Reals**k

    @staticmethod
    def check(mu, sigma, v):
        _value_check(mu.shape[0] == sigma.shape[0],
                     "Size of the location vector and shape matrix are incorrect.")
        # check if covariance matrix is positive definite or not.
        # (skipped for a symbolic MatrixSymbol, where it cannot be decided)
        if not isinstance(sigma, MatrixSymbol):
            _value_check(sigma.is_positive_definite,
                         "The shape matrix must be positive definite. ")
        # NOTE(review): the degrees-of-freedom argument ``v`` is accepted
        # but not validated here — confirm whether positivity should be
        # enforced for consistency with the other checks.

    def pdf(self, *args):
        # Standard multivariate t density in terms of the quadratic form
        # of the deviation from the location vector.
        mu, sigma = self.mu, self.shape_mat
        v = S(self.dof)
        k = S(mu.shape[0])
        sigma_inv = sigma.inv()
        args = ImmutableMatrix(args)
        x = args - mu
        return gamma((k + v)/2)/(gamma(v/2)*(v*pi)**(k/2)*sqrt(det(sigma)))\
            *(1 + 1/v*(x.transpose()*sigma_inv*x)[0])**((-v - k)/2)
360
+
361
def MultivariateT(syms, mu, sigma, v):
    """
    Creates a joint random variable with multivariate T-distribution.

    Parameters
    ==========

    syms : A symbol/str
        For identifying the random variable.
    mu : A list/matrix
        Representing the location vector
    sigma : The shape matrix for the distribution
    v : Degrees of freedom of the distribution

    Examples
    ========

    >>> from sympy.stats import density, MultivariateT
    >>> from sympy import Symbol

    >>> x = Symbol("x")
    >>> X = MultivariateT("x", [1, 1], [[1, 0], [0, 1]], 2)

    >>> density(X)(1, 2)
    2/(9*pi)

    Returns
    =======

    RandomSymbol

    """
    # Dimension and definiteness validation is performed by
    # MultivariateTDistribution.check via multivariate_rv.
    return multivariate_rv(MultivariateTDistribution, syms, mu, sigma, v)
393
+
394
+
395
+ #-------------------------------------------------------------------------------
396
+ # Multivariate Normal Gamma distribution ---------------------------------------
397
+
398
class NormalGammaDistribution(JointDistribution):
    """Bivariate joint distribution of ``(x, tau)`` with Normal-gamma law.

    ``x`` is normally distributed with precision ``lamda*tau`` around
    ``mu``; ``tau`` follows a gamma law with parameters ``alpha``/``beta``.
    """

    _argnames = ('mu', 'lamda', 'alpha', 'beta')
    is_Continuous = True

    @staticmethod
    def check(mu, lamda, alpha, beta):
        _value_check(mu.is_real, "Location must be real.")
        _value_check(lamda > 0, "Lambda must be positive")
        _value_check(alpha > 0, "alpha must be positive")
        _value_check(beta > 0, "beta must be positive")

    @property
    def set(self):
        # x ranges over the reals, tau over the nonnegative half-line.
        return S.Reals*Interval(0, S.Infinity)

    def pdf(self, x, tau):
        beta, alpha, lamda = self.beta, self.alpha, self.lamda
        mu = self.mu

        return beta**alpha*sqrt(lamda)/(gamma(alpha)*sqrt(2*pi))*\
            tau**(alpha - S.Half)*exp(-1*beta*tau)*\
            exp(-1*(lamda*tau*(x - mu)**2)/S(2))

    def _marginal_distribution(self, indices, *sym):
        # Marginalizing over both components returns the joint pdf itself.
        if len(indices) == 2:
            return self.pdf(*sym)
        if indices[0] == 0:
            #For marginal over `x`, return non-standardized Student-T's
            #distribution
            # NOTE(review): the parameters (v, sigma) of this Student-t
            # marginal are taken as written; verify against the standard
            # Normal-gamma marginalization identities.
            x = sym[0]
            v, mu, sigma = self.alpha - S.Half, self.mu, \
                S(self.beta)/(self.lamda * self.alpha)
            return Lambda(sym, gamma((v + 1)/2)/(gamma(v/2)*sqrt(pi*v)*sigma)*\
                (1 + 1/v*((x - mu)/sigma)**2)**((-v - 1)/2))
        #For marginal over `tau`, return Gamma distribution as per construction
        from sympy.stats.crv_types import GammaDistribution
        return Lambda(sym, GammaDistribution(self.alpha, self.beta)(sym[0]))
436
+
437
def NormalGamma(sym, mu, lamda, alpha, beta):
    """
    Creates a bivariate joint random variable with multivariate Normal gamma
    distribution.

    Parameters
    ==========

    sym : A symbol/str
        For identifying the random variable.
    mu : A real number
        The mean of the normal distribution
    lamda : A positive integer
        Parameter of joint distribution
    alpha : A positive integer
        Parameter of joint distribution
    beta : A positive integer
        Parameter of joint distribution

    Returns
    =======

    RandomSymbol

    Examples
    ========

    >>> from sympy.stats import density, NormalGamma
    >>> from sympy import symbols

    >>> X = NormalGamma('x', 0, 1, 2, 3)
    >>> y, z = symbols('y z')

    >>> density(X)(y, z)
    9*sqrt(2)*z**(3/2)*exp(-3*z)*exp(-y**2*z/2)/(2*sqrt(pi))

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Normal-gamma_distribution

    """
    # Positivity/realness validation is done by NormalGammaDistribution.check.
    return multivariate_rv(NormalGammaDistribution, sym, mu, lamda, alpha, beta)
480
+
481
+ #-------------------------------------------------------------------------------
482
+ # Multivariate Beta/Dirichlet distribution -------------------------------------
483
+
484
class MultivariateBetaDistribution(JointDistribution):
    """Joint distribution of a Dirichlet (multivariate beta) random vector.

    Parametrized by a list of concentration parameters ``alpha``.
    """

    _argnames = ('alpha',)
    is_Continuous = True

    @staticmethod
    def check(alpha):
        _value_check(len(alpha) >= 2, "At least two categories should be passed.")
        for a_k in alpha:
            # ``!= False`` lets symbolic parameters (where positivity is
            # undecidable, i.e. None) pass the check.
            _value_check((a_k > 0) != False, "Each concentration parameter"
                         " should be positive.")

    @property
    def set(self):
        # Each component lives in [0, 1].
        k = len(self.alpha)
        return Interval(0, 1)**k

    def pdf(self, *syms):
        # Dirichlet density: prod(sym_k**(alpha_k - 1)) over the
        # multivariate beta function B(alpha).
        alpha = self.alpha
        B = Mul.fromiter(map(gamma, alpha))/gamma(Add(*alpha))
        return Mul.fromiter(sym**(a_k - 1) for a_k, sym in zip(alpha, syms))/B
505
+
506
def MultivariateBeta(syms, *alpha):
    """
    Creates a continuous random variable with Dirichlet/Multivariate Beta
    Distribution.

    The density of the Dirichlet distribution can be found at [1].

    Parameters
    ==========

    alpha : Positive real numbers
        Signifies concentration numbers.

    Returns
    =======

    RandomSymbol

    Examples
    ========

    >>> from sympy.stats import density, MultivariateBeta, marginal_distribution
    >>> from sympy import Symbol
    >>> a1 = Symbol('a1', positive=True)
    >>> a2 = Symbol('a2', positive=True)
    >>> B = MultivariateBeta('B', [a1, a2])
    >>> C = MultivariateBeta('C', a1, a2)
    >>> x = Symbol('x')
    >>> y = Symbol('y')
    >>> density(B)(x, y)
    x**(a1 - 1)*y**(a2 - 1)*gamma(a1 + a2)/(gamma(a1)*gamma(a2))
    >>> marginal_distribution(C, C[0])(x)
    x**(a1 - 1)*gamma(a1 + a2)/(a2*gamma(a1)*gamma(a2))

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Dirichlet_distribution
    .. [2] https://mathworld.wolfram.com/DirichletDistribution.html

    """
    # Accept both call styles: a single list of alphas or separate
    # positional alphas; normalize to one list before dispatching.
    if not isinstance(alpha[0], list):
        alpha = (list(alpha),)
    return multivariate_rv(MultivariateBetaDistribution, syms, alpha[0])
550
+
551
# ``Dirichlet`` is a public alias for :func:`MultivariateBeta`.
Dirichlet = MultivariateBeta
552
+
553
+ #-------------------------------------------------------------------------------
554
+ # Multivariate Ewens distribution ----------------------------------------------
555
+
556
class MultivariateEwensDistribution(JointDistribution):
    """Discrete joint distribution given by Ewens's sampling formula.

    Parametrized by a sample size ``n`` and a mutation rate ``theta``.
    """

    _argnames = ('n', 'theta')
    is_Discrete = True
    is_Continuous = False

    @staticmethod
    def check(n, theta):
        _value_check((n > 0),
                     "sample size should be positive integer.")
        _value_check(theta.is_positive, "mutation rate should be positive.")

    @property
    def set(self):
        # For a symbolic n the support is expressed as a symbolic Product
        # of intersections; for a concrete Integer n it is built as an
        # explicit (flattened) product of Ranges: component i can take at
        # most n//i copies, since i parts of size i already sum to n.
        if not isinstance(self.n, Integer):
            i = Symbol('i', integer=True, positive=True)
            return Product(Intersection(S.Naturals0, Interval(0, self.n//i)),
                           (i, 1, self.n))
        prod_set = Range(0, self.n + 1)
        for i in range(2, self.n + 1):
            prod_set *= Range(0, self.n//i + 1)
        return prod_set.flatten()

    def pdf(self, *syms):
        n, theta = self.n, self.theta
        # condi: True when the dimension n is a concrete Integer; otherwise
        # the caller must supply a single IndexedBase for the counts.
        condi = isinstance(self.n, Integer)
        if not (isinstance(syms[0], IndexedBase) or condi):
            raise ValueError("Please use IndexedBase object for syms as "
                             "the dimension is symbolic")
        term_1 = factorial(n)/rf(theta, n)
        if condi:
            term_2 = Mul.fromiter(theta**syms[j]/((j+1)**syms[j]*factorial(syms[j]))
                                  for j in range(n))
            # Density is nonzero only on partitions of n: sum k*a_k == n.
            cond = Eq(sum((k + 1)*syms[k] for k in range(n)), n)
            return Piecewise((term_1 * term_2, cond), (0, True))
        syms = syms[0]
        j, k = symbols('j, k', positive=True, integer=True)
        term_2 = Product(theta**syms[j]/((j+1)**syms[j]*factorial(syms[j])),
                         (j, 0, n - 1))
        cond = Eq(Sum((k + 1)*syms[k], (k, 0, n - 1)), n)
        return Piecewise((term_1 * term_2, cond), (0, True))
597
+
598
+
599
def MultivariateEwens(syms, n, theta):
    """
    Creates a discrete random variable with Multivariate Ewens
    Distribution.

    The density of the said distribution can be found at [1].

    Parameters
    ==========

    n : Positive integer
        Size of the sample or the integer whose partitions are considered
    theta : Positive real number
        Denotes Mutation rate

    Returns
    =======

    RandomSymbol

    Examples
    ========

    >>> from sympy.stats import density, marginal_distribution, MultivariateEwens
    >>> from sympy import Symbol
    >>> a1 = Symbol('a1', positive=True)
    >>> a2 = Symbol('a2', positive=True)
    >>> ed = MultivariateEwens('E', 2, 1)
    >>> density(ed)(a1, a2)
    Piecewise((1/(2**a2*factorial(a1)*factorial(a2)), Eq(a1 + 2*a2, 2)), (0, True))
    >>> marginal_distribution(ed, ed[0])(a1)
    Piecewise((1/factorial(a1), Eq(a1, 2)), (0, True))

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Ewens%27s_sampling_formula
    .. [2] https://www.jstor.org/stable/24780825
    """
    # Positivity validation is done by MultivariateEwensDistribution.check.
    return multivariate_rv(MultivariateEwensDistribution, syms, n, theta)
639
+
640
+ #-------------------------------------------------------------------------------
641
+ # Generalized Multivariate Log Gamma distribution ------------------------------
642
+
643
class GeneralizedMultivariateLogGammaDistribution(JointDistribution):
    """Joint distribution of the generalized multivariate log-gamma family.

    Parametrized by a constant ``delta`` in [0, 1], a positive ``v`` and
    positive vectors ``lamda`` and ``mu`` of equal length >= 2.
    """

    _argnames = ('delta', 'v', 'lamda', 'mu')
    is_Continuous = True

    # NOTE(review): unlike the sibling distributions this ``check`` is an
    # instance method rather than a @staticmethod; it still works because
    # callers invoke it on the constructed instance.
    def check(self, delta, v, l, mu):
        _value_check((delta >= 0, delta <= 1), "delta must be in range [0, 1].")
        _value_check((v > 0), "v must be positive")
        for lk in l:
            _value_check((lk > 0), "lamda must be a positive vector.")
        for muk in mu:
            _value_check((muk > 0), "mu must be a positive vector.")
        _value_check(len(l) > 1,"the distribution should have at least"
                     " two random variables.")

    @property
    def set(self):
        # One real component per entry of lamda.
        return S.Reals**len(self.lamda)

    def pdf(self, *y):
        # Density expressed as an infinite series over n (returned as a
        # symbolic Sum, not evaluated here).
        d, v, l, mu = self.delta, self.v, self.lamda, self.mu
        n = Symbol('n', negative=False, integer=True)
        k = len(l)
        sterm1 = Pow((1 - d), n)/\
                 ((gamma(v + n)**(k - 1))*gamma(v)*gamma(n + 1))
        sterm2 = Mul.fromiter(mui*li**(-v - n) for mui, li in zip(mu, l))
        term1 = sterm1 * sterm2
        sterm3 = (v + n) * sum(mui * yi for mui, yi in zip(mu, y))
        sterm4 = sum(exp(mui * yi)/li for (mui, yi, li) in zip(mu, y, l))
        term2 = exp(sterm3 - sterm4)
        return Pow(d, v) * Sum(term1 * term2, (n, 0, S.Infinity))
674
+
675
def GeneralizedMultivariateLogGamma(syms, delta, v, lamda, mu):
    """
    Creates a joint random variable with generalized multivariate log gamma
    distribution.

    The joint pdf can be found at [1].

    Parameters
    ==========

    syms : list/tuple/set of symbols for identifying each component
    delta : A constant in range $[0, 1]$
    v : Positive real number
    lamda : List of positive real numbers
    mu : List of positive real numbers

    Returns
    =======

    RandomSymbol

    Examples
    ========

    >>> from sympy.stats import density
    >>> from sympy.stats.joint_rv_types import GeneralizedMultivariateLogGamma
    >>> from sympy import symbols, S
    >>> v = 1
    >>> l, mu = [1, 1, 1], [1, 1, 1]
    >>> d = S.Half
    >>> y = symbols('y_1:4', positive=True)
    >>> Gd = GeneralizedMultivariateLogGamma('G', d, v, l, mu)
    >>> density(Gd)(y[0], y[1], y[2])
    Sum(exp((n + 1)*(y_1 + y_2 + y_3) - exp(y_1) - exp(y_2) -
    exp(y_3))/(2**n*gamma(n + 1)**3), (n, 0, oo))/2

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Generalized_multivariate_log-gamma_distribution
    .. [2] https://www.researchgate.net/publication/234137346_On_a_multivariate_log-gamma_distribution_and_the_use_of_the_distribution_in_the_Bayesian_analysis

    Note
    ====

    If the GeneralizedMultivariateLogGamma is too long to type use,

    >>> from sympy.stats.joint_rv_types import GeneralizedMultivariateLogGamma as GMVLG
    >>> Gd = GMVLG('G', d, v, l, mu)

    If you want to pass the matrix omega instead of the constant delta, then use
    ``GeneralizedMultivariateLogGammaOmega``.

    """
    # Parameter validation happens in
    # GeneralizedMultivariateLogGammaDistribution.check.
    return multivariate_rv(GeneralizedMultivariateLogGammaDistribution,
                           syms, delta, v, lamda, mu)
731
+
732
def GeneralizedMultivariateLogGammaOmega(syms, omega, v, lamda, mu):
    """
    Extends GeneralizedMultivariateLogGamma.

    Parameters
    ==========

    syms : list/tuple/set of symbols
        For identifying each component
    omega : A square matrix
        Every element of square matrix must be absolute value of
        square root of correlation coefficient
    v : Positive real number
    lamda : List of positive real numbers
    mu : List of positive real numbers

    Returns
    =======

    RandomSymbol

    Examples
    ========

    >>> from sympy.stats import density
    >>> from sympy.stats.joint_rv_types import GeneralizedMultivariateLogGammaOmega
    >>> from sympy import Matrix, symbols, S
    >>> omega = Matrix([[1, S.Half, S.Half], [S.Half, 1, S.Half], [S.Half, S.Half, 1]])
    >>> v = 1
    >>> l, mu = [1, 1, 1], [1, 1, 1]
    >>> G = GeneralizedMultivariateLogGammaOmega('G', omega, v, l, mu)
    >>> y = symbols('y_1:4', positive=True)
    >>> density(G)(y[0], y[1], y[2])
    sqrt(2)*Sum((1 - sqrt(2)/2)**n*exp((n + 1)*(y_1 + y_2 + y_3) - exp(y_1) -
    exp(y_2) - exp(y_3))/gamma(n + 1)**3, (n, 0, oo))/2

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Generalized_multivariate_log-gamma_distribution
    .. [2] https://www.researchgate.net/publication/234137346_On_a_multivariate_log-gamma_distribution_and_the_use_of_the_distribution_in_the_Bayesian_analysis

    Notes
    =====

    If the GeneralizedMultivariateLogGammaOmega is too long to type use,

    >>> from sympy.stats.joint_rv_types import GeneralizedMultivariateLogGammaOmega as GMVLGO
    >>> G = GMVLGO('G', omega, v, l, mu)

    """
    # Validate omega: square Matrix, entries in [0, 1], unit diagonal,
    # and shape consistent with the lamda/mu vectors.
    _value_check((omega.is_square, isinstance(omega, Matrix)), "omega must be a"
                 " square matrix")
    for val in omega.values():
        _value_check((val >= 0, val <= 1),
                     "all values in matrix must be between 0 and 1(both inclusive).")
    _value_check(omega.diagonal().equals(ones(1, omega.shape[0])),
                 "all the elements of diagonal should be 1.")
    _value_check((omega.shape[0] == len(lamda), len(lamda) == len(mu)),
                 "lamda, mu should be of same length and omega should "
                 " be of shape (length of lamda, length of mu)")
    _value_check(len(lamda) > 1,"the distribution should have at least"
                 " two random variables.")
    # Reduce the omega matrix to the scalar delta used by the base
    # distribution: the (k-1)-th root of its determinant.
    delta = Pow(Rational(omega.det()), Rational(1, len(lamda) - 1))
    return GeneralizedMultivariateLogGamma(syms, delta, v, lamda, mu)
797
+
798
+
799
+ #-------------------------------------------------------------------------------
800
+ # Multinomial distribution -----------------------------------------------------
801
+
802
class MultinomialDistribution(JointDistribution):
    """Discrete joint distribution of multinomial counts.

    Parametrized by the number of trials ``n`` and a list of event
    probabilities ``p`` summing to 1.
    """

    _argnames = ('n', 'p')
    is_Continuous = False
    is_Discrete = True

    @staticmethod
    def check(n, p):
        _value_check(n > 0,
                     "number of trials must be a positive integer")
        for p_k in p:
            _value_check((p_k >= 0, p_k <= 1),
                         "probability must be in range [0, 1]")
        _value_check(Eq(sum(p), 1),
                     "probabilities must sum to 1")

    @property
    def set(self):
        # Each count is a natural number bounded by n.
        return Intersection(S.Naturals0, Interval(0, self.n))**len(self.p)

    def pdf(self, *x):
        n, p = self.n, self.p
        # Multinomial coefficient times the product of probabilities;
        # the density is nonzero only when the counts sum to n.
        term_1 = factorial(n)/Mul.fromiter(factorial(x_k) for x_k in x)
        term_2 = Mul.fromiter(p_k**x_k for p_k, x_k in zip(p, x))
        return Piecewise((term_1 * term_2, Eq(sum(x), n)), (0, True))
827
+
828
def Multinomial(syms, n, *p):
    """
    Creates a discrete random variable with Multinomial Distribution.

    The density of the said distribution can be found at [1].

    Parameters
    ==========

    n : Positive integer
        Represents number of trials
    p : List of event probabilities
        Must be in the range of $[0, 1]$.

    Returns
    =======

    RandomSymbol

    Examples
    ========

    >>> from sympy.stats import density, Multinomial, marginal_distribution
    >>> from sympy import symbols
    >>> x1, x2, x3 = symbols('x1, x2, x3', nonnegative=True, integer=True)
    >>> p1, p2, p3 = symbols('p1, p2, p3', positive=True)
    >>> M = Multinomial('M', 3, p1, p2, p3)
    >>> density(M)(x1, x2, x3)
    Piecewise((6*p1**x1*p2**x2*p3**x3/(factorial(x1)*factorial(x2)*factorial(x3)),
    Eq(x1 + x2 + x3, 3)), (0, True))
    >>> marginal_distribution(M, M[0])(x1).subs(x1, 1)
    3*p1*p2**2 + 6*p1*p2*p3 + 3*p1*p3**2

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Multinomial_distribution
    .. [2] https://mathworld.wolfram.com/MultinomialDistribution.html

    """
    # Accept both call styles: a single list of probabilities or separate
    # positional probabilities; normalize to one list before dispatching.
    if not isinstance(p[0], list):
        p = (list(p), )
    return multivariate_rv(MultinomialDistribution, syms, n, p[0])
871
+
872
+ #-------------------------------------------------------------------------------
873
+ # Negative Multinomial Distribution --------------------------------------------
874
+
875
class NegativeMultinomialDistribution(JointDistribution):
    """Discrete joint distribution of negative multinomial counts.

    Parametrized by the number of failures ``k0`` at which the experiment
    stops and a list of success probabilities ``p`` with sum(p) <= 1.
    """

    _argnames = ('k0', 'p')
    is_Continuous = False
    is_Discrete = True

    @staticmethod
    def check(k0, p):
        _value_check(k0 > 0,
                     "number of failures must be a positive integer")
        for p_k in p:
            _value_check((p_k >= 0, p_k <= 1),
                         "probability must be in range [0, 1].")
        _value_check(sum(p) <= 1,
                     "success probabilities must not be greater than 1.")

    @property
    def set(self):
        # Each count ranges over all nonnegative integers.
        return Range(0, S.Infinity)**len(self.p)

    def pdf(self, *k):
        k0, p = self.k0, self.p
        term_1 = (gamma(k0 + sum(k))*(1 - sum(p))**k0)/gamma(k0)
        # NOTE: ``pi`` here is a loop variable over the probabilities and
        # locally shadows the mathematical constant ``pi`` imported above.
        term_2 = Mul.fromiter(pi**ki/factorial(ki) for pi, ki in zip(p, k))
        return term_1 * term_2
900
+
901
def NegativeMultinomial(syms, k0, *p):
    """
    Creates a discrete random variable with Negative Multinomial Distribution.

    The density of the said distribution can be found at [1].

    Parameters
    ==========

    k0 : positive integer
        Represents number of failures before the experiment is stopped
    p : List of event probabilities
        Must be in the range of $[0, 1]$

    Returns
    =======

    RandomSymbol

    Examples
    ========

    >>> from sympy.stats import density, NegativeMultinomial, marginal_distribution
    >>> from sympy import symbols
    >>> x1, x2, x3 = symbols('x1, x2, x3', nonnegative=True, integer=True)
    >>> p1, p2, p3 = symbols('p1, p2, p3', positive=True)
    >>> N = NegativeMultinomial('M', 3, p1, p2, p3)
    >>> N_c = NegativeMultinomial('M', 3, 0.1, 0.1, 0.1)
    >>> density(N)(x1, x2, x3)
    p1**x1*p2**x2*p3**x3*(-p1 - p2 - p3 + 1)**3*gamma(x1 + x2 +
    x3 + 3)/(2*factorial(x1)*factorial(x2)*factorial(x3))
    >>> marginal_distribution(N_c, N_c[0])(1).evalf().round(2)
    0.25


    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Negative_multinomial_distribution
    .. [2] https://mathworld.wolfram.com/NegativeBinomialDistribution.html

    """
    # Accept both call styles: a single list of probabilities or separate
    # positional probabilities; normalize to one list before dispatching.
    if not isinstance(p[0], list):
        p = (list(p), )
    return multivariate_rv(NegativeMultinomialDistribution, syms, k0, p[0])
parrot/lib/python3.10/site-packages/sympy/stats/matrix_distributions.py ADDED
@@ -0,0 +1,610 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from math import prod
2
+
3
+ from sympy.core.basic import Basic
4
+ from sympy.core.numbers import pi
5
+ from sympy.core.singleton import S
6
+ from sympy.functions.elementary.exponential import exp
7
+ from sympy.functions.special.gamma_functions import multigamma
8
+ from sympy.core.sympify import sympify, _sympify
9
+ from sympy.matrices import (ImmutableMatrix, Inverse, Trace, Determinant,
10
+ MatrixSymbol, MatrixBase, Transpose, MatrixSet,
11
+ matrix2numpy)
12
+ from sympy.stats.rv import (_value_check, RandomMatrixSymbol, NamedArgsMixin, PSpace,
13
+ _symbol_converter, MatrixDomain, Distribution)
14
+ from sympy.external import import_module
15
+
16
+
17
+ ################################################################################
18
+ #------------------------Matrix Probability Space------------------------------#
19
+ ################################################################################
20
class MatrixPSpace(PSpace):
    """
    Represents probability space for
    Matrix Distributions.
    """
    def __new__(cls, sym, distribution, dim_n, dim_m):
        # Normalize the symbol and sympify the dimensions; only integer
        # (possibly symbolic-integer) dimensions are accepted.
        sym = _symbol_converter(sym)
        dim_n, dim_m = _sympify(dim_n), _sympify(dim_m)
        if not (dim_n.is_integer and dim_m.is_integer):
            raise ValueError("Dimensions should be integers")
        return Basic.__new__(cls, sym, distribution, dim_n, dim_m)

    # Basic args layout: (symbol, distribution, dim_n, dim_m).
    distribution = property(lambda self: self.args[1])
    symbol = property(lambda self: self.args[0])

    @property
    def domain(self):
        return MatrixDomain(self.symbol, self.distribution.set)

    @property
    def value(self):
        # The random matrix symbol carrying this probability space.
        return RandomMatrixSymbol(self.symbol, self.args[2], self.args[3], self)

    @property
    def values(self):
        return {self.value}

    def compute_density(self, expr, *args):
        # Only a bare RandomMatrixSymbol is supported; compound expressions
        # over matrix distributions have no density algorithm yet.
        rms = expr.atoms(RandomMatrixSymbol)
        if len(rms) > 1 or (not isinstance(expr, RandomMatrixSymbol)):
            raise NotImplementedError("Currently, no algorithm has been "
                    "implemented to handle general expressions containing "
                    "multiple matrix distributions.")
        return self.distribution.pdf(expr)

    def sample(self, size=(), library='scipy', seed=None):
        """
        Internal sample method

        Returns dictionary mapping RandomMatrixSymbol to realization value.
        """
        return {self.value: self.distribution.sample(size, library=library, seed=seed)}
62
+
63
+
64
def rv(symbol, cls, args):
    """Construct the random matrix symbol for distribution class *cls*.

    Sympifies *args*, instantiates and validates the distribution, then
    wraps it in a :class:`MatrixPSpace` sized by the distribution's
    dimensions and returns the probability space's value.
    """
    sym_args = [sympify(arg) for arg in args]
    dist = cls(*sym_args)
    dist.check(*sym_args)
    dim = dist.dimension
    return MatrixPSpace(symbol, dist, dim[0], dim[1]).value
71
+
72
+
73
class SampleMatrixScipy:
    """Returns the sample from scipy of the given distribution"""
    def __new__(cls, dist, size, seed=None):
        return cls._sample_scipy(dist, size, seed)

    @classmethod
    def _sample_scipy(cls, dist, size, seed):
        """Sample from SciPy.

        Returns an array of shape ``size + <matrix shape>``, or ``None``
        when ``dist`` has no SciPy counterpart registered here.
        """

        from scipy import stats as scipy_stats
        import numpy
        scipy_rv_map = {
            # FIX: ``random_state`` was previously omitted from the Wishart
            # call, so seeded Wishart sampling was not reproducible.
            'WishartDistribution': lambda dist, size, rand_state: scipy_stats.wishart.rvs(
                df=int(dist.n), scale=matrix2numpy(dist.scale_matrix, float), size=size,
                random_state=rand_state),
            'MatrixNormalDistribution': lambda dist, size, rand_state: scipy_stats.matrix_normal.rvs(
                mean=matrix2numpy(dist.location_matrix, float),
                rowcov=matrix2numpy(dist.scale_matrix_1, float),
                colcov=matrix2numpy(dist.scale_matrix_2, float), size=size, random_state=rand_state)
        }

        # Per-distribution shape of one sampled matrix, used to reshape the
        # flat batch returned by SciPy back to ``size + shape``.
        sample_shape = {
            'WishartDistribution': lambda dist: dist.scale_matrix.shape,
            'MatrixNormalDistribution': lambda dist: dist.location_matrix.shape
        }

        dist_list = scipy_rv_map.keys()

        if dist.__class__.__name__ not in dist_list:
            return None

        # An int (or None) seed creates a fresh Generator; anything else is
        # assumed to already be a random state / Generator object.
        if seed is None or isinstance(seed, int):
            rand_state = numpy.random.default_rng(seed=seed)
        else:
            rand_state = seed
        samp = scipy_rv_map[dist.__class__.__name__](dist, prod(size), rand_state)
        return samp.reshape(size + sample_shape[dist.__class__.__name__](dist))
109
+
110
+
111
class SampleMatrixNumpy:
    """Returns the sample from numpy of the given distribution"""

    ### TODO: Add tests after adding matrix distributions in numpy_rv_map
    def __new__(cls, dist, size, seed=None):
        return cls._sample_numpy(dist, size, seed)

    @classmethod
    def _sample_numpy(cls, dist, size, seed):
        """Sample from NumPy.

        Currently a stub: no matrix distributions are mapped yet, so this
        always returns ``None`` and the caller reports that sampling is
        unimplemented for the requested library.
        """
        # Distribution class name -> numpy sampler (empty for now).
        numpy_rv_map = {
        }

        # Distribution class name -> shape of a single draw (empty for now).
        sample_shape = {
        }

        dist_list = numpy_rv_map.keys()

        if dist.__class__.__name__ not in dist_list:
            return None

        import numpy
        # None/int seeds create a fresh Generator; otherwise assume a numpy
        # random state / generator was passed in.
        if seed is None or isinstance(seed, int):
            rand_state = numpy.random.default_rng(seed=seed)
        else:
            rand_state = seed
        samp = numpy_rv_map[dist.__class__.__name__](dist, prod(size), rand_state)
        return samp.reshape(size + sample_shape[dist.__class__.__name__](dist))
140
+
141
+
142
class SampleMatrixPymc:
    """Returns the sample from pymc of the given distribution"""

    def __new__(cls, dist, size, seed=None):
        return cls._sample_pymc(dist, size, seed)

    @classmethod
    def _sample_pymc(cls, dist, size, seed):
        """Sample from PyMC.

        Builds the matching PyMC random variable inside a fresh model,
        draws ``prod(size)`` MCMC samples and reshapes them to ``size`` plus
        the per-draw matrix shape.  Returns ``None`` for unmapped
        distributions.
        """
        try:
            import pymc
        except ImportError:
            # Fall back to the legacy package name.
            import pymc3 as pymc
        pymc_rv_map = {
            'MatrixNormalDistribution': lambda dist: pymc.MatrixNormal('X',
                mu=matrix2numpy(dist.location_matrix, float),
                rowcov=matrix2numpy(dist.scale_matrix_1, float),
                colcov=matrix2numpy(dist.scale_matrix_2, float),
                shape=dist.location_matrix.shape),
            'WishartDistribution': lambda dist: pymc.WishartBartlett('X',
                nu=int(dist.n), S=matrix2numpy(dist.scale_matrix, float))
        }

        # Distribution class name -> shape of a single draw.
        sample_shape = {
            'WishartDistribution': lambda dist: dist.scale_matrix.shape,
            'MatrixNormalDistribution' : lambda dist: dist.location_matrix.shape
        }

        dist_list = pymc_rv_map.keys()

        if dist.__class__.__name__ not in dist_list:
            return None
        import logging
        # Silence PyMC's sampling chatter.
        logging.getLogger("pymc").setLevel(logging.ERROR)
        with pymc.Model():
            pymc_rv_map[dist.__class__.__name__](dist)
            samps = pymc.sample(draws=prod(size), chains=1, progressbar=False, random_seed=seed, return_inferencedata=False, compute_convergence_checks=False)['X']
        return samps.reshape(size + sample_shape[dist.__class__.__name__](dist))
181
+
182
# Dispatch table: sampling-library name -> helper class that draws samples
# with that library ('pymc3' is kept as a backward-compatible alias of 'pymc').
_get_sample_class_matrixrv = {
    'scipy': SampleMatrixScipy,
    'pymc3': SampleMatrixPymc,
    'pymc': SampleMatrixPymc,
    'numpy': SampleMatrixNumpy
}
188
+
189
+ ################################################################################
190
+ #-------------------------Matrix Distribution----------------------------------#
191
+ ################################################################################
192
+
193
class MatrixDistribution(Distribution, NamedArgsMixin):
    """
    Abstract base class for matrix-variate distributions.

    Subclasses supply ``check``, ``dimension`` and ``pdf``; this class
    handles argument sympification, direct pdf evaluation via ``__call__``
    and library-dispatched sampling.
    """
    def __new__(cls, *args):
        # Plain Python lists become immutable SymPy matrices; everything
        # else is sympified as-is.
        converted = []
        for arg in args:
            if isinstance(arg, list):
                converted.append(ImmutableMatrix(arg))
            else:
                converted.append(_sympify(arg))
        return Basic.__new__(cls, *converted)

    @staticmethod
    def check(*args):
        # Default: accept anything; subclasses validate their parameters.
        pass

    def __call__(self, expr):
        # Calling a distribution on a point evaluates its density there.
        if isinstance(expr, list):
            expr = ImmutableMatrix(expr)
        return self.pdf(expr)

    def sample(self, size=(), library='scipy', seed=None):
        """
        Internal sample method

        Returns dictionary mapping RandomSymbol to realization value.
        """
        libraries = ['scipy', 'numpy', 'pymc3', 'pymc']
        if library not in libraries:
            raise NotImplementedError("Sampling from %s is not supported yet."
                                      % str(library))
        if not import_module(library):
            raise ValueError("Failed to import %s" % library)

        samps = _get_sample_class_matrixrv[library](self, size, seed)
        if samps is None:
            raise NotImplementedError(
                "Sampling for %s is not currently implemented from %s"
                % (self.__class__.__name__, library)
            )
        return samps
233
+
234
+ ################################################################################
235
+ #------------------------Matrix Distribution Types-----------------------------#
236
+ ################################################################################
237
+
238
+ #-------------------------------------------------------------------------------
239
+ # Matrix Gamma distribution ----------------------------------------------------
240
+
241
class MatrixGammaDistribution(MatrixDistribution):
    """Matrix gamma distribution with shape ``alpha``, scale ``beta`` and
    a positive-definite ``scale_matrix``."""

    _argnames = ('alpha', 'beta', 'scale_matrix')

    @staticmethod
    def check(alpha, beta, scale_matrix):
        # Positive definiteness can only be checked on explicit matrices,
        # so it is skipped for symbolic MatrixSymbol arguments.
        if not isinstance(scale_matrix, MatrixSymbol):
            _value_check(scale_matrix.is_positive_definite, "The shape "
                "matrix must be positive definite.")
        _value_check(scale_matrix.is_square, "Should "
            "be square matrix")
        _value_check(alpha.is_positive, "Shape parameter should be positive.")
        _value_check(beta.is_positive, "Scale parameter should be positive.")

    @property
    def set(self):
        # Support: all real k x k matrices (k = scale matrix dimension).
        k = self.scale_matrix.shape[0]
        return MatrixSet(k, k, S.Reals)

    @property
    def dimension(self):
        return self.scale_matrix.shape

    def pdf(self, x):
        """Density at ``x`` (a square Matrix or MatrixSymbol)."""
        alpha, beta, scale_matrix = self.alpha, self.beta, self.scale_matrix
        p = scale_matrix.shape[0]
        if isinstance(x, list):
            x = ImmutableMatrix(x)
        if not isinstance(x, (MatrixBase, MatrixSymbol)):
            raise ValueError("%s should be an isinstance of Matrix "
                    "or MatrixSymbol" % str(x))
        # exp(tr(-Sigma^-1 x / beta)) / (beta^(p*alpha) * Gamma_p(alpha))
        # * det(Sigma)^(-alpha) * det(x)^(alpha - (p+1)/2)
        sigma_inv_x = - Inverse(scale_matrix)*x / beta
        term1 = exp(Trace(sigma_inv_x))/((beta**(p*alpha)) * multigamma(alpha, p))
        term2 = (Determinant(scale_matrix))**(-alpha)
        term3 = (Determinant(x))**(alpha - S(p + 1)/2)
        return term1 * term2 * term3
277
+
278
def MatrixGamma(symbol, alpha, beta, scale_matrix):
    """
    Creates a random variable with Matrix Gamma Distribution.

    The density of the said distribution can be found at [1].

    Parameters
    ==========

    alpha: Positive Real number
        Shape Parameter
    beta: Positive Real number
        Scale Parameter
    scale_matrix: Positive definite real square matrix
        Scale Matrix

    Returns
    =======

    RandomSymbol

    Examples
    ========

    >>> from sympy.stats import density, MatrixGamma
    >>> from sympy import MatrixSymbol, symbols
    >>> a, b = symbols('a b', positive=True)
    >>> M = MatrixGamma('M', a, b, [[2, 1], [1, 2]])
    >>> X = MatrixSymbol('X', 2, 2)
    >>> density(M)(X).doit()
    exp(Trace(Matrix([
    [-2/3,  1/3],
    [ 1/3, -2/3]])*X)/b)*Determinant(X)**(a - 3/2)/(3**a*sqrt(pi)*b**(2*a)*gamma(a)*gamma(a - 1/2))
    >>> density(M)([[1, 0], [0, 1]]).doit()
    exp(-4/(3*b))/(3**a*sqrt(pi)*b**(2*a)*gamma(a)*gamma(a - 1/2))


    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Matrix_gamma_distribution

    """
    # Accept a plain nested list for convenience.
    if isinstance(scale_matrix, list):
        scale_matrix = ImmutableMatrix(scale_matrix)
    args = (alpha, beta, scale_matrix)
    return rv(symbol, MatrixGammaDistribution, args)
324
+
325
+ #-------------------------------------------------------------------------------
326
+ # Wishart Distribution ---------------------------------------------------------
327
+
328
class WishartDistribution(MatrixDistribution):
    """Wishart distribution with ``n`` degrees of freedom and a
    positive-definite ``scale_matrix``."""

    _argnames = ('n', 'scale_matrix')

    @staticmethod
    def check(n, scale_matrix):
        # Positive definiteness can only be checked on explicit matrices.
        if not isinstance(scale_matrix, MatrixSymbol):
            _value_check(scale_matrix.is_positive_definite, "The shape "
                "matrix must be positive definite.")
        _value_check(scale_matrix.is_square, "Should "
            "be square matrix")
        _value_check(n.is_positive, "Shape parameter should be positive.")

    @property
    def set(self):
        # Support: all real k x k matrices (k = scale matrix dimension).
        k = self.scale_matrix.shape[0]
        return MatrixSet(k, k, S.Reals)

    @property
    def dimension(self):
        return self.scale_matrix.shape

    def pdf(self, x):
        """Density at ``x`` (a square Matrix or MatrixSymbol)."""
        n, scale_matrix = self.n, self.scale_matrix
        p = scale_matrix.shape[0]
        if isinstance(x, list):
            x = ImmutableMatrix(x)
        if not isinstance(x, (MatrixBase, MatrixSymbol)):
            raise ValueError("%s should be an isinstance of Matrix "
                    "or MatrixSymbol" % str(x))
        # Standard Wishart pdf: the matrix gamma pdf specialised to
        # alpha = n/2, beta = 2.
        sigma_inv_x = - Inverse(scale_matrix)*x / S(2)
        term1 = exp(Trace(sigma_inv_x))/((2**(p*n/S(2))) * multigamma(n/S(2), p))
        term2 = (Determinant(scale_matrix))**(-n/S(2))
        term3 = (Determinant(x))**(S(n - p - 1)/2)
        return term1 * term2 * term3
363
+
364
def Wishart(symbol, n, scale_matrix):
    """
    Creates a random variable with Wishart Distribution.

    The density of the said distribution can be found at [1].

    Parameters
    ==========

    n: Positive Real number
        Represents degrees of freedom
    scale_matrix: Positive definite real square matrix
        Scale Matrix

    Returns
    =======

    RandomSymbol

    Examples
    ========

    >>> from sympy.stats import density, Wishart
    >>> from sympy import MatrixSymbol, symbols
    >>> n = symbols('n', positive=True)
    >>> W = Wishart('W', n, [[2, 1], [1, 2]])
    >>> X = MatrixSymbol('X', 2, 2)
    >>> density(W)(X).doit()
    exp(Trace(Matrix([
    [-1/3,  1/6],
    [ 1/6, -1/3]])*X))*Determinant(X)**(n/2 - 3/2)/(2**n*3**(n/2)*sqrt(pi)*gamma(n/2)*gamma(n/2 - 1/2))
    >>> density(W)([[1, 0], [0, 1]]).doit()
    exp(-2/3)/(2**n*3**(n/2)*sqrt(pi)*gamma(n/2)*gamma(n/2 - 1/2))

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Wishart_distribution

    """
    # Accept a plain nested list for convenience.
    if isinstance(scale_matrix, list):
        scale_matrix = ImmutableMatrix(scale_matrix)
    args = (n, scale_matrix)
    return rv(symbol, WishartDistribution, args)
407
+
408
+ #-------------------------------------------------------------------------------
409
+ # Matrix Normal distribution ---------------------------------------------------
410
+
411
class MatrixNormalDistribution(MatrixDistribution):
    """Matrix-variate normal distribution with an n x p location matrix and
    two scale (covariance) matrices: U (n x n, rows) and V (p x p, columns)."""

    _argnames = ('location_matrix', 'scale_matrix_1', 'scale_matrix_2')

    @staticmethod
    def check(location_matrix, scale_matrix_1, scale_matrix_2):
        # Positive definiteness can only be checked on explicit matrices,
        # so symbolic MatrixSymbol arguments are skipped.
        if not isinstance(scale_matrix_1, MatrixSymbol):
            _value_check(scale_matrix_1.is_positive_definite, "The shape "
                "matrix must be positive definite.")
        if not isinstance(scale_matrix_2, MatrixSymbol):
            _value_check(scale_matrix_2.is_positive_definite, "The shape "
                "matrix must be positive definite.")
        # BUG FIX: error messages previously read "should be be square matrix".
        _value_check(scale_matrix_1.is_square, "Scale matrix 1 should be "
            "a square matrix")
        _value_check(scale_matrix_2.is_square, "Scale matrix 2 should be "
            "a square matrix")
        n = location_matrix.shape[0]
        p = location_matrix.shape[1]
        _value_check(scale_matrix_1.shape[0] == n, "Scale matrix 1 should be"
            " of shape %s x %s"% (str(n), str(n)))
        _value_check(scale_matrix_2.shape[0] == p, "Scale matrix 2 should be"
            " of shape %s x %s"% (str(p), str(p)))

    @property
    def set(self):
        # Support: all real n x p matrices.
        n, p = self.location_matrix.shape
        return MatrixSet(n, p, S.Reals)

    @property
    def dimension(self):
        return self.location_matrix.shape

    def pdf(self, x):
        """Density at ``x`` (an n x p Matrix or MatrixSymbol)."""
        M, U, V = self.location_matrix, self.scale_matrix_1, self.scale_matrix_2
        n, p = M.shape
        if isinstance(x, list):
            x = ImmutableMatrix(x)
        if not isinstance(x, (MatrixBase, MatrixSymbol)):
            raise ValueError("%s should be an isinstance of Matrix "
                    "or MatrixSymbol" % str(x))
        # exp(-tr(V^-1 (x-M)^T U^-1 (x-M)) / 2)
        # / ((2*pi)^(n*p/2) * det(U)^(p/2) * det(V)^(n/2))
        term1 = Inverse(V)*Transpose(x - M)*Inverse(U)*(x - M)
        num = exp(-Trace(term1)/S(2))
        den = (2*pi)**(S(n*p)/2) * Determinant(U)**(S(p)/2) * Determinant(V)**(S(n)/2)
        return num/den
455
+
456
def MatrixNormal(symbol, location_matrix, scale_matrix_1, scale_matrix_2):
    """
    Creates a random variable with Matrix Normal Distribution.

    The density of the said distribution can be found at [1].

    Parameters
    ==========

    location_matrix: Real ``n x p`` matrix
        Represents degrees of freedom
    scale_matrix_1: Positive definite matrix
        Scale Matrix of shape ``n x n``
    scale_matrix_2: Positive definite matrix
        Scale Matrix of shape ``p x p``

    Returns
    =======

    RandomSymbol

    Examples
    ========

    >>> from sympy import MatrixSymbol
    >>> from sympy.stats import density, MatrixNormal
    >>> M = MatrixNormal('M', [[1, 2]], [1], [[1, 0], [0, 1]])
    >>> X = MatrixSymbol('X', 1, 2)
    >>> density(M)(X).doit()
    exp(-Trace((Matrix([
    [-1],
    [-2]]) + X.T)*(Matrix([[-1, -2]]) + X))/2)/(2*pi)
    >>> density(M)([[3, 4]]).doit()
    exp(-4)/(2*pi)

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Matrix_normal_distribution

    """
    # Accept plain nested lists for any of the matrix arguments.
    matrices = []
    for mat in (location_matrix, scale_matrix_1, scale_matrix_2):
        if isinstance(mat, list):
            mat = ImmutableMatrix(mat)
        matrices.append(mat)
    return rv(symbol, MatrixNormalDistribution, tuple(matrices))
505
+
506
+ #-------------------------------------------------------------------------------
507
+ # Matrix Student's T distribution ---------------------------------------------------
508
+
509
class MatrixStudentTDistribution(MatrixDistribution):
    """Matrix-variate Student's t distribution with ``nu`` degrees of freedom,
    an n x p location matrix and two scale matrices (p x p and n x n)."""

    _argnames = ('nu', 'location_matrix', 'scale_matrix_1', 'scale_matrix_2')

    @staticmethod
    def check(nu, location_matrix, scale_matrix_1, scale_matrix_2):
        # `!= False` keeps symbolic (unknown) properties acceptable and only
        # rejects arguments that are definitely invalid.
        if not isinstance(scale_matrix_1, MatrixSymbol):
            _value_check(scale_matrix_1.is_positive_definite != False, "The shape "
                "matrix must be positive definite.")
        if not isinstance(scale_matrix_2, MatrixSymbol):
            _value_check(scale_matrix_2.is_positive_definite != False, "The shape "
                "matrix must be positive definite.")
        # BUG FIX: error messages previously read "should be be square matrix".
        _value_check(scale_matrix_1.is_square != False, "Scale matrix 1 should be "
            "a square matrix")
        _value_check(scale_matrix_2.is_square != False, "Scale matrix 2 should be "
            "a square matrix")
        n = location_matrix.shape[0]
        p = location_matrix.shape[1]
        _value_check(scale_matrix_1.shape[0] == p, "Scale matrix 1 should be"
            " of shape %s x %s" % (str(p), str(p)))
        _value_check(scale_matrix_2.shape[0] == n, "Scale matrix 2 should be"
            " of shape %s x %s" % (str(n), str(n)))
        _value_check(nu.is_positive != False, "Degrees of freedom must be positive")

    @property
    def set(self):
        # Support: all real n x p matrices.
        n, p = self.location_matrix.shape
        return MatrixSet(n, p, S.Reals)

    @property
    def dimension(self):
        return self.location_matrix.shape

    def pdf(self, x):
        """Density at ``x`` (an n x p Matrix or MatrixSymbol)."""
        from sympy.matrices.dense import eye
        if isinstance(x, list):
            x = ImmutableMatrix(x)
        if not isinstance(x, (MatrixBase, MatrixSymbol)):
            raise ValueError("%s should be an isinstance of Matrix "
                    "or MatrixSymbol" % str(x))
        nu, M, Omega, Sigma = self.nu, self.location_matrix, self.scale_matrix_1, self.scale_matrix_2
        n, p = M.shape

        # Normalisation constant K followed by the determinant kernel; see
        # https://en.wikipedia.org/wiki/Matrix_t-distribution
        K = multigamma((nu + n + p - 1)/2, p) * Determinant(Omega)**(-n/2) * Determinant(Sigma)**(-p/2) \
            / ((pi)**(n*p/2) * multigamma((nu + p - 1)/2, p))
        return K * (Determinant(eye(n) + Inverse(Sigma)*(x - M)*Inverse(Omega)*Transpose(x - M))) \
               **(-(nu + n + p - 1)/2)
556
+
557
+
558
+
559
def MatrixStudentT(symbol, nu, location_matrix, scale_matrix_1, scale_matrix_2):
    """
    Creates a random variable with Matrix Student's T Distribution.

    The density of the said distribution can be found at [1].

    Parameters
    ==========

    nu: Positive Real number
        degrees of freedom
    location_matrix: Positive definite real square matrix
        Location Matrix of shape ``n x p``
    scale_matrix_1: Positive definite real square matrix
        Scale Matrix of shape ``p x p``
    scale_matrix_2: Positive definite real square matrix
        Scale Matrix of shape ``n x n``

    Returns
    =======

    RandomSymbol

    Examples
    ========

    >>> from sympy import MatrixSymbol,symbols
    >>> from sympy.stats import density, MatrixStudentT
    >>> v = symbols('v',positive=True)
    >>> M = MatrixStudentT('M', v, [[1, 2]], [[1, 0], [0, 1]], [1])
    >>> X = MatrixSymbol('X', 1, 2)
    >>> density(M)(X)
    gamma(v/2 + 1)*Determinant((Matrix([[-1, -2]]) + X)*(Matrix([
    [-1],
    [-2]]) + X.T) + Matrix([[1]]))**(-v/2 - 1)/(pi**1.0*gamma(v/2)*Determinant(Matrix([[1]]))**1.0*Determinant(Matrix([
    [1, 0],
    [0, 1]]))**0.5)

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Matrix_t-distribution

    """
    # Accept plain nested lists for the matrix arguments.
    if isinstance(location_matrix, list):
        location_matrix = ImmutableMatrix(location_matrix)
    if isinstance(scale_matrix_1, list):
        scale_matrix_1 = ImmutableMatrix(scale_matrix_1)
    if isinstance(scale_matrix_2, list):
        scale_matrix_2 = ImmutableMatrix(scale_matrix_2)
    args = (nu, location_matrix, scale_matrix_1, scale_matrix_2)
    return rv(symbol, MatrixStudentTDistribution, args)
parrot/lib/python3.10/site-packages/sympy/stats/random_matrix.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from sympy.core.basic import Basic
2
+ from sympy.stats.rv import PSpace, _symbol_converter, RandomMatrixSymbol
3
+
4
class RandomMatrixPSpace(PSpace):
    """
    Represents probability space for
    random matrices. It contains the mechanics
    for handling the API calls for random matrices.
    """
    def __new__(cls, sym, model=None):
        sym = _symbol_converter(sym)
        # The model is optional; when omitted only the symbol is stored.
        if model:
            return Basic.__new__(cls, sym, model)
        else:
            return Basic.__new__(cls, sym)

    @property
    def model(self):
        """Ensemble model attached to this space, or ``None``."""
        try:
            return self.args[1]
        except IndexError:
            return None

    def compute_density(self, expr, *args):
        """Return the density of ``expr`` via the attached model.

        Only expressions that are a bare ``RandomMatrixSymbol`` are
        supported; compound expressions raise ``NotImplementedError``.
        """
        rms = expr.atoms(RandomMatrixSymbol)
        if len(rms) > 2 or (not isinstance(expr, RandomMatrixSymbol)):
            raise NotImplementedError("Currently, no algorithm has been "
                    "implemented to handle general expressions containing "
                    "multiple random matrices.")
        return self.model.density(expr)
parrot/lib/python3.10/site-packages/sympy/stats/random_matrix_models.py ADDED
@@ -0,0 +1,457 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from sympy.concrete.products import Product
2
+ from sympy.concrete.summations import Sum
3
+ from sympy.core.basic import Basic
4
+ from sympy.core.function import Lambda
5
+ from sympy.core.numbers import (I, pi)
6
+ from sympy.core.singleton import S
7
+ from sympy.core.symbol import Dummy
8
+ from sympy.functions.elementary.complexes import Abs
9
+ from sympy.functions.elementary.exponential import exp
10
+ from sympy.functions.special.gamma_functions import gamma
11
+ from sympy.integrals.integrals import Integral
12
+ from sympy.matrices.expressions.matexpr import MatrixSymbol
13
+ from sympy.matrices.expressions.trace import Trace
14
+ from sympy.tensor.indexed import IndexedBase
15
+ from sympy.core.sympify import _sympify
16
+ from sympy.stats.rv import _symbol_converter, Density, RandomMatrixSymbol, is_random
17
+ from sympy.stats.joint_rv_types import JointDistributionHandmade
18
+ from sympy.stats.random_matrix import RandomMatrixPSpace
19
+ from sympy.tensor.array import ArrayComprehension
20
+
21
# Public API of this module.
__all__ = [
    'CircularEnsemble',
    'CircularUnitaryEnsemble',
    'CircularOrthogonalEnsemble',
    'CircularSymplecticEnsemble',
    'GaussianEnsemble',
    'GaussianUnitaryEnsemble',
    'GaussianOrthogonalEnsemble',
    'GaussianSymplecticEnsemble',
    'joint_eigen_distribution',
    'JointEigenDistribution',
    'level_spacing_distribution'
]
34
+
35
@is_random.register(RandomMatrixSymbol)
def _(x):
    # A random matrix symbol is, by definition, always a random quantity.
    return True
38
+
39
+
40
class RandomMatrixEnsembleModel(Basic):
    """
    Base class for random matrix ensembles.
    It acts as an umbrella and contains
    the methods common to all the ensembles
    defined in sympy.stats.random_matrix_models.
    """
    def __new__(cls, sym, dim=None):
        sym = _symbol_converter(sym)
        dim = _sympify(dim)
        # Reject only a definite non-integer; symbolic dims pass through.
        if dim.is_integer == False:
            raise ValueError("Dimension of the random matrices must be "
                             "integers, received %s instead."%(dim))
        return Basic.__new__(cls, sym, dim)

    @property
    def symbol(self):
        return self.args[0]

    @property
    def dimension(self):
        return self.args[1]

    def density(self, expr):
        # Generic ensembles have no closed form: wrap in an unevaluated Density.
        return Density(expr)

    def __call__(self, expr):
        return self.density(expr)
62
+
63
class GaussianEnsembleModel(RandomMatrixEnsembleModel):
    """
    Abstract class for Gaussian ensembles.
    Contains the properties common to all the
    gaussian ensembles.

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Random_matrix#Gaussian_ensembles
    .. [2] https://arxiv.org/pdf/1712.07903.pdf
    """
    def _compute_normalization_constant(self, beta, n):
        """
        Helper function for computing normalization
        constant for joint probability density of eigen
        values of Gaussian ensembles.

        References
        ==========

        .. [1] https://en.wikipedia.org/wiki/Selberg_integral#Mehta's_integral
        """
        n = S(n)
        # Product of gamma ratios from Mehta's integral.
        prod_term = lambda j: gamma(1 + beta*S(j)/2)/gamma(S.One + beta/S(2))
        j = Dummy('j', integer=True, positive=True)
        term1 = Product(prod_term(j), (j, 1, n)).doit()
        term2 = (2/(beta*n))**(beta*n*(n - 1)/4 + n/2)
        term3 = (2*pi)**(n/2)
        return term1 * term2 * term3

    def _compute_joint_eigen_distribution(self, beta):
        """
        Helper function for computing the joint
        probability distribution of eigen values
        of the random matrix.
        """
        n = self.dimension
        Zbn = self._compute_normalization_constant(beta, n)
        # l[k] are the eigenvalue symbols of the joint pdf.
        l = IndexedBase('l')
        i = Dummy('i', integer=True, positive=True)
        j = Dummy('j', integer=True, positive=True)
        k = Dummy('k', integer=True, positive=True)
        # Gaussian weight on the eigenvalues ...
        term1 = exp((-S(n)/2) * Sum(l[k]**2, (k, 1, n)).doit())
        # ... times the Vandermonde-type repulsion factor |l_j - l_i|^beta.
        sub_term = Lambda(i, Product(Abs(l[j] - l[i])**beta, (j, i + 1, n)))
        term2 = Product(sub_term(i).doit(), (i, 1, n - 1)).doit()
        syms = ArrayComprehension(l[k], (k, 1, n)).doit()
        return Lambda(tuple(syms), (term1 * term2)/Zbn)
111
+
112
class GaussianUnitaryEnsembleModel(GaussianEnsembleModel):
    """GUE: Hermitian matrices, Dyson index beta = 2."""
    @property
    def normalization_constant(self):
        n = self.dimension
        return 2**(S(n)/2) * pi**(S(n**2)/2)

    def density(self, expr):
        n, ZGUE = self.dimension, self.normalization_constant
        h_pspace = RandomMatrixPSpace('P', model=self)
        H = RandomMatrixSymbol('H', n, n, pspace=h_pspace)
        return Lambda(H, exp(-S(n)/2 * Trace(H**2))/ZGUE)(expr)

    def joint_eigen_distribution(self):
        return self._compute_joint_eigen_distribution(S(2))

    def level_spacing_distribution(self):
        # Wigner surmise for beta = 2.
        s = Dummy('s')
        f = (32/pi**2)*(s**2)*exp((-4/pi)*s**2)
        return Lambda(s, f)
131
+
132
class GaussianOrthogonalEnsembleModel(GaussianEnsembleModel):
    """GOE: real symmetric matrices, Dyson index beta = 1."""
    @property
    def normalization_constant(self):
        # Left as an unevaluated Integral over matrix space.
        n = self.dimension
        _H = MatrixSymbol('_H', n, n)
        return Integral(exp(-S(n)/4 * Trace(_H**2)))

    def density(self, expr):
        n, ZGOE = self.dimension, self.normalization_constant
        h_pspace = RandomMatrixPSpace('P', model=self)
        H = RandomMatrixSymbol('H', n, n, pspace=h_pspace)
        return Lambda(H, exp(-S(n)/4 * Trace(H**2))/ZGOE)(expr)

    def joint_eigen_distribution(self):
        return self._compute_joint_eigen_distribution(S.One)

    def level_spacing_distribution(self):
        # Wigner surmise for beta = 1.
        s = Dummy('s')
        f = (pi/2)*s*exp((-pi/4)*s**2)
        return Lambda(s, f)
152
+
153
class GaussianSymplecticEnsembleModel(GaussianEnsembleModel):
    """GSE: quaternionic self-dual matrices, Dyson index beta = 4."""
    @property
    def normalization_constant(self):
        # Left as an unevaluated Integral over matrix space.
        n = self.dimension
        _H = MatrixSymbol('_H', n, n)
        return Integral(exp(-S(n) * Trace(_H**2)))

    def density(self, expr):
        n, ZGSE = self.dimension, self.normalization_constant
        h_pspace = RandomMatrixPSpace('P', model=self)
        H = RandomMatrixSymbol('H', n, n, pspace=h_pspace)
        return Lambda(H, exp(-S(n) * Trace(H**2))/ZGSE)(expr)

    def joint_eigen_distribution(self):
        return self._compute_joint_eigen_distribution(S(4))

    def level_spacing_distribution(self):
        # Wigner surmise for beta = 4.
        s = Dummy('s')
        f = ((S(2)**18)/((S(3)**6)*(pi**3)))*(s**4)*exp((-64/(9*pi))*s**2)
        return Lambda(s, f)
173
+
174
def GaussianEnsemble(sym, dim):
    """Create a random matrix symbol for a generic Gaussian ensemble."""
    sym = _symbol_converter(sym)
    dim = _sympify(dim)
    pspace = RandomMatrixPSpace(sym, model=GaussianEnsembleModel(sym, dim))
    return RandomMatrixSymbol(sym, dim, dim, pspace=pspace)
179
+
180
def GaussianUnitaryEnsemble(sym, dim):
    """
    Represents Gaussian Unitary Ensembles.

    Examples
    ========

    >>> from sympy.stats import GaussianUnitaryEnsemble as GUE, density
    >>> from sympy import MatrixSymbol
    >>> G = GUE('U', 2)
    >>> X = MatrixSymbol('X', 2, 2)
    >>> density(G)(X)
    exp(-Trace(X**2))/(2*pi**2)
    """
    sym = _symbol_converter(sym)
    dim = _sympify(dim)
    pspace = RandomMatrixPSpace(sym, model=GaussianUnitaryEnsembleModel(sym, dim))
    return RandomMatrixSymbol(sym, dim, dim, pspace=pspace)
198
+
199
def GaussianOrthogonalEnsemble(sym, dim):
    """
    Represents Gaussian Orthogonal Ensembles.

    Examples
    ========

    >>> from sympy.stats import GaussianOrthogonalEnsemble as GOE, density
    >>> from sympy import MatrixSymbol
    >>> G = GOE('U', 2)
    >>> X = MatrixSymbol('X', 2, 2)
    >>> density(G)(X)
    exp(-Trace(X**2)/2)/Integral(exp(-Trace(_H**2)/2), _H)
    """
    sym = _symbol_converter(sym)
    dim = _sympify(dim)
    pspace = RandomMatrixPSpace(sym, model=GaussianOrthogonalEnsembleModel(sym, dim))
    return RandomMatrixSymbol(sym, dim, dim, pspace=pspace)
217
+
218
def GaussianSymplecticEnsemble(sym, dim):
    """
    Represents Gaussian Symplectic Ensembles.

    Examples
    ========

    >>> from sympy.stats import GaussianSymplecticEnsemble as GSE, density
    >>> from sympy import MatrixSymbol
    >>> G = GSE('U', 2)
    >>> X = MatrixSymbol('X', 2, 2)
    >>> density(G)(X)
    exp(-2*Trace(X**2))/Integral(exp(-2*Trace(_H**2)), _H)
    """
    sym = _symbol_converter(sym)
    dim = _sympify(dim)
    pspace = RandomMatrixPSpace(sym, model=GaussianSymplecticEnsembleModel(sym, dim))
    return RandomMatrixSymbol(sym, dim, dim, pspace=pspace)
236
+
237
class CircularEnsembleModel(RandomMatrixEnsembleModel):
    """
    Abstract class for Circular ensembles.
    Contains the properties and methods
    common to all the circular ensembles.

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Circular_ensemble
    """
    def density(self, expr):
        # TODO : Add support for Lie groups(as extensions of sympy.diffgeom)
        # and define measures on them
        raise NotImplementedError("Support for Haar measure hasn't been "
                                  "implemented yet, therefore the density of "
                                  "%s cannot be computed."%(self))

    def _compute_joint_eigen_distribution(self, beta):
        """
        Helper function to compute the joint distribution of phases
        of the complex eigen values of matrices belonging to any
        circular ensembles.
        """
        n = self.dimension
        # Normalisation constant for the circular beta-ensemble.
        Zbn = ((2*pi)**n)*(gamma(beta*n/2 + 1)/S(gamma(beta/2 + 1))**n)
        # t[i] are the eigenvalue phases.
        t = IndexedBase('t')
        i, j, k = (Dummy('i', integer=True), Dummy('j', integer=True),
                   Dummy('k', integer=True))
        syms = ArrayComprehension(t[i], (i, 1, n)).doit()
        # Pairwise repulsion of eigenphases on the unit circle.
        f = Product(Product(Abs(exp(I*t[k]) - exp(I*t[j]))**beta, (j, k + 1, n)).doit(),
                    (k, 1, n - 1)).doit()
        return Lambda(tuple(syms), f/Zbn)
270
+
271
class CircularUnitaryEnsembleModel(CircularEnsembleModel):
    # CUE corresponds to Dyson index beta = 2.
    def joint_eigen_distribution(self):
        return self._compute_joint_eigen_distribution(S(2))
274
+
275
class CircularOrthogonalEnsembleModel(CircularEnsembleModel):
    # COE corresponds to Dyson index beta = 1.
    def joint_eigen_distribution(self):
        return self._compute_joint_eigen_distribution(S.One)
278
+
279
class CircularSymplecticEnsembleModel(CircularEnsembleModel):
    # CSE corresponds to Dyson index beta = 4.
    def joint_eigen_distribution(self):
        return self._compute_joint_eigen_distribution(S(4))
282
+
283
def CircularEnsemble(sym, dim):
    """Create a random matrix symbol for a generic circular ensemble."""
    sym = _symbol_converter(sym)
    dim = _sympify(dim)
    pspace = RandomMatrixPSpace(sym, model=CircularEnsembleModel(sym, dim))
    return RandomMatrixSymbol(sym, dim, dim, pspace=pspace)
288
+
289
def CircularUnitaryEnsemble(sym, dim):
    """
    Represents Circular Unitary Ensembles.

    Examples
    ========

    >>> from sympy.stats import CircularUnitaryEnsemble as CUE
    >>> from sympy.stats import joint_eigen_distribution
    >>> C = CUE('U', 1)
    >>> joint_eigen_distribution(C)
    Lambda(t[1], Product(Abs(exp(I*t[_j]) - exp(I*t[_k]))**2, (_j, _k + 1, 1), (_k, 1, 0))/(2*pi))

    Note
    ====

    As can be seen above in the example, density of CircularUnitaryEnsemble
    is not evaluated because the exact definition is based on Haar measure of
    the unitary group, which is not unique.
    """
    sym, dim = _symbol_converter(sym), _sympify(dim)
    model = CircularUnitaryEnsembleModel(sym, dim)
    rmp = RandomMatrixPSpace(sym, model=model)
    return RandomMatrixSymbol(sym, dim, dim, pspace=rmp)
313
+
314
def CircularOrthogonalEnsemble(sym, dim):
    """
    Represents Circular Orthogonal Ensembles.

    Examples
    ========

    >>> from sympy.stats import CircularOrthogonalEnsemble as COE
    >>> from sympy.stats import joint_eigen_distribution
    >>> C = COE('O', 1)
    >>> joint_eigen_distribution(C)
    Lambda(t[1], Product(Abs(exp(I*t[_j]) - exp(I*t[_k])), (_j, _k + 1, 1), (_k, 1, 0))/(2*pi))

    Note
    ====

    As can be seen above in the example, density of CircularOrthogonalEnsemble
    is not evaluated because the exact definition is based on Haar measure of
    the unitary group, which is not unique.
    """
    sym, dim = _symbol_converter(sym), _sympify(dim)
    model = CircularOrthogonalEnsembleModel(sym, dim)
    rmp = RandomMatrixPSpace(sym, model=model)
    return RandomMatrixSymbol(sym, dim, dim, pspace=rmp)
338
+
339
def CircularSymplecticEnsemble(sym, dim):
    """
    Represents Circular Symplectic Ensembles.

    Examples
    ========

    >>> from sympy.stats import CircularSymplecticEnsemble as CSE
    >>> from sympy.stats import joint_eigen_distribution
    >>> C = CSE('S', 1)
    >>> joint_eigen_distribution(C)
    Lambda(t[1], Product(Abs(exp(I*t[_j]) - exp(I*t[_k]))**4, (_j, _k + 1, 1), (_k, 1, 0))/(2*pi))

    Note
    ====

    As can be seen above in the example, density of CircularSymplecticEnsemble
    is not evaluated because the exact definition is based on haar measure of
    unitary group which is not unique.
    """
    # Normalize user input: the symbol may be a string, the dimension any
    # sympifiable object.
    sym, dim = _symbol_converter(sym), _sympify(dim)
    model = CircularSymplecticEnsembleModel(sym, dim)
    rmp = RandomMatrixPSpace(sym, model=model)
    return RandomMatrixSymbol(sym, dim, dim, pspace=rmp)
def joint_eigen_distribution(mat):
    """
    For obtaining joint probability distribution
    of eigen values of random matrix.

    Parameters
    ==========

    mat: RandomMatrixSymbol
        The matrix symbol whose eigen values are to be considered.

    Returns
    =======

    Lambda

    Examples
    ========

    >>> from sympy.stats import GaussianUnitaryEnsemble as GUE
    >>> from sympy.stats import joint_eigen_distribution
    >>> U = GUE('U', 2)
    >>> joint_eigen_distribution(U)
    Lambda((l[1], l[2]), exp(-l[1]**2 - l[2]**2)*Product(Abs(l[_i] - l[_j])**2, (_j, _i + 1, 2), (_i, 1, 1))/pi)
    """
    # The computation itself lives on the ensemble model attached to the
    # symbol's probability space; this is just a validated dispatch.
    if isinstance(mat, RandomMatrixSymbol):
        return mat.pspace.model.joint_eigen_distribution()
    raise ValueError("%s is not of type, RandomMatrixSymbol."%(mat))
def JointEigenDistribution(mat):
    """
    Creates joint distribution of eigen values of matrices with random
    expressions.

    Parameters
    ==========

    mat: Matrix
        The matrix under consideration.

    Returns
    =======

    JointDistributionHandmade

    Examples
    ========

    >>> from sympy.stats import Normal, JointEigenDistribution
    >>> from sympy import Matrix
    >>> A = [[Normal('A00', 0, 1), Normal('A01', 0, 1)],
    ... [Normal('A10', 0, 1), Normal('A11', 0, 1)]]
    >>> JointEigenDistribution(Matrix(A))
    JointDistributionHandmade(-sqrt(A00**2 - 2*A00*A11 + 4*A01*A10 + A11**2)/2
    + A00/2 + A11/2, sqrt(A00**2 - 2*A00*A11 + 4*A01*A10 + A11**2)/2 + A00/2 + A11/2)

    """
    eigenvals = mat.eigenvals(multiple=True)
    # Every distinct eigenvalue expression must carry randomness, otherwise
    # a joint distribution over them is meaningless.
    if any(not is_random(ev) for ev in set(eigenvals)):
        raise ValueError("Eigen values do not have any random expression, "
                         "joint distribution cannot be generated.")
    return JointDistributionHandmade(*eigenvals)
def level_spacing_distribution(mat):
    """
    For obtaining distribution of level spacings.

    Parameters
    ==========

    mat: RandomMatrixSymbol
        The random matrix symbol whose eigen values are
        to be considered for finding the level spacings.

    Returns
    =======

    Lambda

    Examples
    ========

    >>> from sympy.stats import GaussianUnitaryEnsemble as GUE
    >>> from sympy.stats import level_spacing_distribution
    >>> U = GUE('U', 2)
    >>> level_spacing_distribution(U)
    Lambda(_s, 32*_s**2*exp(-4*_s**2/pi)/pi**2)

    References
    ==========

    .. [1] https://en.wikipedia.org/wiki/Random_matrix#Distribution_of_level_spacings
    """
    # Validate the argument the same way joint_eigen_distribution does, so a
    # non-random-matrix input raises a clear ValueError rather than an
    # AttributeError from the pspace lookup below.
    if not isinstance(mat, RandomMatrixSymbol):
        raise ValueError("%s is not of type, RandomMatrixSymbol."%(mat))
    return mat.pspace.model.level_spacing_distribution()