ZTWHHH committed on
Commit
f808588
·
verified ·
1 Parent(s): ca2a3fc

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because it contains too many changes. See raw diff
Files changed (50) hide show
  1. .gitattributes +2 -0
  2. parrot/lib/python3.10/site-packages/scipy/datasets/__pycache__/_download_all.cpython-310.pyc +0 -0
  3. parrot/lib/python3.10/site-packages/scipy/datasets/__pycache__/_fetchers.cpython-310.pyc +0 -0
  4. parrot/lib/python3.10/site-packages/scipy/datasets/__pycache__/_registry.cpython-310.pyc +0 -0
  5. parrot/lib/python3.10/site-packages/scipy/datasets/__pycache__/_utils.cpython-310.pyc +0 -0
  6. parrot/lib/python3.10/site-packages/scipy/datasets/_fetchers.py +221 -0
  7. parrot/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  8. parrot/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/test_data.cpython-310.pyc +0 -0
  9. parrot/lib/python3.10/site-packages/scipy/datasets/tests/test_data.py +124 -0
  10. parrot/lib/python3.10/site-packages/scipy/integrate/__init__.py +110 -0
  11. parrot/lib/python3.10/site-packages/scipy/integrate/__pycache__/_bvp.cpython-310.pyc +0 -0
  12. parrot/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quad_vec.cpython-310.pyc +0 -0
  13. parrot/lib/python3.10/site-packages/scipy/integrate/__pycache__/dop.cpython-310.pyc +0 -0
  14. parrot/lib/python3.10/site-packages/scipy/integrate/__pycache__/vode.cpython-310.pyc +0 -0
  15. parrot/lib/python3.10/site-packages/scipy/integrate/_ivp/__init__.py +8 -0
  16. parrot/lib/python3.10/site-packages/scipy/integrate/_ivp/base.py +290 -0
  17. parrot/lib/python3.10/site-packages/scipy/integrate/_ivp/bdf.py +480 -0
  18. parrot/lib/python3.10/site-packages/scipy/integrate/_ivp/dop853_coefficients.py +193 -0
  19. parrot/lib/python3.10/site-packages/scipy/integrate/_ivp/radau.py +574 -0
  20. parrot/lib/python3.10/site-packages/scipy/integrate/_ode.py +1376 -0
  21. parrot/lib/python3.10/site-packages/scipy/integrate/_quadpack_py.py +1279 -0
  22. parrot/lib/python3.10/site-packages/scipy/integrate/_quadrature.py +1684 -0
  23. parrot/lib/python3.10/site-packages/scipy/integrate/_tanhsinh.py +1231 -0
  24. parrot/lib/python3.10/site-packages/scipy/integrate/dop.py +15 -0
  25. parrot/lib/python3.10/site-packages/scipy/integrate/lsoda.py +15 -0
  26. parrot/lib/python3.10/site-packages/scipy/integrate/quadpack.py +23 -0
  27. parrot/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  28. parrot/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test__quad_vec.cpython-310.pyc +0 -0
  29. parrot/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_odeint_jac.cpython-310.pyc +0 -0
  30. parrot/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadrature.cpython-310.pyc +0 -0
  31. parrot/lib/python3.10/site-packages/scipy/integrate/tests/test__quad_vec.py +215 -0
  32. parrot/lib/python3.10/site-packages/scipy/integrate/tests/test_banded_ode_solvers.py +218 -0
  33. parrot/lib/python3.10/site-packages/scipy/integrate/tests/test_odeint_jac.py +74 -0
  34. parrot/lib/python3.10/site-packages/scipy/integrate/tests/test_tanhsinh.py +947 -0
  35. parrot/lib/python3.10/site-packages/scipy/linalg/_solve_toeplitz.cpython-310-x86_64-linux-gnu.so +3 -0
  36. parrot/lib/python3.10/site-packages/scipy/misc/__pycache__/__init__.cpython-310.pyc +0 -0
  37. parrot/lib/python3.10/site-packages/scipy/misc/__pycache__/_common.cpython-310.pyc +0 -0
  38. parrot/lib/python3.10/site-packages/scipy/misc/tests/__pycache__/test_config.cpython-310.pyc +0 -0
  39. parrot/lib/python3.10/site-packages/scipy/ndimage/fourier.py +21 -0
  40. parrot/lib/python3.10/site-packages/scipy/odr/__init__.py +131 -0
  41. parrot/lib/python3.10/site-packages/scipy/odr/__pycache__/_models.cpython-310.pyc +0 -0
  42. parrot/lib/python3.10/site-packages/scipy/odr/_add_newdocs.py +34 -0
  43. parrot/lib/python3.10/site-packages/scipy/odr/_models.py +315 -0
  44. parrot/lib/python3.10/site-packages/scipy/odr/_odrpack.py +1151 -0
  45. parrot/lib/python3.10/site-packages/scipy/odr/models.py +20 -0
  46. parrot/lib/python3.10/site-packages/scipy/odr/odrpack.py +21 -0
  47. parrot/lib/python3.10/site-packages/scipy/odr/tests/__init__.py +0 -0
  48. parrot/lib/python3.10/site-packages/scipy/odr/tests/__pycache__/__init__.cpython-310.pyc +0 -0
  49. parrot/lib/python3.10/site-packages/scipy/odr/tests/test_odr.py +606 -0
  50. vllm/lib/python3.10/site-packages/sympy/benchmarks/__pycache__/__init__.cpython-310.pyc +0 -0
.gitattributes CHANGED
@@ -1692,3 +1692,5 @@ vllm/lib/python3.10/site-packages/sympy/tensor/__pycache__/tensor.cpython-310.py
1692
  vllm/lib/python3.10/site-packages/sympy/solvers/ode/__pycache__/single.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1693
  vllm/lib/python3.10/site-packages/sympy/solvers/__pycache__/solvers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1694
  vllm/lib/python3.10/site-packages/sympy/solvers/diophantine/__pycache__/diophantine.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
 
 
 
1692
  vllm/lib/python3.10/site-packages/sympy/solvers/ode/__pycache__/single.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1693
  vllm/lib/python3.10/site-packages/sympy/solvers/__pycache__/solvers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1694
  vllm/lib/python3.10/site-packages/sympy/solvers/diophantine/__pycache__/diophantine.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
1695
+ parrot/lib/python3.10/site-packages/scipy/linalg/_solve_toeplitz.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
1696
+ vllm/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
parrot/lib/python3.10/site-packages/scipy/datasets/__pycache__/_download_all.cpython-310.pyc ADDED
Binary file (1.7 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/datasets/__pycache__/_fetchers.cpython-310.pyc ADDED
Binary file (6.27 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/datasets/__pycache__/_registry.cpython-310.pyc ADDED
Binary file (758 Bytes). View file
 
parrot/lib/python3.10/site-packages/scipy/datasets/__pycache__/_utils.cpython-310.pyc ADDED
Binary file (2.35 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/datasets/_fetchers.py ADDED
@@ -0,0 +1,221 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from numpy import array, frombuffer, load
from ._registry import registry, registry_urls

# 'pooch' is an optional dependency: the module must stay importable without
# it.  When it is missing, `data_fetcher` is left as None so callers can raise
# a helpful ImportError lazily (see `fetch_data`).
try:
    import pooch
except ImportError:
    pooch = None
    data_fetcher = None
else:
    data_fetcher = pooch.create(
        # Use the default cache folder for the operating system
        # Pooch uses appdirs (https://github.com/ActiveState/appdirs) to
        # select an appropriate directory for the cache on each platform.
        path=pooch.os_cache("scipy-data"),

        # The remote data is on Github
        # base_url is a required param, even though we override this
        # using individual urls in the registry.
        base_url="https://github.com/scipy/",
        registry=registry,
        urls=registry_urls
    )
23
+
24
+
def fetch_data(dataset_name, data_fetcher=data_fetcher):
    """Return the local path of *dataset_name*, downloading it if needed.

    Raises
    ------
    ImportError
        If the optional dependency 'pooch' is not installed.
    """
    if data_fetcher is not None:
        # Pooch downloads on first use and serves the cached copy
        # afterwards; either way `fetch` returns the full file path.
        return data_fetcher.fetch(dataset_name)
    raise ImportError("Missing optional dependency 'pooch' required "
                      "for scipy.datasets module. Please use pip or "
                      "conda to install 'pooch'.")
32
+
33
+
def ascent():
    """
    Get an 8-bit grayscale bit-depth, 512 x 512 derived image for easy
    use in demos.

    The image is derived from
    https://pixnio.com/people/accent-to-the-top

    Returns
    -------
    ascent : ndarray
        convenient image to use for testing and demonstration

    Examples
    --------
    >>> import scipy.datasets
    >>> ascent = scipy.datasets.ascent()
    >>> ascent.shape
    (512, 512)
    >>> ascent.max()
    255

    >>> import matplotlib.pyplot as plt
    >>> plt.gray()
    >>> plt.imshow(ascent)
    >>> plt.show()

    """
    import pickle

    # First call downloads the file and returns its path; later calls hit
    # the local Pooch cache instead of the network.
    path = fetch_data("ascent.dat")
    # The payload is a pickled nested list; convert it to an ndarray.
    with open(path, 'rb') as stream:
        img = array(pickle.load(stream))
    return img
76
+
77
+
def electrocardiogram():
    """
    Load an electrocardiogram as an example for a 1-D signal.

    The returned signal is a 5 minute long electrocardiogram (ECG), a medical
    recording of the heart's electrical activity, sampled at 360 Hz.

    Returns
    -------
    ecg : ndarray
        The electrocardiogram in millivolt (mV) sampled at 360 Hz.

    Notes
    -----
    The provided signal is an excerpt (19:35 to 24:35) from the `record 208`_
    (lead MLII) provided by the MIT-BIH Arrhythmia Database [1]_ on
    PhysioNet [2]_. The excerpt includes noise induced artifacts, typical
    heartbeats as well as pathological changes.

    .. _record 208: https://physionet.org/physiobank/database/html/mitdbdir/records.htm#208

    .. versionadded:: 1.1.0

    References
    ----------
    .. [1] Moody GB, Mark RG. The impact of the MIT-BIH Arrhythmia Database.
           IEEE Eng in Med and Biol 20(3):45-50 (May-June 2001).
           (PMID: 11446209); :doi:`10.13026/C2F305`
    .. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh,
           Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. PhysioBank,
           PhysioToolkit, and PhysioNet: Components of a New Research Resource
           for Complex Physiologic Signals. Circulation 101(23):e215-e220;
           :doi:`10.1161/01.CIR.101.23.e215`

    Examples
    --------
    >>> from scipy.datasets import electrocardiogram
    >>> ecg = electrocardiogram()
    >>> ecg.shape, ecg.mean(), ecg.std()
    ((108000,), -0.16510875, 0.5992473991177294)
    """
    path = fetch_data("ecg.dat")
    # The file is an npz archive whose "ecg" entry holds the raw ADC
    # samples as np.uint16; widen to Python int before the arithmetic.
    with load(path) as archive:
        raw = archive["ecg"].astype(int)
    # Convert raw output of ADC to mV: (ecg - adc_zero) / adc_gain
    return (raw - 1024) / 200.0
175
+
176
+
def face(gray=False):
    """
    Get a 1024 x 768, color image of a raccoon face.

    The image is derived from
    https://pixnio.com/fauna-animals/raccoons/raccoon-procyon-lotor

    Parameters
    ----------
    gray : bool, optional
        If True return 8-bit grey-scale image, otherwise return a color image

    Returns
    -------
    face : ndarray
        image of a raccoon face

    Examples
    --------
    >>> import scipy.datasets
    >>> face = scipy.datasets.face()
    >>> face.shape
    (768, 1024, 3)
    >>> face.max()
    255
    >>> face.dtype
    dtype('uint8')

    >>> import matplotlib.pyplot as plt
    >>> plt.gray()
    >>> plt.imshow(face)
    >>> plt.show()

    """
    import bz2
    path = fetch_data("face.dat")
    with open(path, 'rb') as stream:
        compressed = stream.read()
    # The dataset is bz2-compressed raw RGB bytes; decompress and view
    # them as a (height, width, channel) array.
    pixels = frombuffer(bz2.decompress(compressed), dtype='uint8')
    pixels.shape = (768, 1024, 3)
    if gray is True:
        # Weighted channel sum (approximate luma coefficients), back to 8-bit.
        pixels = (0.21 * pixels[:, :, 0] + 0.71 * pixels[:, :, 1] +
                  0.07 * pixels[:, :, 2]).astype('uint8')
    return pixels
parrot/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (172 Bytes). View file
 
parrot/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/test_data.cpython-310.pyc ADDED
Binary file (3.84 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/datasets/tests/test_data.py ADDED
@@ -0,0 +1,124 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from scipy.datasets._registry import registry
2
+ from scipy.datasets._fetchers import data_fetcher
3
+ from scipy.datasets._utils import _clear_cache
4
+ from scipy.datasets import ascent, face, electrocardiogram, download_all
5
+ from numpy.testing import assert_equal, assert_almost_equal
6
+ import os
7
+ import pytest
8
+
9
# Unlike the library code, these tests hard-require pooch: every test below
# downloads or verifies cached dataset files.
try:
    import pooch
except ImportError:
    raise ImportError("Missing optional dependency 'pooch' required "
                      "for scipy.datasets module. Please use pip or "
                      "conda to install 'pooch'.")


# Directory of the Pooch cache that dataset files are fetched into.
data_dir = data_fetcher.path  # type: ignore
18
+
19
+
20
+ def _has_hash(path, expected_hash):
21
+ """Check if the provided path has the expected hash."""
22
+ if not os.path.exists(path):
23
+ return False
24
+ return pooch.file_hash(path) == expected_hash
25
+
26
+
class TestDatasets:
    """Smoke tests for the bundled datasets.

    The autouse module fixture downloads every registered dataset first,
    so all tests in this class need a working internet connection on the
    first run (later runs hit the Pooch cache).
    """

    @pytest.fixture(scope='module', autouse=True)
    def test_download_all(self):
        # This fixture requires INTERNET CONNECTION

        # test_setup phase
        download_all()

        yield

    @pytest.mark.fail_slow(5)
    def test_existence_all(self):
        # Every registered dataset file must be present in the cache dir.
        assert len(os.listdir(data_dir)) >= len(registry)

    def test_ascent(self):
        assert_equal(ascent().shape, (512, 512))

        # hash check
        assert _has_hash(os.path.join(data_dir, "ascent.dat"),
                         registry["ascent.dat"])

    def test_face(self):
        assert_equal(face().shape, (768, 1024, 3))

        # hash check
        assert _has_hash(os.path.join(data_dir, "face.dat"),
                         registry["face.dat"])

    def test_electrocardiogram(self):
        # Test shape, dtype and stats of signal
        ecg = electrocardiogram()
        assert_equal(ecg.dtype, float)
        assert_equal(ecg.shape, (108000,))
        assert_almost_equal(ecg.mean(), -0.16510875)
        assert_almost_equal(ecg.std(), 0.5992473991177294)

        # hash check
        assert _has_hash(os.path.join(data_dir, "ecg.dat"),
                         registry["ecg.dat"])
67
+
68
+
def test_clear_cache(tmp_path):
    """Exercise ``_clear_cache`` against a throw-away cache directory."""
    # Note: `tmp_path` is a pytest fixture, it handles cleanup
    cache_root = tmp_path / "dummy_cache_dir"
    cache_root.mkdir()

    # Create dummy dataset files for dummy dataset methods.
    method_map = {}
    for idx in range(4):
        method_map[f"data{idx}"] = [f"data{idx}.dat"]
        (cache_root / f"data{idx}.dat").write_text("")

    # A bare callable (not wrapped in a list) must also be accepted.
    def data0():
        pass
    _clear_cache(datasets=data0, cache_dir=cache_root,
                 method_map=method_map)
    assert not (cache_root / "data0.dat").exists()

    # Clearing files for several dataset methods in one call.
    def data1():
        pass

    def data2():
        pass
    _clear_cache(datasets=[data1, data2], cache_dir=cache_root,
                 method_map=method_map)
    assert not (cache_root / "data1.dat").exists()
    assert not (cache_root / "data2.dat").exists()

    # A single dataset method may own several files.
    def data4():
        pass
    (cache_root / "data4_0.dat").write_text("")
    (cache_root / "data4_1.dat").write_text("")

    method_map["data4"] = ["data4_0.dat", "data4_1.dat"]
    _clear_cache(datasets=[data4], cache_dir=cache_root,
                 method_map=method_map)
    assert not (cache_root / "data4_0.dat").exists()
    assert not (cache_root / "data4_1.dat").exists()

    # A method absent from the map must raise ValueError.
    def data5():
        pass
    with pytest.raises(ValueError):
        _clear_cache(datasets=[data5], cache_dir=cache_root,
                     method_map=method_map)

    # datasets=None wipes the whole cache directory.
    _clear_cache(datasets=None, cache_dir=cache_root)
    assert not cache_root.exists()
parrot/lib/python3.10/site-packages/scipy/integrate/__init__.py ADDED
@@ -0,0 +1,110 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ =============================================
3
+ Integration and ODEs (:mod:`scipy.integrate`)
4
+ =============================================
5
+
6
+ .. currentmodule:: scipy.integrate
7
+
8
+ Integrating functions, given function object
9
+ ============================================
10
+
11
+ .. autosummary::
12
+ :toctree: generated/
13
+
14
+ quad -- General purpose integration
15
+ quad_vec -- General purpose integration of vector-valued functions
16
+ dblquad -- General purpose double integration
17
+ tplquad -- General purpose triple integration
18
+ nquad -- General purpose N-D integration
19
+ fixed_quad -- Integrate func(x) using Gaussian quadrature of order n
20
+ quadrature -- Integrate with given tolerance using Gaussian quadrature
21
+ romberg -- Integrate func using Romberg integration
22
+ newton_cotes -- Weights and error coefficient for Newton-Cotes integration
23
+ qmc_quad -- N-D integration using Quasi-Monte Carlo quadrature
24
+ IntegrationWarning -- Warning on issues during integration
25
+ AccuracyWarning -- Warning on issues during quadrature integration
26
+
27
+ Integrating functions, given fixed samples
28
+ ==========================================
29
+
30
+ .. autosummary::
31
+ :toctree: generated/
32
+
33
+ trapezoid -- Use trapezoidal rule to compute integral.
34
+ cumulative_trapezoid -- Use trapezoidal rule to cumulatively compute integral.
35
+ simpson -- Use Simpson's rule to compute integral from samples.
36
+ cumulative_simpson -- Use Simpson's rule to cumulatively compute integral from samples.
37
+ romb -- Use Romberg Integration to compute integral from
38
+ -- (2**k + 1) evenly-spaced samples.
39
+
40
+ .. seealso::
41
+
42
+ :mod:`scipy.special` for orthogonal polynomials (special) for Gaussian
43
+ quadrature roots and weights for other weighting factors and regions.
44
+
45
+ Solving initial value problems for ODE systems
46
+ ==============================================
47
+
48
+ The solvers are implemented as individual classes, which can be used directly
49
+ (low-level usage) or through a convenience function.
50
+
51
+ .. autosummary::
52
+ :toctree: generated/
53
+
54
+ solve_ivp -- Convenient function for ODE integration.
55
+ RK23 -- Explicit Runge-Kutta solver of order 3(2).
56
+ RK45 -- Explicit Runge-Kutta solver of order 5(4).
57
+ DOP853 -- Explicit Runge-Kutta solver of order 8.
58
+ Radau -- Implicit Runge-Kutta solver of order 5.
59
+ BDF -- Implicit multi-step variable order (1 to 5) solver.
60
+ LSODA -- LSODA solver from ODEPACK Fortran package.
61
+ OdeSolver -- Base class for ODE solvers.
62
+ DenseOutput -- Local interpolant for computing a dense output.
63
+ OdeSolution -- Class which represents a continuous ODE solution.
64
+
65
+
66
+ Old API
67
+ -------
68
+
69
+ These are the routines developed earlier for SciPy. They wrap older solvers
70
+ implemented in Fortran (mostly ODEPACK). While the interface to them is not
71
+ particularly convenient and certain features are missing compared to the new
72
+ API, the solvers themselves are of good quality and work fast as compiled
73
+ Fortran code. In some cases, it might be worth using this old API.
74
+
75
+ .. autosummary::
76
+ :toctree: generated/
77
+
78
+ odeint -- General integration of ordinary differential equations.
79
+ ode -- Integrate ODE using VODE and ZVODE routines.
80
+ complex_ode -- Convert a complex-valued ODE to real-valued and integrate.
81
+ ODEintWarning -- Warning raised during the execution of `odeint`.
82
+
83
+
84
+ Solving boundary value problems for ODE systems
85
+ ===============================================
86
+
87
+ .. autosummary::
88
+ :toctree: generated/
89
+
90
+ solve_bvp -- Solve a boundary value problem for a system of ODEs.
91
+ """ # noqa: E501
92
+
93
+
94
+ from ._quadrature import *
95
+ from ._odepack_py import *
96
+ from ._quadpack_py import *
97
+ from ._ode import *
98
+ from ._bvp import solve_bvp
99
+ from ._ivp import (solve_ivp, OdeSolution, DenseOutput,
100
+ OdeSolver, RK23, RK45, DOP853, Radau, BDF, LSODA)
101
+ from ._quad_vec import quad_vec
102
+
103
+ # Deprecated namespaces, to be removed in v2.0.0
104
+ from . import dop, lsoda, vode, odepack, quadpack
105
+
106
+ __all__ = [s for s in dir() if not s.startswith('_')]
107
+
108
+ from scipy._lib._testutils import PytestTester
109
+ test = PytestTester(__name__)
110
+ del PytestTester
parrot/lib/python3.10/site-packages/scipy/integrate/__pycache__/_bvp.cpython-310.pyc ADDED
Binary file (35.7 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quad_vec.cpython-310.pyc ADDED
Binary file (15.7 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/integrate/__pycache__/dop.cpython-310.pyc ADDED
Binary file (614 Bytes). View file
 
parrot/lib/python3.10/site-packages/scipy/integrate/__pycache__/vode.cpython-310.pyc ADDED
Binary file (617 Bytes). View file
 
parrot/lib/python3.10/site-packages/scipy/integrate/_ivp/__init__.py ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ """Suite of ODE solvers implemented in Python."""
2
+ from .ivp import solve_ivp
3
+ from .rk import RK23, RK45, DOP853
4
+ from .radau import Radau
5
+ from .bdf import BDF
6
+ from .lsoda import LSODA
7
+ from .common import OdeSolution
8
+ from .base import DenseOutput, OdeSolver
parrot/lib/python3.10/site-packages/scipy/integrate/_ivp/base.py ADDED
@@ -0,0 +1,290 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
+
def check_arguments(fun, y0, support_complex):
    """Validate `y0` and wrap `fun`, the shared entry check of all solvers.

    Returns a ``(fun_wrapped, y0)`` pair where ``y0`` is a finite 1-D
    float or complex ndarray and ``fun_wrapped`` coerces the rhs output
    to the same dtype.
    """
    y0 = np.asarray(y0)
    is_complex = np.issubdtype(y0.dtype, np.complexfloating)
    if is_complex and not support_complex:
        raise ValueError("`y0` is complex, but the chosen solver does "
                         "not support integration in a complex domain.")
    dtype = complex if is_complex else float
    y0 = y0.astype(dtype, copy=False)

    if y0.ndim != 1:
        raise ValueError("`y0` must be 1-dimensional.")

    if not np.isfinite(y0).all():
        raise ValueError("All components of the initial state `y0` must be finite.")

    def fun_wrapped(t, y):
        # Keep the rhs output on the solver's working dtype.
        return np.asarray(fun(t, y), dtype=dtype)

    return fun_wrapped, y0
26
+
27
+
class OdeSolver:
    """Base class for ODE solvers.

    In order to implement a new solver you need to follow the guidelines:

        1. A constructor must accept parameters presented in the base class
           (listed below) along with any other parameters specific to a solver.
        2. A constructor must accept arbitrary extraneous arguments
           ``**extraneous``, but warn that these arguments are irrelevant
           using `common.warn_extraneous` function. Do not pass these
           arguments to the base class.
        3. A solver must implement a private method `_step_impl(self)` which
           propagates a solver one step further. It must return tuple
           ``(success, message)``, where ``success`` is a boolean indicating
           whether a step was successful, and ``message`` is a string
           containing description of a failure if a step failed or None
           otherwise.
        4. A solver must implement a private method `_dense_output_impl(self)`,
           which returns a `DenseOutput` object covering the last successful
           step.
        5. A solver must have attributes listed below in Attributes section.
           Note that ``t_old`` and ``step_size`` are updated automatically.
        6. Use `fun(self, t, y)` method for the system rhs evaluation, this
           way the number of function evaluations (`nfev`) will be tracked
           automatically.
        7. For convenience, a base class provides `fun_single(self, t, y)` and
           `fun_vectorized(self, t, y)` for evaluating the rhs in
           non-vectorized and vectorized fashions respectively (regardless of
           how `fun` from the constructor is implemented). These calls don't
           increment `nfev`.
        8. If a solver uses a Jacobian matrix and LU decompositions, it should
           track the number of Jacobian evaluations (`njev`) and the number of
           LU decompositions (`nlu`).
        9. By convention, the function evaluations used to compute a finite
           difference approximation of the Jacobian should not be counted in
           `nfev`, thus use `fun_single(self, t, y)` or
           `fun_vectorized(self, t, y)` when computing a finite difference
           approximation of the Jacobian.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system: the time derivative of the state ``y``
        at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a
        scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. ``fun`` must
        return an array of the same shape as ``y``. See `vectorized` for more
        information.
    t0 : float
        Initial time.
    y0 : array_like, shape (n,)
        Initial state.
    t_bound : float
        Boundary time --- the integration won't continue beyond it. It also
        determines the direction of the integration.
    vectorized : bool
        Whether `fun` can be called in a vectorized fashion. Default is False.

        If ``vectorized`` is False, `fun` will always be called with ``y`` of
        shape ``(n,)``, where ``n = len(y0)``.

        If ``vectorized`` is True, `fun` may be called with ``y`` of shape
        ``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave
        such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of
        the returned array is the time derivative of the state corresponding
        with a column of ``y``).

        Setting ``vectorized=True`` allows for faster finite difference
        approximation of the Jacobian by methods 'Radau' and 'BDF', but
        will result in slower execution for other methods. It can also
        result in slower overall execution for 'Radau' and 'BDF' in some
        circumstances (e.g. small ``len(y0)``).
    support_complex : bool, optional
        Whether integration in a complex domain should be supported.
        Generally determined by a derived solver class capabilities.
        Default is False.

    Attributes
    ----------
    n : int
        Number of equations.
    status : string
        Current status of the solver: 'running', 'finished' or 'failed'.
    t_bound : float
        Boundary time.
    direction : float
        Integration direction: +1 or -1.
    t : float
        Current time.
    y : ndarray
        Current state.
    t_old : float
        Previous time. None if no steps were made yet.
    step_size : float
        Size of the last successful step. None if no steps were made yet.
    nfev : int
        Number of the system's rhs evaluations.
    njev : int
        Number of the Jacobian evaluations.
    nlu : int
        Number of LU decompositions.
    """
    TOO_SMALL_STEP = "Required step size is less than spacing between numbers."

    def __init__(self, fun, t0, y0, t_bound, vectorized,
                 support_complex=False):
        self.t_old = None
        self.t = t0
        self._fun, self.y = check_arguments(fun, y0, support_complex)
        self.t_bound = t_bound
        self.vectorized = vectorized

        # Provide both single-point and vectorized views of the rhs,
        # whichever way the user supplied it, so subclasses can use either.
        if vectorized:
            def fun_single(t, y):
                return self._fun(t, y[:, None]).ravel()
            fun_vectorized = self._fun
        else:
            fun_single = self._fun

            def fun_vectorized(t, y):
                # Fall back to evaluating column by column.
                f = np.empty_like(y)
                for i, yi in enumerate(y.T):
                    f[:, i] = self._fun(t, yi)
                return f

        # Counting wrapper: rhs calls routed through self.fun increment nfev.
        def fun(t, y):
            self.nfev += 1
            return self.fun_single(t, y)

        self.fun = fun
        self.fun_single = fun_single
        self.fun_vectorized = fun_vectorized

        # Degenerate case t_bound == t0 gets direction +1 so that the
        # finishing comparisons in step() still work.
        self.direction = np.sign(t_bound - t0) if t_bound != t0 else 1
        self.n = self.y.size
        self.status = 'running'

        self.nfev = 0
        self.njev = 0
        self.nlu = 0

    @property
    def step_size(self):
        # None until the first successful step updates t_old.
        if self.t_old is None:
            return None
        else:
            return np.abs(self.t - self.t_old)

    def step(self):
        """Perform one integration step.

        Returns
        -------
        message : string or None
            Report from the solver. Typically a reason for a failure if
            `self.status` is 'failed' after the step was taken or None
            otherwise.
        """
        if self.status != 'running':
            raise RuntimeError("Attempt to step on a failed or finished "
                               "solver.")

        if self.n == 0 or self.t == self.t_bound:
            # Handle corner cases of empty solver or no integration.
            self.t_old = self.t
            self.t = self.t_bound
            message = None
            self.status = 'finished'
        else:
            t = self.t
            success, message = self._step_impl()

            if not success:
                self.status = 'failed'
            else:
                self.t_old = t
                if self.direction * (self.t - self.t_bound) >= 0:
                    self.status = 'finished'

        return message

    def dense_output(self):
        """Compute a local interpolant over the last successful step.

        Returns
        -------
        sol : `DenseOutput`
            Local interpolant over the last successful step.
        """
        if self.t_old is None:
            raise RuntimeError("Dense output is available after a successful "
                               "step was made.")

        if self.n == 0 or self.t == self.t_old:
            # Handle corner cases of empty solver and no integration.
            return ConstantDenseOutput(self.t_old, self.t, self.y)
        else:
            return self._dense_output_impl()

    def _step_impl(self):
        # Subclass responsibility: advance one step, return (success, message).
        raise NotImplementedError

    def _dense_output_impl(self):
        # Subclass responsibility: DenseOutput over the last step.
        raise NotImplementedError
231
+
232
+
class DenseOutput:
    """Base class for a local interpolant over one solver step.

    The interpolant covers the closed interval [`t_min`, `t_max`];
    evaluating outside that range is allowed but accuracy is not
    guaranteed there.

    Attributes
    ----------
    t_min, t_max : float
        Time range of the interpolation.
    """
    def __init__(self, t_old, t):
        self.t_old = t_old
        self.t = t
        # Steps may run in either direction, so order the endpoints.
        self.t_min = min(t, t_old)
        self.t_max = max(t, t_old)

    def __call__(self, t):
        """Evaluate the interpolant at `t`.

        Parameters
        ----------
        t : float or array_like with shape (n_points,)
            Points to evaluate the solution at.

        Returns
        -------
        y : ndarray, shape (n,) or (n, n_points)
            Computed values; shape follows whether `t` was scalar or 1-D.
        """
        t = np.asarray(t)
        if t.ndim > 1:
            raise ValueError("`t` must be a float or a 1-D array.")
        result = self._call_impl(t)
        return result

    def _call_impl(self, t):
        # Subclasses provide the actual interpolation.
        raise NotImplementedError
272
+
273
+
class ConstantDenseOutput(DenseOutput):
    """Interpolator that returns a fixed state vector.

    Used for degenerate integrations: equal integration limits, or a
    system with zero equations.
    """
    def __init__(self, t_old, t, value):
        super().__init__(t_old, t)
        self.value = value

    def _call_impl(self, t):
        if t.ndim == 0:
            return self.value
        # Broadcast the constant across every requested time point.
        out = np.empty((self.value.shape[0], t.shape[0]))
        out[:] = self.value[:, None]
        return out
parrot/lib/python3.10/site-packages/scipy/integrate/_ivp/bdf.py ADDED
@@ -0,0 +1,480 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from scipy.linalg import lu_factor, lu_solve
3
+ from scipy.sparse import issparse, csc_matrix, eye
4
+ from scipy.sparse.linalg import splu
5
+ from scipy.optimize._numdiff import group_columns
6
+ from .common import (validate_max_step, validate_tol, select_initial_step,
7
+ norm, EPS, num_jac, validate_first_step,
8
+ warn_extraneous)
9
+ from .base import OdeSolver, DenseOutput
10
+
11
+
12
MAX_ORDER = 5  # Highest BDF order used (order varies from 1 to MAX_ORDER).
NEWTON_MAXITER = 4  # Maximum Newton iterations in solve_bdf_system.
MIN_FACTOR = 0.2  # Minimum allowed decrease in a step size.
MAX_FACTOR = 10  # Maximum allowed increase in a step size.
16
+
17
+
18
def compute_R(order, factor):
    """Compute the matrix for changing the differences array."""
    idx = np.arange(1, order + 1)
    col = idx[:, None]
    # Start from ones (row 0 stays all ones), fill the interior ratios and
    # zero the first column below row 0 — same matrix as building from zeros.
    ratios = np.ones((order + 1, order + 1))
    ratios[1:, 1:] = (col - 1 - factor * idx) / col
    ratios[1:, 0] = 0
    # Column-wise running product turns the ratios into the final matrix.
    return np.cumprod(ratios, axis=0)
26
+
27
+
28
def change_D(D, order, factor):
    """Change differences array in-place when step size is changed."""
    # Composite transformation: R(factor) followed by R(1), applied (via its
    # transpose) to the first order + 1 rows of the differences array.
    composite = compute_R(order, factor).dot(compute_R(order, 1))
    D[:order + 1] = composite.T.dot(D[:order + 1])
34
+
35
+
36
def solve_bdf_system(fun, t_new, y_predict, c, psi, LU, solve_lu, scale, tol):
    """Solve the algebraic system resulting from BDF method.

    Uses a simplified Newton iteration (fixed LU factorization) starting
    from `y_predict`.

    Returns
    -------
    converged : bool
        Whether the iteration converged within NEWTON_MAXITER steps.
    n_iter : int
        Number of completed iterations.
    y : ndarray
        Last iterate (the solution when `converged` is True).
    d : ndarray
        Accumulated correction ``y - y_predict``.
    """
    d = 0
    y = y_predict.copy()
    dy_norm_old = None
    converged = False
    for k in range(NEWTON_MAXITER):
        f = fun(t_new, y)
        # Bail out on a non-finite right-hand side evaluation.
        if not np.all(np.isfinite(f)):
            break

        dy = solve_lu(LU, c * f - psi - d)
        dy_norm = norm(dy / scale)

        # Estimate the linear convergence rate from successive corrections.
        if dy_norm_old is None:
            rate = None
        else:
            rate = dy_norm / dy_norm_old

        # Give up early if diverging (rate >= 1) or if the projected error
        # after the remaining iterations would still exceed `tol`.
        if (rate is not None and (rate >= 1 or
                rate ** (NEWTON_MAXITER - k) / (1 - rate) * dy_norm > tol)):
            break

        y += dy
        d += dy

        # Converged when the correction vanished or the estimated remaining
        # error (geometric-series bound) is below `tol`.
        if (dy_norm == 0 or
                rate is not None and rate / (1 - rate) * dy_norm < tol):
            converged = True
            break

        dy_norm_old = dy_norm

    return converged, k + 1, y, d
70
+
71
+
72
class BDF(OdeSolver):
    """Implicit method based on backward-differentiation formulas.

    This is a variable order method with the order varying automatically from
    1 to 5. The general framework of the BDF algorithm is described in [1]_.
    This class implements a quasi-constant step size as explained in [2]_.
    The error estimation strategy for the constant-step BDF is derived in [3]_.
    An accuracy enhancement using modified formulas (NDF) [2]_ is also implemented.

    Can be applied in the complex domain.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system: the time derivative of the state ``y``
        at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a
        scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. ``fun`` must
        return an array of the same shape as ``y``. See `vectorized` for more
        information.
    t0 : float
        Initial time.
    y0 : array_like, shape (n,)
        Initial state.
    t_bound : float
        Boundary time - the integration won't continue beyond it. It also
        determines the direction of the integration.
    first_step : float or None, optional
        Initial step size. Default is ``None`` which means that the algorithm
        should choose.
    max_step : float, optional
        Maximum allowed step size. Default is np.inf, i.e., the step size is not
        bounded and determined solely by the solver.
    rtol, atol : float and array_like, optional
        Relative and absolute tolerances. The solver keeps the local error
        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
        relative accuracy (number of correct digits), while `atol` controls
        absolute accuracy (number of correct decimal places). To achieve the
        desired `rtol`, set `atol` to be smaller than the smallest value that
        can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
        allowable error. If `atol` is larger than ``rtol * abs(y)`` the
        number of correct digits is not guaranteed. Conversely, to achieve the
        desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
        than `atol`. If components of y have different scales, it might be
        beneficial to set different `atol` values for different components by
        passing array_like with shape (n,) for `atol`. Default values are
        1e-3 for `rtol` and 1e-6 for `atol`.
    jac : {None, array_like, sparse_matrix, callable}, optional
        Jacobian matrix of the right-hand side of the system with respect to y,
        required by this method. The Jacobian matrix has shape (n, n) and its
        element (i, j) is equal to ``d f_i / d y_j``.
        There are three ways to define the Jacobian:

            * If array_like or sparse_matrix, the Jacobian is assumed to
              be constant.
            * If callable, the Jacobian is assumed to depend on both
              t and y; it will be called as ``jac(t, y)`` as necessary.
              For the 'Radau' and 'BDF' methods, the return value might be a
              sparse matrix.
            * If None (default), the Jacobian will be approximated by
              finite differences.

        It is generally recommended to provide the Jacobian rather than
        relying on a finite-difference approximation.
    jac_sparsity : {None, array_like, sparse matrix}, optional
        Defines a sparsity structure of the Jacobian matrix for a
        finite-difference approximation. Its shape must be (n, n). This argument
        is ignored if `jac` is not `None`. If the Jacobian has only few non-zero
        elements in *each* row, providing the sparsity structure will greatly
        speed up the computations [4]_. A zero entry means that a corresponding
        element in the Jacobian is always zero. If None (default), the Jacobian
        is assumed to be dense.
    vectorized : bool, optional
        Whether `fun` can be called in a vectorized fashion. Default is False.

        If ``vectorized`` is False, `fun` will always be called with ``y`` of
        shape ``(n,)``, where ``n = len(y0)``.

        If ``vectorized`` is True, `fun` may be called with ``y`` of shape
        ``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave
        such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of
        the returned array is the time derivative of the state corresponding
        with a column of ``y``).

        Setting ``vectorized=True`` allows for faster finite difference
        approximation of the Jacobian by this method, but may result in slower
        execution overall in some circumstances (e.g. small ``len(y0)``).

    Attributes
    ----------
    n : int
        Number of equations.
    status : string
        Current status of the solver: 'running', 'finished' or 'failed'.
    t_bound : float
        Boundary time.
    direction : float
        Integration direction: +1 or -1.
    t : float
        Current time.
    y : ndarray
        Current state.
    t_old : float
        Previous time. None if no steps were made yet.
    step_size : float
        Size of the last successful step. None if no steps were made yet.
    nfev : int
        Number of evaluations of the right-hand side.
    njev : int
        Number of evaluations of the Jacobian.
    nlu : int
        Number of LU decompositions.

    References
    ----------
    .. [1] G. D. Byrne, A. C. Hindmarsh, "A Polyalgorithm for the Numerical
           Solution of Ordinary Differential Equations", ACM Transactions on
           Mathematical Software, Vol. 1, No. 1, pp. 71-96, March 1975.
    .. [2] L. F. Shampine, M. W. Reichelt, "THE MATLAB ODE SUITE", SIAM J. SCI.
           COMPUTE., Vol. 18, No. 1, pp. 1-22, January 1997.
    .. [3] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations I:
           Nonstiff Problems", Sec. III.2.
    .. [4] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
           sparse Jacobian matrices", Journal of the Institute of Mathematics
           and its Applications, 13, pp. 117-120, 1974.
    """
    def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
                 rtol=1e-3, atol=1e-6, jac=None, jac_sparsity=None,
                 vectorized=False, first_step=None, **extraneous):
        warn_extraneous(extraneous)
        super().__init__(fun, t0, y0, t_bound, vectorized,
                         support_complex=True)
        self.max_step = validate_max_step(max_step)
        self.rtol, self.atol = validate_tol(rtol, atol, self.n)
        f = self.fun(self.t, self.y)
        if first_step is None:
            self.h_abs = select_initial_step(self.fun, self.t, self.y,
                                             t_bound, max_step, f,
                                             self.direction, 1,
                                             self.rtol, self.atol)
        else:
            self.h_abs = validate_first_step(first_step, t0, t_bound)
        self.h_abs_old = None
        self.error_norm_old = None

        # Tolerance for the Newton iteration, tied to the requested rtol.
        self.newton_tol = max(10 * EPS / rtol, min(0.03, rtol ** 0.5))

        self.jac_factor = None
        self.jac, self.J = self._validate_jac(jac, jac_sparsity)
        # Select sparse or dense linear algebra depending on the Jacobian
        # representation; both paths count factorizations in self.nlu.
        if issparse(self.J):
            def lu(A):
                self.nlu += 1
                return splu(A)

            def solve_lu(LU, b):
                return LU.solve(b)

            I = eye(self.n, format='csc', dtype=self.y.dtype)
        else:
            def lu(A):
                self.nlu += 1
                return lu_factor(A, overwrite_a=True)

            def solve_lu(LU, b):
                return lu_solve(LU, b, overwrite_b=True)

            I = np.identity(self.n, dtype=self.y.dtype)

        self.lu = lu
        self.solve_lu = solve_lu
        self.I = I

        # kappa holds the NDF modification coefficients (see [2] in the
        # class docstring); alpha and error_const derive from them.
        kappa = np.array([0, -0.1850, -1/9, -0.0823, -0.0415, 0])
        self.gamma = np.hstack((0, np.cumsum(1 / np.arange(1, MAX_ORDER + 1))))
        self.alpha = (1 - kappa) * self.gamma
        self.error_const = kappa * self.gamma + 1 / np.arange(1, MAX_ORDER + 2)

        # D stores backward differences of the solution; rows 0 and 1 are
        # initialized from the initial state and scaled derivative.
        D = np.empty((MAX_ORDER + 3, self.n), dtype=self.y.dtype)
        D[0] = self.y
        D[1] = f * self.h_abs * self.direction
        self.D = D

        self.order = 1
        self.n_equal_steps = 0
        self.LU = None

    def _validate_jac(self, jac, sparsity):
        """Normalize the user-supplied Jacobian into (callable-or-None, J0)."""
        t0 = self.t
        y0 = self.y

        if jac is None:
            # Finite-difference Jacobian, optionally exploiting sparsity.
            if sparsity is not None:
                if issparse(sparsity):
                    sparsity = csc_matrix(sparsity)
                groups = group_columns(sparsity)
                sparsity = (sparsity, groups)

            def jac_wrapped(t, y):
                self.njev += 1
                f = self.fun_single(t, y)
                J, self.jac_factor = num_jac(self.fun_vectorized, t, y, f,
                                             self.atol, self.jac_factor,
                                             sparsity)
                return J
            J = jac_wrapped(t0, y0)
        elif callable(jac):
            J = jac(t0, y0)
            self.njev += 1
            # Wrap the callable so every call is counted and the result is
            # coerced to a consistent (sparse or dense) representation.
            if issparse(J):
                J = csc_matrix(J, dtype=y0.dtype)

                def jac_wrapped(t, y):
                    self.njev += 1
                    return csc_matrix(jac(t, y), dtype=y0.dtype)
            else:
                J = np.asarray(J, dtype=y0.dtype)

                def jac_wrapped(t, y):
                    self.njev += 1
                    return np.asarray(jac(t, y), dtype=y0.dtype)

            if J.shape != (self.n, self.n):
                raise ValueError("`jac` is expected to have shape {}, but "
                                 "actually has {}."
                                 .format((self.n, self.n), J.shape))
        else:
            # Constant Jacobian supplied as a matrix.
            if issparse(jac):
                J = csc_matrix(jac, dtype=y0.dtype)
            else:
                J = np.asarray(jac, dtype=y0.dtype)

            if J.shape != (self.n, self.n):
                raise ValueError("`jac` is expected to have shape {}, but "
                                 "actually has {}."
                                 .format((self.n, self.n), J.shape))
            jac_wrapped = None

        return jac_wrapped, J

    def _step_impl(self):
        """Attempt one BDF step; returns (success, message)."""
        t = self.t
        D = self.D

        max_step = self.max_step
        min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t)
        # Clamp the step into [min_step, max_step]; any rescale of the step
        # requires rescaling the differences array as well.
        if self.h_abs > max_step:
            h_abs = max_step
            change_D(D, self.order, max_step / self.h_abs)
            self.n_equal_steps = 0
        elif self.h_abs < min_step:
            h_abs = min_step
            change_D(D, self.order, min_step / self.h_abs)
            self.n_equal_steps = 0
        else:
            h_abs = self.h_abs

        atol = self.atol
        rtol = self.rtol
        order = self.order

        alpha = self.alpha
        gamma = self.gamma
        error_const = self.error_const

        J = self.J
        LU = self.LU
        current_jac = self.jac is None

        step_accepted = False
        while not step_accepted:
            if h_abs < min_step:
                return False, self.TOO_SMALL_STEP

            h = h_abs * self.direction
            t_new = t + h

            # Do not step past t_bound; shrink the step (and D) to land on it.
            if self.direction * (t_new - self.t_bound) > 0:
                t_new = self.t_bound
                change_D(D, order, np.abs(t_new - t) / h_abs)
                self.n_equal_steps = 0
                LU = None

            h = t_new - t
            h_abs = np.abs(h)

            # Predictor: sum of the stored backward differences.
            y_predict = np.sum(D[:order + 1], axis=0)

            scale = atol + rtol * np.abs(y_predict)
            psi = np.dot(D[1: order + 1].T, gamma[1: order + 1]) / alpha[order]

            converged = False
            c = h / alpha[order]
            # Corrector: Newton iteration, refreshing the Jacobian once if
            # the first attempt fails to converge.
            while not converged:
                if LU is None:
                    LU = self.lu(self.I - c * J)

                converged, n_iter, y_new, d = solve_bdf_system(
                    self.fun, t_new, y_predict, c, psi, LU, self.solve_lu,
                    scale, self.newton_tol)

                if not converged:
                    if current_jac:
                        break
                    J = self.jac(t_new, y_predict)
                    LU = None
                    current_jac = True

            if not converged:
                # Even a fresh Jacobian did not help: halve the step.
                factor = 0.5
                h_abs *= factor
                change_D(D, order, factor)
                self.n_equal_steps = 0
                LU = None
                continue

            safety = 0.9 * (2 * NEWTON_MAXITER + 1) / (2 * NEWTON_MAXITER
                                                       + n_iter)

            scale = atol + rtol * np.abs(y_new)
            error = error_const[order] * d
            error_norm = norm(error / scale)

            if error_norm > 1:
                factor = max(MIN_FACTOR,
                             safety * error_norm ** (-1 / (order + 1)))
                h_abs *= factor
                change_D(D, order, factor)
                self.n_equal_steps = 0
                # As we didn't have problems with convergence, we don't
                # reset LU here.
            else:
                step_accepted = True

        self.n_equal_steps += 1

        self.t = t_new
        self.y = y_new

        self.h_abs = h_abs
        self.J = J
        self.LU = LU

        # Update differences. The principal relation here is
        # D^{j + 1} y_n = D^{j} y_n - D^{j} y_{n - 1}. Keep in mind that D
        # contained difference for previous interpolating polynomial and
        # d = D^{k + 1} y_n. Thus this elegant code follows.
        D[order + 2] = d - D[order + 1]
        D[order + 1] = d
        for i in reversed(range(order + 1)):
            D[i] += D[i + 1]

        # Only consider changing the order after order + 1 equal steps.
        if self.n_equal_steps < order + 1:
            return True, None

        if order > 1:
            error_m = error_const[order - 1] * D[order]
            error_m_norm = norm(error_m / scale)
        else:
            error_m_norm = np.inf

        if order < MAX_ORDER:
            error_p = error_const[order + 1] * D[order + 2]
            error_p_norm = norm(error_p / scale)
        else:
            error_p_norm = np.inf

        # Pick the order (current, one lower, one higher) that allows the
        # largest step-size factor.
        error_norms = np.array([error_m_norm, error_norm, error_p_norm])
        with np.errstate(divide='ignore'):
            factors = error_norms ** (-1 / np.arange(order, order + 3))

        delta_order = np.argmax(factors) - 1
        order += delta_order
        self.order = order

        factor = min(MAX_FACTOR, safety * np.max(factors))
        self.h_abs *= factor
        change_D(D, order, factor)
        self.n_equal_steps = 0
        self.LU = None

        return True, None

    def _dense_output_impl(self):
        """Return the interpolant for the last completed step."""
        return BdfDenseOutput(self.t_old, self.t, self.h_abs * self.direction,
                              self.order, self.D[:self.order + 1].copy())
456
+
457
+
458
class BdfDenseOutput(DenseOutput):
    """Interpolant over one step of the BDF solver.

    Evaluates the Newton-form polynomial defined by the backward
    differences ``D`` on the grid ``t, t - h, t - 2h, ...``.
    """
    def __init__(self, t_old, t, h, order, D):
        super().__init__(t_old, t)
        self.order = order
        steps = np.arange(self.order)
        # Grid nodes t - j*h and the products' denominators (j + 1) * h.
        self.t_shift = self.t - h * steps
        self.denom = h * (steps + 1)
        self.D = D

    def _call_impl(self, t):
        # Build the basis values: running products of (t - node) / denom.
        if t.ndim == 0:
            basis = np.cumprod((t - self.t_shift) / self.denom)
        else:
            ratios = (t - self.t_shift[:, None]) / self.denom[:, None]
            basis = np.cumprod(ratios, axis=0)

        result = np.dot(self.D[1:].T, basis)
        if result.ndim == 1:
            return result + self.D[0]
        return result + self.D[0, :, None]
parrot/lib/python3.10/site-packages/scipy/integrate/_ivp/dop853_coefficients.py ADDED
@@ -0,0 +1,193 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+
3
# Coefficients for the DOP853 method (the Dormand-Prince 8(5,3) pair as used
# by the DOP853 integrator). Values are stored verbatim; do not reformat.

# Stage counts: 12 stages for the integration step itself, 16 including the
# extra stages used for the dense-output interpolant.
N_STAGES = 12
N_STAGES_EXTENDED = 16
INTERPOLATOR_POWER = 7

# Nodes (time fractions) of all 16 stages.
C = np.array([0.0,
              0.526001519587677318785587544488e-01,
              0.789002279381515978178381316732e-01,
              0.118350341907227396726757197510,
              0.281649658092772603273242802490,
              0.333333333333333333333333333333,
              0.25,
              0.307692307692307692307692307692,
              0.651282051282051282051282051282,
              0.6,
              0.857142857142857142857142857142,
              1.0,
              1.0,
              0.1,
              0.2,
              0.777777777777777777777777777778])

# Coupling coefficients (lower-triangular tableau); only the non-zero
# entries are assigned.
A = np.zeros((N_STAGES_EXTENDED, N_STAGES_EXTENDED))
A[1, 0] = 5.26001519587677318785587544488e-2

A[2, 0] = 1.97250569845378994544595329183e-2
A[2, 1] = 5.91751709536136983633785987549e-2

A[3, 0] = 2.95875854768068491816892993775e-2
A[3, 2] = 8.87627564304205475450678981324e-2

A[4, 0] = 2.41365134159266685502369798665e-1
A[4, 2] = -8.84549479328286085344864962717e-1
A[4, 3] = 9.24834003261792003115737966543e-1

A[5, 0] = 3.7037037037037037037037037037e-2
A[5, 3] = 1.70828608729473871279604482173e-1
A[5, 4] = 1.25467687566822425016691814123e-1

A[6, 0] = 3.7109375e-2
A[6, 3] = 1.70252211019544039314978060272e-1
A[6, 4] = 6.02165389804559606850219397283e-2
A[6, 5] = -1.7578125e-2

A[7, 0] = 3.70920001185047927108779319836e-2
A[7, 3] = 1.70383925712239993810214054705e-1
A[7, 4] = 1.07262030446373284651809199168e-1
A[7, 5] = -1.53194377486244017527936158236e-2
A[7, 6] = 8.27378916381402288758473766002e-3

A[8, 0] = 6.24110958716075717114429577812e-1
A[8, 3] = -3.36089262944694129406857109825
A[8, 4] = -8.68219346841726006818189891453e-1
A[8, 5] = 2.75920996994467083049415600797e1
A[8, 6] = 2.01540675504778934086186788979e1
A[8, 7] = -4.34898841810699588477366255144e1

A[9, 0] = 4.77662536438264365890433908527e-1
A[9, 3] = -2.48811461997166764192642586468
A[9, 4] = -5.90290826836842996371446475743e-1
A[9, 5] = 2.12300514481811942347288949897e1
A[9, 6] = 1.52792336328824235832596922938e1
A[9, 7] = -3.32882109689848629194453265587e1
A[9, 8] = -2.03312017085086261358222928593e-2

A[10, 0] = -9.3714243008598732571704021658e-1
A[10, 3] = 5.18637242884406370830023853209
A[10, 4] = 1.09143734899672957818500254654
A[10, 5] = -8.14978701074692612513997267357
A[10, 6] = -1.85200656599969598641566180701e1
A[10, 7] = 2.27394870993505042818970056734e1
A[10, 8] = 2.49360555267965238987089396762
A[10, 9] = -3.0467644718982195003823669022

A[11, 0] = 2.27331014751653820792359768449
A[11, 3] = -1.05344954667372501984066689879e1
A[11, 4] = -2.00087205822486249909675718444
A[11, 5] = -1.79589318631187989172765950534e1
A[11, 6] = 2.79488845294199600508499808837e1
A[11, 7] = -2.85899827713502369474065508674
A[11, 8] = -8.87285693353062954433549289258
A[11, 9] = 1.23605671757943030647266201528e1
A[11, 10] = 6.43392746015763530355970484046e-1

A[12, 0] = 5.42937341165687622380535766363e-2
A[12, 5] = 4.45031289275240888144113950566
A[12, 6] = 1.89151789931450038304281599044
A[12, 7] = -5.8012039600105847814672114227
A[12, 8] = 3.1116436695781989440891606237e-1
A[12, 9] = -1.52160949662516078556178806805e-1
A[12, 10] = 2.01365400804030348374776537501e-1
A[12, 11] = 4.47106157277725905176885569043e-2

A[13, 0] = 5.61675022830479523392909219681e-2
A[13, 6] = 2.53500210216624811088794765333e-1
A[13, 7] = -2.46239037470802489917441475441e-1
A[13, 8] = -1.24191423263816360469010140626e-1
A[13, 9] = 1.5329179827876569731206322685e-1
A[13, 10] = 8.20105229563468988491666602057e-3
A[13, 11] = 7.56789766054569976138603589584e-3
A[13, 12] = -8.298e-3

A[14, 0] = 3.18346481635021405060768473261e-2
A[14, 5] = 2.83009096723667755288322961402e-2
A[14, 6] = 5.35419883074385676223797384372e-2
A[14, 7] = -5.49237485713909884646569340306e-2
A[14, 10] = -1.08347328697249322858509316994e-4
A[14, 11] = 3.82571090835658412954920192323e-4
A[14, 12] = -3.40465008687404560802977114492e-4
A[14, 13] = 1.41312443674632500278074618366e-1

A[15, 0] = -4.28896301583791923408573538692e-1
A[15, 5] = -4.69762141536116384314449447206
A[15, 6] = 7.68342119606259904184240953878
A[15, 7] = 4.06898981839711007970213554331
A[15, 8] = 3.56727187455281109270669543021e-1
A[15, 12] = -1.39902416515901462129418009734e-3
A[15, 13] = 2.9475147891527723389556272149
A[15, 14] = -9.15095847217987001081870187138


# Propagation weights: row N_STAGES of A serves as the b-vector.
B = A[N_STAGES, :N_STAGES]

# Error-estimation weight vectors (E3 and E5 — presumably the 3rd- and
# 5th-order embedded estimators of DOP853; verify against the stepper code).
E3 = np.zeros(N_STAGES + 1)
E3[:-1] = B.copy()
E3[0] -= 0.244094488188976377952755905512
E3[8] -= 0.733846688281611857341361741547
E3[11] -= 0.220588235294117647058823529412e-1

E5 = np.zeros(N_STAGES + 1)
E5[0] = 0.1312004499419488073250102996e-1
E5[5] = -0.1225156446376204440720569753e+1
E5[6] = -0.4957589496572501915214079952
E5[7] = 0.1664377182454986536961530415e+1
E5[8] = -0.3503288487499736816886487290
E5[9] = 0.3341791187130174790297318841
E5[10] = 0.8192320648511571246570742613e-1
E5[11] = -0.2235530786388629525884427845e-1

# Dense-output coefficients.
# First 3 coefficients are computed separately.
D = np.zeros((INTERPOLATOR_POWER - 3, N_STAGES_EXTENDED))
D[0, 0] = -0.84289382761090128651353491142e+1
D[0, 5] = 0.56671495351937776962531783590
D[0, 6] = -0.30689499459498916912797304727e+1
D[0, 7] = 0.23846676565120698287728149680e+1
D[0, 8] = 0.21170345824450282767155149946e+1
D[0, 9] = -0.87139158377797299206789907490
D[0, 10] = 0.22404374302607882758541771650e+1
D[0, 11] = 0.63157877876946881815570249290
D[0, 12] = -0.88990336451333310820698117400e-1
D[0, 13] = 0.18148505520854727256656404962e+2
D[0, 14] = -0.91946323924783554000451984436e+1
D[0, 15] = -0.44360363875948939664310572000e+1

D[1, 0] = 0.10427508642579134603413151009e+2
D[1, 5] = 0.24228349177525818288430175319e+3
D[1, 6] = 0.16520045171727028198505394887e+3
D[1, 7] = -0.37454675472269020279518312152e+3
D[1, 8] = -0.22113666853125306036270938578e+2
D[1, 9] = 0.77334326684722638389603898808e+1
D[1, 10] = -0.30674084731089398182061213626e+2
D[1, 11] = -0.93321305264302278729567221706e+1
D[1, 12] = 0.15697238121770843886131091075e+2
D[1, 13] = -0.31139403219565177677282850411e+2
D[1, 14] = -0.93529243588444783865713862664e+1
D[1, 15] = 0.35816841486394083752465898540e+2

D[2, 0] = 0.19985053242002433820987653617e+2
D[2, 5] = -0.38703730874935176555105901742e+3
D[2, 6] = -0.18917813819516756882830838328e+3
D[2, 7] = 0.52780815920542364900561016686e+3
D[2, 8] = -0.11573902539959630126141871134e+2
D[2, 9] = 0.68812326946963000169666922661e+1
D[2, 10] = -0.10006050966910838403183860980e+1
D[2, 11] = 0.77771377980534432092869265740
D[2, 12] = -0.27782057523535084065932004339e+1
D[2, 13] = -0.60196695231264120758267380846e+2
D[2, 14] = 0.84320405506677161018159903784e+2
D[2, 15] = 0.11992291136182789328035130030e+2

D[3, 0] = -0.25693933462703749003312586129e+2
D[3, 5] = -0.15418974869023643374053993627e+3
D[3, 6] = -0.23152937917604549567536039109e+3
D[3, 7] = 0.35763911791061412378285349910e+3
D[3, 8] = 0.93405324183624310003907691704e+2
D[3, 9] = -0.37458323136451633156875139351e+2
D[3, 10] = 0.10409964950896230045147246184e+3
D[3, 11] = 0.29840293426660503123344363579e+2
D[3, 12] = -0.43533456590011143754432175058e+2
D[3, 13] = 0.96324553959188282948394950600e+2
D[3, 14] = -0.39177261675615439165231486172e+2
D[3, 15] = -0.14972683625798562581422125276e+3
parrot/lib/python3.10/site-packages/scipy/integrate/_ivp/radau.py ADDED
@@ -0,0 +1,574 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from scipy.linalg import lu_factor, lu_solve
3
+ from scipy.sparse import csc_matrix, issparse, eye
4
+ from scipy.sparse.linalg import splu
5
+ from scipy.optimize._numdiff import group_columns
6
+ from .common import (validate_max_step, validate_tol, select_initial_step,
7
+ norm, num_jac, EPS, warn_extraneous,
8
+ validate_first_step)
9
+ from .base import OdeSolver, DenseOutput
10
+
11
S6 = 6 ** 0.5  # sqrt(6), used throughout the Radau IIA tableau.

# Butcher tableau. A is not used directly, see below.
C = np.array([(4 - S6) / 10, (4 + S6) / 10, 1])
E = np.array([-13 - 7 * S6, -13 + 7 * S6, -1]) / 3

# Eigendecomposition of A is done: A = T L T**-1. There is 1 real eigenvalue
# and a complex conjugate pair. They are written below.
MU_REAL = 3 + 3 ** (2 / 3) - 3 ** (1 / 3)
MU_COMPLEX = (3 + 0.5 * (3 ** (1 / 3) - 3 ** (2 / 3))
              - 0.5j * (3 ** (5 / 6) + 3 ** (7 / 6)))

# These are transformation matrices.
T = np.array([
    [0.09443876248897524, -0.14125529502095421, 0.03002919410514742],
    [0.25021312296533332, 0.20412935229379994, -0.38294211275726192],
    [1, 1, 0]])
TI = np.array([
    [4.17871859155190428, 0.32768282076106237, 0.52337644549944951],
    [-4.17871859155190428, -0.32768282076106237, 0.47662355450055044],
    [0.50287263494578682, -2.57192694985560522, 0.59603920482822492]])
# These linear combinations are used in the algorithm.
TI_REAL = TI[0]
TI_COMPLEX = TI[1] + 1j * TI[2]

# Interpolator coefficients.
P = np.array([
    [13/3 + 7*S6/3, -23/3 - 22*S6/3, 10/3 + 5 * S6],
    [13/3 - 7*S6/3, -23/3 + 22*S6/3, 10/3 - 5 * S6],
    [1/3, -8/3, 10/3]])


NEWTON_MAXITER = 6  # Maximum number of Newton iterations.
MIN_FACTOR = 0.2  # Minimum allowed decrease in a step size.
MAX_FACTOR = 10  # Maximum allowed increase in a step size.
46
+
47
+
48
def solve_collocation_system(fun, t, y, h, Z0, scale, tol,
                             LU_real, LU_complex, solve_lu):
    """Solve the collocation system.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system.
    t : float
        Current time.
    y : ndarray, shape (n,)
        Current state.
    h : float
        Step to try.
    Z0 : ndarray, shape (3, n)
        Initial guess for the solution. It determines new values of `y` at
        ``t + h * C`` as ``y + Z0``, where ``C`` is the Radau method constants.
    scale : ndarray, shape (n)
        Problem tolerance scale, i.e. ``rtol * abs(y) + atol``.
    tol : float
        Tolerance to which solve the system. This value is compared with
        the normalized by `scale` error.
    LU_real, LU_complex
        LU decompositions of the system Jacobians.
    solve_lu : callable
        Callable which solves a linear system given a LU decomposition. The
        signature is ``solve_lu(LU, b)``.

    Returns
    -------
    converged : bool
        Whether iterations converged.
    n_iter : int
        Number of completed iterations.
    Z : ndarray, shape (3, n)
        Found solution.
    rate : float
        The rate of convergence.
    """
    n = y.shape[0]
    M_real = MU_REAL / h
    M_complex = MU_COMPLEX / h

    # Iterate in the transformed variables W = TI @ Z, which decouples the
    # system into one real and one complex block.
    W = TI.dot(Z0)
    Z = Z0

    F = np.empty((3, n))
    ch = h * C

    dW_norm_old = None
    dW = np.empty_like(W)
    converged = False
    rate = None
    for k in range(NEWTON_MAXITER):
        # Evaluate the RHS at the three collocation points.
        for i in range(3):
            F[i] = fun(t + ch[i], y + Z[i])

        if not np.all(np.isfinite(F)):
            break

        f_real = F.T.dot(TI_REAL) - M_real * W[0]
        f_complex = F.T.dot(TI_COMPLEX) - M_complex * (W[1] + 1j * W[2])

        dW_real = solve_lu(LU_real, f_real)
        dW_complex = solve_lu(LU_complex, f_complex)

        dW[0] = dW_real
        dW[1] = dW_complex.real
        dW[2] = dW_complex.imag

        dW_norm = norm(dW / scale)
        if dW_norm_old is not None:
            rate = dW_norm / dW_norm_old

        # Stop if diverging or if `tol` cannot be reached in the remaining
        # iterations (geometric extrapolation of the correction norm).
        if (rate is not None and (rate >= 1 or
                rate ** (NEWTON_MAXITER - k) / (1 - rate) * dW_norm > tol)):
            break

        W += dW
        Z = T.dot(W)

        if (dW_norm == 0 or
                rate is not None and rate / (1 - rate) * dW_norm < tol):
            converged = True
            break

        dW_norm_old = dW_norm

    return converged, k + 1, Z, rate
137
+
138
+
139
def predict_factor(h_abs, h_abs_old, error_norm, error_norm_old):
    """Predict by which factor to increase/decrease the step size.

    The algorithm is described in [1]_. When both previous-step values are
    available (and the current error is non-zero) a two-step predictive
    controller is used; otherwise the standard one-step controller.

    Parameters
    ----------
    h_abs, h_abs_old : float
        Current and previous values of the step size, `h_abs_old` can be
        None (see Notes).
    error_norm, error_norm_old : float
        Current and previous values of the error norm, `error_norm_old` can
        be None (see Notes).

    Returns
    -------
    factor : float
        Predicted factor.

    Notes
    -----
    If `h_abs_old` and `error_norm_old` are both not None then a two-step
    algorithm is used, otherwise a one-step algorithm is used.

    References
    ----------
    .. [1] E. Hairer, S. P. Norsett G. Wanner, "Solving Ordinary Differential
           Equations II: Stiff and Differential-Algebraic Problems", Sec. IV.8.
    """
    has_history = (h_abs_old is not None and error_norm_old is not None
                   and error_norm != 0)
    if has_history:
        multiplier = (h_abs / h_abs_old
                      * (error_norm_old / error_norm) ** 0.25)
    else:
        multiplier = 1

    with np.errstate(divide='ignore'):
        return min(1, multiplier) * error_norm ** -0.25
177
+
178
+
179
class Radau(OdeSolver):
    """Implicit Runge-Kutta method of Radau IIA family of order 5.

    The implementation follows [1]_. The error is controlled with a
    third-order accurate embedded formula. A cubic polynomial which satisfies
    the collocation conditions is used for the dense output.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system: the time derivative of the state ``y``
        at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a
        scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. ``fun`` must
        return an array of the same shape as ``y``. See `vectorized` for more
        information.
    t0 : float
        Initial time.
    y0 : array_like, shape (n,)
        Initial state.
    t_bound : float
        Boundary time - the integration won't continue beyond it. It also
        determines the direction of the integration.
    first_step : float or None, optional
        Initial step size. Default is ``None`` which means that the algorithm
        should choose.
    max_step : float, optional
        Maximum allowed step size. Default is np.inf, i.e., the step size is not
        bounded and determined solely by the solver.
    rtol, atol : float and array_like, optional
        Relative and absolute tolerances. The solver keeps the local error
        estimates less than ``atol + rtol * abs(y)``. Here `rtol` controls a
        relative accuracy (number of correct digits), while `atol` controls
        absolute accuracy (number of correct decimal places). To achieve the
        desired `rtol`, set `atol` to be smaller than the smallest value that
        can be expected from ``rtol * abs(y)`` so that `rtol` dominates the
        allowable error. If `atol` is larger than ``rtol * abs(y)`` the
        number of correct digits is not guaranteed. Conversely, to achieve the
        desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller
        than `atol`. If components of y have different scales, it might be
        beneficial to set different `atol` values for different components by
        passing array_like with shape (n,) for `atol`. Default values are
        1e-3 for `rtol` and 1e-6 for `atol`.
    jac : {None, array_like, sparse_matrix, callable}, optional
        Jacobian matrix of the right-hand side of the system with respect to
        y, required by this method. The Jacobian matrix has shape (n, n) and
        its element (i, j) is equal to ``d f_i / d y_j``.
        There are three ways to define the Jacobian:

            * If array_like or sparse_matrix, the Jacobian is assumed to
              be constant.
            * If callable, the Jacobian is assumed to depend on both
              t and y; it will be called as ``jac(t, y)`` as necessary.
              For the 'Radau' and 'BDF' methods, the return value might be a
              sparse matrix.
            * If None (default), the Jacobian will be approximated by
              finite differences.

        It is generally recommended to provide the Jacobian rather than
        relying on a finite-difference approximation.
    jac_sparsity : {None, array_like, sparse matrix}, optional
        Defines a sparsity structure of the Jacobian matrix for a
        finite-difference approximation. Its shape must be (n, n). This argument
        is ignored if `jac` is not `None`. If the Jacobian has only few non-zero
        elements in *each* row, providing the sparsity structure will greatly
        speed up the computations [2]_. A zero entry means that a corresponding
        element in the Jacobian is always zero. If None (default), the Jacobian
        is assumed to be dense.
    vectorized : bool, optional
        Whether `fun` can be called in a vectorized fashion. Default is False.

        If ``vectorized`` is False, `fun` will always be called with ``y`` of
        shape ``(n,)``, where ``n = len(y0)``.

        If ``vectorized`` is True, `fun` may be called with ``y`` of shape
        ``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave
        such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of
        the returned array is the time derivative of the state corresponding
        with a column of ``y``).

        Setting ``vectorized=True`` allows for faster finite difference
        approximation of the Jacobian by this method, but may result in slower
        execution overall in some circumstances (e.g. small ``len(y0)``).

    Attributes
    ----------
    n : int
        Number of equations.
    status : string
        Current status of the solver: 'running', 'finished' or 'failed'.
    t_bound : float
        Boundary time.
    direction : float
        Integration direction: +1 or -1.
    t : float
        Current time.
    y : ndarray
        Current state.
    t_old : float
        Previous time. None if no steps were made yet.
    step_size : float
        Size of the last successful step. None if no steps were made yet.
    nfev : int
        Number of evaluations of the right-hand side.
    njev : int
        Number of evaluations of the Jacobian.
    nlu : int
        Number of LU decompositions.

    References
    ----------
    .. [1] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations II:
           Stiff and Differential-Algebraic Problems", Sec. IV.8.
    .. [2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of
           sparse Jacobian matrices", Journal of the Institute of Mathematics
           and its Applications, 13, pp. 117-120, 1974.
    """
    def __init__(self, fun, t0, y0, t_bound, max_step=np.inf,
                 rtol=1e-3, atol=1e-6, jac=None, jac_sparsity=None,
                 vectorized=False, first_step=None, **extraneous):
        warn_extraneous(extraneous)
        super().__init__(fun, t0, y0, t_bound, vectorized)
        self.y_old = None
        self.max_step = validate_max_step(max_step)
        self.rtol, self.atol = validate_tol(rtol, atol, self.n)
        self.f = self.fun(self.t, self.y)
        # Select initial step assuming the same order which is used to control
        # the error.
        if first_step is None:
            self.h_abs = select_initial_step(
                self.fun, self.t, self.y, t_bound, max_step, self.f, self.direction,
                3, self.rtol, self.atol)
        else:
            self.h_abs = validate_first_step(first_step, t0, t_bound)
        self.h_abs_old = None
        self.error_norm_old = None

        # Tolerance for the inner Newton iteration; tied to rtol but kept
        # above the floating-point noise floor (10 * EPS / rtol).
        self.newton_tol = max(10 * EPS / rtol, min(0.03, rtol ** 0.5))
        self.sol = None

        self.jac_factor = None
        self.jac, self.J = self._validate_jac(jac, jac_sparsity)
        # Choose a matching (factorize, solve, identity) triple depending on
        # whether the Jacobian is sparse or dense; both `lu` closures also
        # count factorizations via self.nlu.
        if issparse(self.J):
            def lu(A):
                self.nlu += 1
                return splu(A)

            def solve_lu(LU, b):
                return LU.solve(b)

            I = eye(self.n, format='csc')
        else:
            def lu(A):
                self.nlu += 1
                return lu_factor(A, overwrite_a=True)

            def solve_lu(LU, b):
                return lu_solve(LU, b, overwrite_b=True)

            I = np.identity(self.n)

        self.lu = lu
        self.solve_lu = solve_lu
        self.I = I

        # The Jacobian computed above is current for (t0, y0); LU factors and
        # collocation solution Z are built lazily during stepping.
        self.current_jac = True
        self.LU_real = None
        self.LU_complex = None
        self.Z = None

    def _validate_jac(self, jac, sparsity):
        """Normalize the user-supplied Jacobian into ``(jac_wrapped, J)``.

        Returns a callable ``jac_wrapped(t, y, f)`` (or None for a constant
        Jacobian) together with the initial Jacobian matrix ``J``.
        """
        t0 = self.t
        y0 = self.y

        if jac is None:
            # No Jacobian supplied: approximate it by finite differences,
            # optionally exploiting a given sparsity structure.
            if sparsity is not None:
                if issparse(sparsity):
                    sparsity = csc_matrix(sparsity)
                groups = group_columns(sparsity)
                sparsity = (sparsity, groups)

            def jac_wrapped(t, y, f):
                self.njev += 1
                J, self.jac_factor = num_jac(self.fun_vectorized, t, y, f,
                                             self.atol, self.jac_factor,
                                             sparsity)
                return J
            J = jac_wrapped(t0, y0, self.f)
        elif callable(jac):
            # Callable Jacobian: evaluate once to determine sparse vs dense
            # and wrap accordingly so later calls keep the same format.
            J = jac(t0, y0)
            self.njev = 1
            if issparse(J):
                J = csc_matrix(J)

                def jac_wrapped(t, y, _=None):
                    self.njev += 1
                    return csc_matrix(jac(t, y), dtype=float)

            else:
                J = np.asarray(J, dtype=float)

                def jac_wrapped(t, y, _=None):
                    self.njev += 1
                    return np.asarray(jac(t, y), dtype=float)

            if J.shape != (self.n, self.n):
                raise ValueError("`jac` is expected to have shape {}, but "
                                 "actually has {}."
                                 .format((self.n, self.n), J.shape))
        else:
            # Constant Jacobian given as a matrix; no wrapper is needed.
            if issparse(jac):
                J = csc_matrix(jac)
            else:
                J = np.asarray(jac, dtype=float)

            if J.shape != (self.n, self.n):
                raise ValueError("`jac` is expected to have shape {}, but "
                                 "actually has {}."
                                 .format((self.n, self.n), J.shape))
            jac_wrapped = None

        return jac_wrapped, J

    def _step_impl(self):
        """Attempt one integration step; return ``(success, message)``."""
        t = self.t
        y = self.y
        f = self.f

        max_step = self.max_step
        atol = self.atol
        rtol = self.rtol

        # Smallest meaningful step at the current t (10 ulp in the
        # integration direction).
        min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t)
        if self.h_abs > max_step:
            h_abs = max_step
            h_abs_old = None
            error_norm_old = None
        elif self.h_abs < min_step:
            h_abs = min_step
            h_abs_old = None
            error_norm_old = None
        else:
            h_abs = self.h_abs
            h_abs_old = self.h_abs_old
            error_norm_old = self.error_norm_old

        J = self.J
        LU_real = self.LU_real
        LU_complex = self.LU_complex

        current_jac = self.current_jac
        jac = self.jac

        rejected = False
        step_accepted = False
        message = None
        while not step_accepted:
            if h_abs < min_step:
                return False, self.TOO_SMALL_STEP

            h = h_abs * self.direction
            t_new = t + h

            # Clip the step so the integration never overshoots t_bound.
            if self.direction * (t_new - self.t_bound) > 0:
                t_new = self.t_bound

            h = t_new - t
            h_abs = np.abs(h)

            # Initial guess for the collocation increments: zero on the very
            # first step, otherwise extrapolate the previous dense output.
            if self.sol is None:
                Z0 = np.zeros((3, y.shape[0]))
            else:
                Z0 = self.sol(t + h * C).T - y

            scale = atol + np.abs(y) * rtol

            converged = False
            while not converged:
                # Reuse cached LU factors when possible; rebuild only after
                # they were invalidated (h or J changed).
                if LU_real is None or LU_complex is None:
                    LU_real = self.lu(MU_REAL / h * self.I - J)
                    LU_complex = self.lu(MU_COMPLEX / h * self.I - J)

                converged, n_iter, Z, rate = solve_collocation_system(
                    self.fun, t, y, h, Z0, scale, self.newton_tol,
                    LU_real, LU_complex, self.solve_lu)

                if not converged:
                    # Retry once with a fresh Jacobian; if it was already
                    # current, give up on this step size.
                    if current_jac:
                        break

                    J = self.jac(t, y, f)
                    current_jac = True
                    LU_real = None
                    LU_complex = None

            if not converged:
                # Newton failed even with a current Jacobian: halve the step.
                h_abs *= 0.5
                LU_real = None
                LU_complex = None
                continue

            y_new = y + Z[-1]
            # Embedded third-order error estimate, smoothed through LU_real.
            ZE = Z.T.dot(E) / h
            error = self.solve_lu(LU_real, f + ZE)
            scale = atol + np.maximum(np.abs(y), np.abs(y_new)) * rtol
            error_norm = norm(error / scale)
            # Safety factor shrinks when the Newton iteration needed many
            # iterations.
            safety = 0.9 * (2 * NEWTON_MAXITER + 1) / (2 * NEWTON_MAXITER
                                                       + n_iter)

            # After a rejection, re-estimate the error at a perturbed state
            # before deciding again (stabilized error estimate).
            if rejected and error_norm > 1:
                error = self.solve_lu(LU_real, self.fun(t, y + error) + ZE)
                error_norm = norm(error / scale)

            if error_norm > 1:
                factor = predict_factor(h_abs, h_abs_old,
                                        error_norm, error_norm_old)
                h_abs *= max(MIN_FACTOR, safety * factor)

                LU_real = None
                LU_complex = None
                rejected = True
            else:
                step_accepted = True

        # Recompute the Jacobian next step only if Newton converged slowly.
        recompute_jac = jac is not None and n_iter > 2 and rate > 1e-3

        factor = predict_factor(h_abs, h_abs_old, error_norm, error_norm_old)
        factor = min(MAX_FACTOR, safety * factor)

        # Keep the step (and thus the cached LU factors) unchanged unless the
        # predicted gain is worthwhile (>= 1.2) or a new Jacobian is due.
        if not recompute_jac and factor < 1.2:
            factor = 1
        else:
            LU_real = None
            LU_complex = None

        f_new = self.fun(t_new, y_new)
        if recompute_jac:
            J = jac(t_new, y_new, f_new)
            current_jac = True
        elif jac is not None:
            current_jac = False

        self.h_abs_old = self.h_abs
        self.error_norm_old = error_norm

        self.h_abs = h_abs * factor

        self.y_old = y

        self.t = t_new
        self.y = y_new
        self.f = f_new

        self.Z = Z

        self.LU_real = LU_real
        self.LU_complex = LU_complex
        self.current_jac = current_jac
        self.J = J

        self.t_old = t
        self.sol = self._compute_dense_output()

        return step_accepted, message

    def _compute_dense_output(self):
        # Polynomial coefficients of the collocation solution: Q = Z^T P.
        Q = np.dot(self.Z.T, P)
        return RadauDenseOutput(self.t_old, self.t, self.y_old, Q)

    def _dense_output_impl(self):
        return self.sol
549
+
550
+
551
class RadauDenseOutput(DenseOutput):
    """Continuous extension of a Radau step.

    Evaluates the collocation polynomial ``y_old + Q @ [x, x**2, ...]``
    with ``x = (t - t_old) / h`` over the interval of the last step.
    """

    def __init__(self, t_old, t, y_old, Q):
        super().__init__(t_old, t)
        self.y_old = y_old
        self.Q = Q
        self.h = t - t_old
        # Q has one column per polynomial power, starting at x**1.
        self.order = Q.shape[1] - 1

    def _call_impl(self, t):
        x = (t - self.t_old) / self.h
        # Build the power sequence x, x**2, ..., x**(order + 1) via a
        # cumulative product; vectorized over t when t is an array.
        if t.ndim == 0:
            powers = np.cumprod(np.tile(x, self.order + 1))
        else:
            powers = np.cumprod(np.tile(x, (self.order + 1, 1)), axis=0)
        # Here we don't multiply by h, not a mistake.
        result = np.dot(self.Q, powers)
        if result.ndim == 2:
            result += self.y_old[:, None]
        else:
            result += self.y_old

        return result
parrot/lib/python3.10/site-packages/scipy/integrate/_ode.py ADDED
@@ -0,0 +1,1376 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Authors: Pearu Peterson, Pauli Virtanen, John Travers
2
+ """
3
+ First-order ODE integrators.
4
+
5
+ User-friendly interface to various numerical integrators for solving a
6
+ system of first order ODEs with prescribed initial conditions::
7
+
8
+ d y(t)[i]
9
+ --------- = f(t,y(t))[i],
10
+ d t
11
+
12
+ y(t=0)[i] = y0[i],
13
+
14
+ where::
15
+
16
+ i = 0, ..., len(y0) - 1
17
+
18
+ class ode
19
+ ---------
20
+
21
+ A generic interface class to numeric integrators. It has the following
22
+ methods::
23
+
24
+ integrator = ode(f, jac=None)
25
+ integrator = integrator.set_integrator(name, **params)
26
+ integrator = integrator.set_initial_value(y0, t0=0.0)
27
+ integrator = integrator.set_f_params(*args)
28
+ integrator = integrator.set_jac_params(*args)
29
+ y1 = integrator.integrate(t1, step=False, relax=False)
30
+ flag = integrator.successful()
31
+
32
+ class complex_ode
33
+ -----------------
34
+
35
+ This class has the same generic interface as ode, except it can handle complex
36
+ f, y and Jacobians by transparently translating them into the equivalent
37
+ real-valued system. It supports the real-valued solvers (i.e., not zvode) and is
38
+ an alternative to ode with the zvode solver, sometimes performing better.
39
+ """
40
+ # XXX: Integrators must have:
41
+ # ===========================
42
+ # cvode - C version of vode and vodpk with many improvements.
43
+ # Get it from http://www.netlib.org/ode/cvode.tar.gz.
44
+ # To wrap cvode to Python, one must write the extension module by
45
+ # hand. Its interface is too much 'advanced C' that using f2py
46
+ # would be too complicated (or impossible).
47
+ #
48
+ # How to define a new integrator:
49
+ # ===============================
50
+ #
51
+ # class myodeint(IntegratorBase):
52
+ #
53
+ # runner = <odeint function> or None
54
+ #
55
+ # def __init__(self,...): # required
56
+ # <initialize>
57
+ #
58
+ # def reset(self,n,has_jac): # optional
59
+ # # n - the size of the problem (number of equations)
60
+ # # has_jac - whether user has supplied its own routine for Jacobian
61
+ # <allocate memory,initialize further>
62
+ #
63
+ # def run(self,f,jac,y0,t0,t1,f_params,jac_params): # required
64
+ # # this method is called to integrate from t=t0 to t=t1
65
+ # # with initial condition y0. f and jac are user-supplied functions
66
+ # # that define the problem. f_params,jac_params are additional
67
+ # # arguments
68
+ # # to these functions.
69
+ # <calculate y1>
70
+ # if <calculation was unsuccessful>:
71
+ # self.success = 0
72
+ # return t1,y1
73
+ #
74
+ # # In addition, one can define step() and run_relax() methods (they
75
+ # # take the same arguments as run()) if the integrator can support
76
+ # # these features (see IntegratorBase doc strings).
77
+ #
78
+ # if myodeint.runner:
79
+ # IntegratorBase.integrator_classes.append(myodeint)
80
+
81
+ __all__ = ['ode', 'complex_ode']
82
+
83
+ import re
84
+ import warnings
85
+
86
+ from numpy import asarray, array, zeros, isscalar, real, imag, vstack
87
+
88
+ from . import _vode
89
+ from . import _dop
90
+ from . import _lsoda
91
+
92
+
93
+ _dop_int_dtype = _dop.types.intvar.dtype
94
+ _vode_int_dtype = _vode.types.intvar.dtype
95
+ _lsoda_int_dtype = _lsoda.types.intvar.dtype
96
+
97
+
98
+ # ------------------------------------------------------------------------------
99
+ # User interface
100
+ # ------------------------------------------------------------------------------
101
+
102
+
103
+ class ode:
104
+ """
105
+ A generic interface class to numeric integrators.
106
+
107
+ Solve an equation system :math:`y'(t) = f(t,y)` with (optional) ``jac = df/dy``.
108
+
109
+ *Note*: The first two arguments of ``f(t, y, ...)`` are in the
110
+ opposite order of the arguments in the system definition function used
111
+ by `scipy.integrate.odeint`.
112
+
113
+ Parameters
114
+ ----------
115
+ f : callable ``f(t, y, *f_args)``
116
+ Right-hand side of the differential equation. t is a scalar,
117
+ ``y.shape == (n,)``.
118
+ ``f_args`` is set by calling ``set_f_params(*args)``.
119
+ `f` should return a scalar, array or list (not a tuple).
120
+ jac : callable ``jac(t, y, *jac_args)``, optional
121
+ Jacobian of the right-hand side, ``jac[i,j] = d f[i] / d y[j]``.
122
+ ``jac_args`` is set by calling ``set_jac_params(*args)``.
123
+
124
+ Attributes
125
+ ----------
126
+ t : float
127
+ Current time.
128
+ y : ndarray
129
+ Current variable values.
130
+
131
+ See also
132
+ --------
133
+ odeint : an integrator with a simpler interface based on lsoda from ODEPACK
134
+ quad : for finding the area under a curve
135
+
136
+ Notes
137
+ -----
138
+ Available integrators are listed below. They can be selected using
139
+ the `set_integrator` method.
140
+
141
+ "vode"
142
+
143
+ Real-valued Variable-coefficient Ordinary Differential Equation
144
+ solver, with fixed-leading-coefficient implementation. It provides
145
+ implicit Adams method (for non-stiff problems) and a method based on
146
+ backward differentiation formulas (BDF) (for stiff problems).
147
+
148
+ Source: http://www.netlib.org/ode/vode.f
149
+
150
+ .. warning::
151
+
152
+ This integrator is not re-entrant. You cannot have two `ode`
153
+ instances using the "vode" integrator at the same time.
154
+
155
+ This integrator accepts the following parameters in `set_integrator`
156
+ method of the `ode` class:
157
+
158
+ - atol : float or sequence
159
+ absolute tolerance for solution
160
+ - rtol : float or sequence
161
+ relative tolerance for solution
162
+ - lband : None or int
163
+ - uband : None or int
164
+ Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+uband.
165
+ Setting these requires your jac routine to return the jacobian
166
+ in packed format, jac_packed[i-j+uband, j] = jac[i,j]. The
167
+ dimension of the matrix must be (lband+uband+1, len(y)).
168
+ - method: 'adams' or 'bdf'
169
+ Which solver to use, Adams (non-stiff) or BDF (stiff)
170
+ - with_jacobian : bool
171
+ This option is only considered when the user has not supplied a
172
+ Jacobian function and has not indicated (by setting either band)
173
+ that the Jacobian is banded. In this case, `with_jacobian` specifies
174
+ whether the iteration method of the ODE solver's correction step is
175
+ chord iteration with an internally generated full Jacobian or
176
+ functional iteration with no Jacobian.
177
+ - nsteps : int
178
+ Maximum number of (internally defined) steps allowed during one
179
+ call to the solver.
180
+ - first_step : float
181
+ - min_step : float
182
+ - max_step : float
183
+ Limits for the step sizes used by the integrator.
184
+ - order : int
185
+ Maximum order used by the integrator,
186
+ order <= 12 for Adams, <= 5 for BDF.
187
+
188
+ "zvode"
189
+
190
+ Complex-valued Variable-coefficient Ordinary Differential Equation
191
+ solver, with fixed-leading-coefficient implementation. It provides
192
+ implicit Adams method (for non-stiff problems) and a method based on
193
+ backward differentiation formulas (BDF) (for stiff problems).
194
+
195
+ Source: http://www.netlib.org/ode/zvode.f
196
+
197
+ .. warning::
198
+
199
+ This integrator is not re-entrant. You cannot have two `ode`
200
+ instances using the "zvode" integrator at the same time.
201
+
202
+ This integrator accepts the same parameters in `set_integrator`
203
+ as the "vode" solver.
204
+
205
+ .. note::
206
+
207
+ When using ZVODE for a stiff system, it should only be used for
208
+ the case in which the function f is analytic, that is, when each f(i)
209
+ is an analytic function of each y(j). Analyticity means that the
210
+ partial derivative df(i)/dy(j) is a unique complex number, and this
211
+ fact is critical in the way ZVODE solves the dense or banded linear
212
+ systems that arise in the stiff case. For a complex stiff ODE system
213
+ in which f is not analytic, ZVODE is likely to have convergence
214
+ failures, and for this problem one should instead use DVODE on the
215
+ equivalent real system (in the real and imaginary parts of y).
216
+
217
+ "lsoda"
218
+
219
+ Real-valued Variable-coefficient Ordinary Differential Equation
220
+ solver, with fixed-leading-coefficient implementation. It provides
221
+ automatic method switching between implicit Adams method (for non-stiff
222
+ problems) and a method based on backward differentiation formulas (BDF)
223
+ (for stiff problems).
224
+
225
+ Source: http://www.netlib.org/odepack
226
+
227
+ .. warning::
228
+
229
+ This integrator is not re-entrant. You cannot have two `ode`
230
+ instances using the "lsoda" integrator at the same time.
231
+
232
+ This integrator accepts the following parameters in `set_integrator`
233
+ method of the `ode` class:
234
+
235
+ - atol : float or sequence
236
+ absolute tolerance for solution
237
+ - rtol : float or sequence
238
+ relative tolerance for solution
239
+ - lband : None or int
240
+ - uband : None or int
241
+ Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+uband.
242
+ Setting these requires your jac routine to return the jacobian
243
+ in packed format, jac_packed[i-j+uband, j] = jac[i,j].
244
+ - with_jacobian : bool
245
+ *Not used.*
246
+ - nsteps : int
247
+ Maximum number of (internally defined) steps allowed during one
248
+ call to the solver.
249
+ - first_step : float
250
+ - min_step : float
251
+ - max_step : float
252
+ Limits for the step sizes used by the integrator.
253
+ - max_order_ns : int
254
+ Maximum order used in the nonstiff case (default 12).
255
+ - max_order_s : int
256
+ Maximum order used in the stiff case (default 5).
257
+ - max_hnil : int
258
+ Maximum number of messages reporting too small step size (t + h = t)
259
+ (default 0)
260
+ - ixpr : int
261
+ Whether to generate extra printing at method switches (default False).
262
+
263
+ "dopri5"
264
+
265
+ This is an explicit runge-kutta method of order (4)5 due to Dormand &
266
+ Prince (with stepsize control and dense output).
267
+
268
+ Authors:
269
+
270
+ E. Hairer and G. Wanner
271
+ Universite de Geneve, Dept. de Mathematiques
272
+ CH-1211 Geneve 24, Switzerland
273
+ e-mail: ernst.hairer@math.unige.ch, gerhard.wanner@math.unige.ch
274
+
275
+ This code is described in [HNW93]_.
276
+
277
+ This integrator accepts the following parameters in set_integrator()
278
+ method of the ode class:
279
+
280
+ - atol : float or sequence
281
+ absolute tolerance for solution
282
+ - rtol : float or sequence
283
+ relative tolerance for solution
284
+ - nsteps : int
285
+ Maximum number of (internally defined) steps allowed during one
286
+ call to the solver.
287
+ - first_step : float
288
+ - max_step : float
289
+ - safety : float
290
+ Safety factor on new step selection (default 0.9)
291
+ - ifactor : float
292
+ - dfactor : float
293
+ Maximum factor to increase/decrease step size by in one step
294
+ - beta : float
295
+ Beta parameter for stabilised step size control.
296
+ - verbosity : int
297
+ Switch for printing messages (< 0 for no messages).
298
+
299
+ "dop853"
300
+
301
+ This is an explicit runge-kutta method of order 8(5,3) due to Dormand
302
+ & Prince (with stepsize control and dense output).
303
+
304
+ Options and references the same as "dopri5".
305
+
306
+ Examples
307
+ --------
308
+
309
+ A problem to integrate and the corresponding jacobian:
310
+
311
+ >>> from scipy.integrate import ode
312
+ >>>
313
+ >>> y0, t0 = [1.0j, 2.0], 0
314
+ >>>
315
+ >>> def f(t, y, arg1):
316
+ ... return [1j*arg1*y[0] + y[1], -arg1*y[1]**2]
317
+ >>> def jac(t, y, arg1):
318
+ ... return [[1j*arg1, 1], [0, -arg1*2*y[1]]]
319
+
320
+ The integration:
321
+
322
+ >>> r = ode(f, jac).set_integrator('zvode', method='bdf')
323
+ >>> r.set_initial_value(y0, t0).set_f_params(2.0).set_jac_params(2.0)
324
+ >>> t1 = 10
325
+ >>> dt = 1
326
+ >>> while r.successful() and r.t < t1:
327
+ ... print(r.t+dt, r.integrate(r.t+dt))
328
+ 1 [-0.71038232+0.23749653j 0.40000271+0.j ]
329
+ 2.0 [0.19098503-0.52359246j 0.22222356+0.j ]
330
+ 3.0 [0.47153208+0.52701229j 0.15384681+0.j ]
331
+ 4.0 [-0.61905937+0.30726255j 0.11764744+0.j ]
332
+ 5.0 [0.02340997-0.61418799j 0.09523835+0.j ]
333
+ 6.0 [0.58643071+0.339819j 0.08000018+0.j ]
334
+ 7.0 [-0.52070105+0.44525141j 0.06896565+0.j ]
335
+ 8.0 [-0.15986733-0.61234476j 0.06060616+0.j ]
336
+ 9.0 [0.64850462+0.15048982j 0.05405414+0.j ]
337
+ 10.0 [-0.38404699+0.56382299j 0.04878055+0.j ]
338
+
339
+ References
340
+ ----------
341
+ .. [HNW93] E. Hairer, S.P. Norsett and G. Wanner, Solving Ordinary
342
+ Differential Equations i. Nonstiff Problems. 2nd edition.
343
+ Springer Series in Computational Mathematics,
344
+ Springer-Verlag (1993)
345
+
346
+ """
347
+
348
+ def __init__(self, f, jac=None):
349
+ self.stiff = 0
350
+ self.f = f
351
+ self.jac = jac
352
+ self.f_params = ()
353
+ self.jac_params = ()
354
+ self._y = []
355
+
356
+ @property
357
+ def y(self):
358
+ return self._y
359
+
360
+ def set_initial_value(self, y, t=0.0):
361
+ """Set initial conditions y(t) = y."""
362
+ if isscalar(y):
363
+ y = [y]
364
+ n_prev = len(self._y)
365
+ if not n_prev:
366
+ self.set_integrator('') # find first available integrator
367
+ self._y = asarray(y, self._integrator.scalar)
368
+ self.t = t
369
+ self._integrator.reset(len(self._y), self.jac is not None)
370
+ return self
371
+
372
+ def set_integrator(self, name, **integrator_params):
373
+ """
374
+ Set integrator by name.
375
+
376
+ Parameters
377
+ ----------
378
+ name : str
379
+ Name of the integrator.
380
+ **integrator_params
381
+ Additional parameters for the integrator.
382
+ """
383
+ integrator = find_integrator(name)
384
+ if integrator is None:
385
+ # FIXME: this really should be raise an exception. Will that break
386
+ # any code?
387
+ message = f'No integrator name match with {name!r} or is not available.'
388
+ warnings.warn(message, stacklevel=2)
389
+ else:
390
+ self._integrator = integrator(**integrator_params)
391
+ if not len(self._y):
392
+ self.t = 0.0
393
+ self._y = array([0.0], self._integrator.scalar)
394
+ self._integrator.reset(len(self._y), self.jac is not None)
395
+ return self
396
+
397
+ def integrate(self, t, step=False, relax=False):
398
+ """Find y=y(t), set y as an initial condition, and return y.
399
+
400
+ Parameters
401
+ ----------
402
+ t : float
403
+ The endpoint of the integration step.
404
+ step : bool
405
+ If True, and if the integrator supports the step method,
406
+ then perform a single integration step and return.
407
+ This parameter is provided in order to expose internals of
408
+ the implementation, and should not be changed from its default
409
+ value in most cases.
410
+ relax : bool
411
+ If True and if the integrator supports the run_relax method,
412
+ then integrate until t_1 >= t and return. ``relax`` is not
413
+ referenced if ``step=True``.
414
+ This parameter is provided in order to expose internals of
415
+ the implementation, and should not be changed from its default
416
+ value in most cases.
417
+
418
+ Returns
419
+ -------
420
+ y : float
421
+ The integrated value at t
422
+ """
423
+ if step and self._integrator.supports_step:
424
+ mth = self._integrator.step
425
+ elif relax and self._integrator.supports_run_relax:
426
+ mth = self._integrator.run_relax
427
+ else:
428
+ mth = self._integrator.run
429
+
430
+ try:
431
+ self._y, self.t = mth(self.f, self.jac or (lambda: None),
432
+ self._y, self.t, t,
433
+ self.f_params, self.jac_params)
434
+ except SystemError as e:
435
+ # f2py issue with tuple returns, see ticket 1187.
436
+ raise ValueError(
437
+ 'Function to integrate must not return a tuple.'
438
+ ) from e
439
+
440
+ return self._y
441
+
442
    def successful(self):
        """Check if integration was successful."""
        # If set_integrator() was never called, fall back to the default
        # selection: an empty name is a regex that matches the first
        # available integrator class (see find_integrator), though it
        # also emits a warning if nothing is available.
        try:
            self._integrator
        except AttributeError:
            self.set_integrator('')
        # Backends set success to 1 on a clean run and 0 on failure.
        return self._integrator.success == 1
449
+
450
    def get_return_code(self):
        """Extracts the return code for the integration to enable better control
        if the integration fails.

        In general, a return code > 0 implies success, while a return code < 0
        implies failure.

        Notes
        -----
        This section describes possible return codes and their meaning, for available
        integrators that can be selected by `set_integrator` method.

        "vode"

        ===========  =======
        Return Code  Message
        ===========  =======
        2            Integration successful.
        -1           Excess work done on this call. (Perhaps wrong MF.)
        -2           Excess accuracy requested. (Tolerances too small.)
        -3           Illegal input detected. (See printed message.)
        -4           Repeated error test failures. (Check all input.)
        -5           Repeated convergence failures. (Perhaps bad Jacobian
                     supplied or wrong choice of MF or tolerances.)
        -6           Error weight became zero during problem. (Solution
                     component i vanished, and ATOL or ATOL(i) = 0.)
        ===========  =======

        "zvode"

        ===========  =======
        Return Code  Message
        ===========  =======
        2            Integration successful.
        -1           Excess work done on this call. (Perhaps wrong MF.)
        -2           Excess accuracy requested. (Tolerances too small.)
        -3           Illegal input detected. (See printed message.)
        -4           Repeated error test failures. (Check all input.)
        -5           Repeated convergence failures. (Perhaps bad Jacobian
                     supplied or wrong choice of MF or tolerances.)
        -6           Error weight became zero during problem. (Solution
                     component i vanished, and ATOL or ATOL(i) = 0.)
        ===========  =======

        "dopri5"

        ===========  =======
        Return Code  Message
        ===========  =======
        1            Integration successful.
        2            Integration successful (interrupted by solout).
        -1           Input is not consistent.
        -2           Larger nsteps is needed.
        -3           Step size becomes too small.
        -4           Problem is probably stiff (interrupted).
        ===========  =======

        "dop853"

        ===========  =======
        Return Code  Message
        ===========  =======
        1            Integration successful.
        2            Integration successful (interrupted by solout).
        -1           Input is not consistent.
        -2           Larger nsteps is needed.
        -3           Step size becomes too small.
        -4           Problem is probably stiff (interrupted).
        ===========  =======

        "lsoda"

        ===========  =======
        Return Code  Message
        ===========  =======
        2            Integration successful.
        -1           Excess work done on this call (perhaps wrong Dfun type).
        -2           Excess accuracy requested (tolerances too small).
        -3           Illegal input detected (internal error).
        -4           Repeated error test failures (internal error).
        -5           Repeated convergence failures (perhaps bad Jacobian or tolerances).
        -6           Error weight became zero during problem.
        -7           Internal workspace insufficient to finish (internal error).
        ===========  =======
        """
        # Lazily select the default integrator, mirroring successful().
        try:
            self._integrator
        except AttributeError:
            self.set_integrator('')
        # istate is stored by each backend's run() after every call.
        return self._integrator.istate
540
+
541
    def set_f_params(self, *args):
        """Set extra parameters for user-supplied function f."""
        # Stored verbatim; integrate() passes them through to the backend,
        # which forwards them to f.  Returns self to allow call chaining.
        self.f_params = args
        return self
545
+
546
    def set_jac_params(self, *args):
        """Set extra parameters for user-supplied function jac."""
        # Stored verbatim; integrate() passes them through to the backend,
        # which forwards them to jac.  Returns self to allow call chaining.
        self.jac_params = args
        return self
550
+
551
    def set_solout(self, solout):
        """
        Set callable to be called at every successful integration step.

        Parameters
        ----------
        solout : callable
            ``solout(t, y)`` is called at each internal integrator step,
            t is a scalar providing the current independent position
            y is the current solution ``y.shape == (n,)``
            solout should return -1 to stop integration
            otherwise it should return None or 0

        """
        # Only the dopri5/dop853 family sets supports_solout.
        if self._integrator.supports_solout:
            self._integrator.set_solout(solout)
            if self._y is not None:
                # Re-allocate the backend workspaces so the new callback
                # (and its iout flag) take effect on the next run.
                self._integrator.reset(len(self._y), self.jac is not None)
        else:
            # NOTE(review): complex_ode.set_solout raises TypeError for the
            # same condition — the ValueError here predates it; callers may
            # rely on either, so the mismatch is documented, not changed.
            raise ValueError("selected integrator does not support solout,"
                             " choose another one")
572
+
573
+
574
+ def _transform_banded_jac(bjac):
575
+ """
576
+ Convert a real matrix of the form (for example)
577
+
578
+ [0 0 A B] [0 0 0 B]
579
+ [0 0 C D] [0 0 A D]
580
+ [E F G H] to [0 F C H]
581
+ [I J K L] [E J G L]
582
+ [I 0 K 0]
583
+
584
+ That is, every other column is shifted up one.
585
+ """
586
+ # Shift every other column.
587
+ newjac = zeros((bjac.shape[0] + 1, bjac.shape[1]))
588
+ newjac[1:, ::2] = bjac[:, ::2]
589
+ newjac[:-1, 1::2] = bjac[:, 1::2]
590
+ return newjac
591
+
592
+
593
class complex_ode(ode):
    """
    A wrapper of ode for complex systems.

    This functions similarly as `ode`, but re-maps a complex-valued
    equation system to a real-valued one before using the integrators.
    The real system interleaves real and imaginary parts:
    ``[Re y0, Im y0, Re y1, Im y1, ...]``.

    Parameters
    ----------
    f : callable ``f(t, y, *f_args)``
        Rhs of the equation. t is a scalar, ``y.shape == (n,)``.
        ``f_args`` is set by calling ``set_f_params(*args)``.
    jac : callable ``jac(t, y, *jac_args)``
        Jacobian of the rhs, ``jac[i,j] = d f[i] / d y[j]``.
        ``jac_args`` is set by calling ``set_f_params(*args)``.

    Attributes
    ----------
    t : float
        Current time.
    y : ndarray
        Current variable values.

    Examples
    --------
    For usage examples, see `ode`.

    """

    def __init__(self, f, jac=None):
        # Keep the user's complex callables; hand the base class the
        # real-valued wrappers instead.
        self.cf = f
        self.cjac = jac
        if jac is None:
            ode.__init__(self, self._wrap, None)
        else:
            ode.__init__(self, self._wrap, self._wrap_jac)

    def _wrap(self, t, y, *f_args):
        """Evaluate the complex rhs from the interleaved real state."""
        f = self.cf(*((t, y[::2] + 1j * y[1::2]) + f_args))
        # self.tmp is a real-valued array containing the interleaved
        # real and imaginary parts of f.
        self.tmp[::2] = real(f)
        self.tmp[1::2] = imag(f)
        return self.tmp

    def _wrap_jac(self, t, y, *jac_args):
        """Build the real-valued Jacobian of the interleaved system."""
        # jac is the complex Jacobian computed by the user-defined function.
        jac = self.cjac(*((t, y[::2] + 1j * y[1::2]) + jac_args))

        # jac_tmp is the real version of the complex Jacobian.  Each complex
        # entry in jac, say 2+3j, becomes a 2x2 block of the form
        #     [2 -3]
        #     [3  2]
        jac_tmp = zeros((2 * jac.shape[0], 2 * jac.shape[1]))
        jac_tmp[1::2, 1::2] = jac_tmp[::2, ::2] = real(jac)
        jac_tmp[1::2, ::2] = imag(jac)
        jac_tmp[::2, 1::2] = -jac_tmp[1::2, ::2]

        ml = getattr(self._integrator, 'ml', None)
        mu = getattr(self._integrator, 'mu', None)
        if ml is not None or mu is not None:
            # Jacobian is banded.  The user's Jacobian function has computed
            # the complex Jacobian in packed format.  The corresponding
            # real-valued version has every other column shifted up.
            jac_tmp = _transform_banded_jac(jac_tmp)

        return jac_tmp

    @property
    def y(self):
        # Reassemble the complex solution from the interleaved real state.
        return self._y[::2] + 1j * self._y[1::2]

    def set_integrator(self, name, **integrator_params):
        """
        Set integrator by name.

        Parameters
        ----------
        name : str
            Name of the integrator
        **integrator_params
            Additional parameters for the integrator.
        """
        if name == 'zvode':
            # zvode is natively complex; wrapping it here would be redundant.
            raise ValueError("zvode must be used with ode, not complex_ode")

        lband = integrator_params.get('lband')
        uband = integrator_params.get('uband')
        if lband is not None or uband is not None:
            # The Jacobian is banded.  Override the user-supplied bandwidths
            # (which are for the complex Jacobian) with the bandwidths of
            # the corresponding real-valued Jacobian wrapper of the complex
            # Jacobian.
            integrator_params['lband'] = 2 * (lband or 0) + 1
            integrator_params['uband'] = 2 * (uband or 0) + 1

        return ode.set_integrator(self, name, **integrator_params)

    def set_initial_value(self, y, t=0.0):
        """Set initial conditions y(t) = y."""
        y = asarray(y)
        # self.tmp doubles as the scratch buffer reused by _wrap().
        self.tmp = zeros(y.size * 2, 'float')
        self.tmp[::2] = real(y)
        self.tmp[1::2] = imag(y)
        return ode.set_initial_value(self, self.tmp, t)

    def integrate(self, t, step=False, relax=False):
        """Find y=y(t), set y as an initial condition, and return y.

        Parameters
        ----------
        t : float
            The endpoint of the integration step.
        step : bool
            If True, and if the integrator supports the step method,
            then perform a single integration step and return.
            This parameter is provided in order to expose internals of
            the implementation, and should not be changed from its default
            value in most cases.
        relax : bool
            If True and if the integrator supports the run_relax method,
            then integrate until t_1 >= t and return. ``relax`` is not
            referenced if ``step=True``.
            This parameter is provided in order to expose internals of
            the implementation, and should not be changed from its default
            value in most cases.

        Returns
        -------
        y : float
            The integrated value at t
        """
        y = ode.integrate(self, t, step, relax)
        return y[::2] + 1j * y[1::2]

    def set_solout(self, solout):
        """
        Set callable to be called at every successful integration step.

        Parameters
        ----------
        solout : callable
            ``solout(t, y)`` is called at each internal integrator step,
            t is a scalar providing the current independent position
            y is the current solution ``y.shape == (n,)``
            solout should return -1 to stop integration
            otherwise it should return None or 0

        """
        if self._integrator.supports_solout:
            # complex=True makes the backend de-interleave y before
            # calling the user's solout.
            self._integrator.set_solout(solout, complex=True)
        else:
            # FIX: error message previously read "solouta" (typo).
            raise TypeError("selected integrator does not support solout, "
                            "choose another one")
747
+
748
+
749
+ # ------------------------------------------------------------------------------
750
+ # ODE integrators
751
+ # ------------------------------------------------------------------------------
752
+
753
def find_integrator(name):
    """Return the first registered integrator class whose name matches.

    ``name`` is treated as a regular expression anchored at the start of
    the class name and matched case-insensitively; an empty string
    therefore matches the first registered class.  Returns None when
    nothing matches.
    """
    candidates = (cl for cl in IntegratorBase.integrator_classes
                  if re.match(name, cl.__name__, re.I))
    return next(candidates, None)
758
+
759
+
760
class IntegratorConcurrencyError(RuntimeError):
    """
    Failure due to concurrent usage of an integrator that can be used
    only for a single problem at a time.

    """

    def __init__(self, name):
        # The message text is part of the observable behavior; keep it
        # identical to the historical wording.
        super().__init__(
            f"Integrator `{name}` can be used to solve only a single "
            "problem at a time. If you want to integrate multiple "
            "problems, consider using a different integrator "
            "(see `ode.set_integrator`)")
773
+
774
+
775
class IntegratorBase:
    """Abstract base for the Fortran-backed integrator wrappers.

    Subclasses set ``runner`` to the compiled routine (or None when the
    extension is unavailable) and append themselves to
    ``integrator_classes``, the registry searched by ``find_integrator``.
    """

    runner = None  # runner is None => integrator is not available
    success = None  # success==1 if integrator was called successfully
    istate = None  # istate > 0 means success, istate < 0 means failure
    supports_run_relax = None
    supports_step = None
    supports_solout = False
    # Shared, mutable registry of available integrator classes; mutated
    # at import time by the module-level registration blocks.
    integrator_classes = []
    scalar = float

    def acquire_new_handle(self):
        # Some of the integrators have internal state (ancient
        # Fortran...), and so only one instance can use them at a time.
        # We keep track of this, and fail when concurrent usage is tried.
        self.__class__.active_global_handle += 1
        self.handle = self.__class__.active_global_handle

    def check_handle(self):
        # FIX: compare handle values with `!=` instead of `is not`.  The
        # old identity test only worked because the same int object was
        # shared at assignment time; value equality is the actual intent.
        if self.handle != self.__class__.active_global_handle:
            raise IntegratorConcurrencyError(self.__class__.__name__)

    def reset(self, n, has_jac):
        """Prepare integrator for call: allocate memory, set flags, etc.
        n - number of equations.
        has_jac - if user has supplied function for evaluating Jacobian.
        """

    def run(self, f, jac, y0, t0, t1, f_params, jac_params):
        """Integrate from t=t0 to t=t1 using y0 as an initial condition.
        Return 2-tuple (y1,t1) where y1 is the result and t=t1
        defines the stoppage coordinate of the result.
        """
        raise NotImplementedError('all integrators must define '
                                  'run(f, jac, t0, t1, y0, f_params, jac_params)')

    def step(self, f, jac, y0, t0, t1, f_params, jac_params):
        """Make one integration step and return (y1,t1)."""
        raise NotImplementedError('%s does not support step() method' %
                                  self.__class__.__name__)

    def run_relax(self, f, jac, y0, t0, t1, f_params, jac_params):
        """Integrate from t=t0 to t>=t1 and return (y1,t)."""
        raise NotImplementedError('%s does not support run_relax() method' %
                                  self.__class__.__name__)

    # XXX: __str__ method for getting visual state of the integrator
821
+
822
+
823
+ def _vode_banded_jac_wrapper(jacfunc, ml, jac_params):
824
+ """
825
+ Wrap a banded Jacobian function with a function that pads
826
+ the Jacobian with `ml` rows of zeros.
827
+ """
828
+
829
+ def jac_wrapper(t, y):
830
+ jac = asarray(jacfunc(t, y, *jac_params))
831
+ padded_jac = vstack((jac, zeros((ml, jac.shape[1]))))
832
+ return padded_jac
833
+
834
+ return jac_wrapper
835
+
836
+
837
class vode(IntegratorBase):
    """Wrapper for the real-valued VODE solver (Fortran routine dvode)."""

    # None when the Fortran extension was built without dvode.
    runner = getattr(_vode, 'dvode', None)

    # Human-readable text for the negative ISTATE codes; used by run().
    messages = {-1: 'Excess work done on this call. (Perhaps wrong MF.)',
                -2: 'Excess accuracy requested. (Tolerances too small.)',
                -3: 'Illegal input detected. (See printed message.)',
                -4: 'Repeated error test failures. (Check all input.)',
                -5: 'Repeated convergence failures. (Perhaps bad'
                    ' Jacobian supplied or wrong choice of MF or tolerances.)',
                -6: 'Error weight became zero during problem. (Solution'
                    ' component i vanished, and ATOL or ATOL(i) = 0.)'
                }
    supports_run_relax = 1
    supports_step = 1
    # Concurrency guard counter shared per class; see acquire_new_handle.
    active_global_handle = 0

    def __init__(self,
                 method='adams',
                 with_jacobian=False,
                 rtol=1e-6, atol=1e-12,
                 lband=None, uband=None,
                 order=12,
                 nsteps=500,
                 max_step=0.0,  # corresponds to infinite
                 min_step=0.0,
                 first_step=0.0,  # determined by solver
                 ):

        # NOTE(review): arguments to re.match are (pattern, string), so
        # `method` is used as the pattern here — 'a' matches 'adams'.
        if re.match(method, r'adams', re.I):
            self.meth = 1
        elif re.match(method, r'bdf', re.I):
            self.meth = 2
        else:
            raise ValueError('Unknown integration method %s' % method)
        self.with_jacobian = with_jacobian
        self.rtol = rtol
        self.atol = atol
        self.mu = uband  # upper Jacobian bandwidth (None => not banded)
        self.ml = lband  # lower Jacobian bandwidth (None => not banded)

        self.order = order
        self.nsteps = nsteps
        self.max_step = max_step
        self.min_step = min_step
        self.first_step = first_step
        self.success = 1

        self.initialized = False

    def _determine_mf_and_set_bands(self, has_jac):
        """
        Determine the `MF` parameter (Method Flag) for the Fortran subroutine `dvode`.

        In the Fortran code, the legal values of `MF` are:
            10, 11, 12, 13, 14, 15, 20, 21, 22, 23, 24, 25,
            -11, -12, -14, -15, -21, -22, -24, -25
        but this Python wrapper does not use negative values.

        Returns

            mf = 10*self.meth + miter

        self.meth is the linear multistep method:
            self.meth == 1:  method="adams"
            self.meth == 2:  method="bdf"

        miter is the correction iteration method:
            miter == 0:  Functional iteration; no Jacobian involved.
            miter == 1:  Chord iteration with user-supplied full Jacobian.
            miter == 2:  Chord iteration with internally computed full Jacobian.
            miter == 3:  Chord iteration with internally computed diagonal Jacobian.
            miter == 4:  Chord iteration with user-supplied banded Jacobian.
            miter == 5:  Chord iteration with internally computed banded Jacobian.

        Side effects: If either self.mu or self.ml is not None and the other is None,
        then the one that is None is set to 0.
        """

        jac_is_banded = self.mu is not None or self.ml is not None
        if jac_is_banded:
            if self.mu is None:
                self.mu = 0
            if self.ml is None:
                self.ml = 0

        # has_jac is True if the user provided a Jacobian function.
        if has_jac:
            if jac_is_banded:
                miter = 4
            else:
                miter = 1
        else:
            if jac_is_banded:
                if self.ml == self.mu == 0:
                    miter = 3  # Chord iteration with internal diagonal Jacobian.
                else:
                    miter = 5  # Chord iteration with internal banded Jacobian.
            else:
                # self.with_jacobian is set by the user in
                # the call to ode.set_integrator.
                if self.with_jacobian:
                    miter = 2  # Chord iteration with internal full Jacobian.
                else:
                    miter = 0  # Functional iteration; no Jacobian involved.

        mf = 10 * self.meth + miter
        return mf

    def reset(self, n, has_jac):
        # Allocate the dvode work arrays.  The real-workspace length lrw
        # depends on the method flag; the formulas mirror the dvode
        # documentation for each MF value.
        mf = self._determine_mf_and_set_bands(has_jac)

        if mf == 10:
            lrw = 20 + 16 * n
        elif mf in [11, 12]:
            lrw = 22 + 16 * n + 2 * n * n
        elif mf == 13:
            lrw = 22 + 17 * n
        elif mf in [14, 15]:
            lrw = 22 + 18 * n + (3 * self.ml + 2 * self.mu) * n
        elif mf == 20:
            lrw = 20 + 9 * n
        elif mf in [21, 22]:
            lrw = 22 + 9 * n + 2 * n * n
        elif mf == 23:
            lrw = 22 + 10 * n
        elif mf in [24, 25]:
            lrw = 22 + 11 * n + (3 * self.ml + 2 * self.mu) * n
        else:
            raise ValueError('Unexpected mf=%s' % mf)

        # Integer workspace: the extra n entries hold pivot indices when a
        # (non-diagonal) Jacobian is used.
        if mf % 10 in [0, 3]:
            liw = 30
        else:
            liw = 30 + n

        rwork = zeros((lrw,), float)
        rwork[4] = self.first_step
        rwork[5] = self.max_step
        rwork[6] = self.min_step
        self.rwork = rwork

        iwork = zeros((liw,), _vode_int_dtype)
        if self.ml is not None:
            iwork[0] = self.ml
        if self.mu is not None:
            iwork[1] = self.mu
        iwork[4] = self.order
        iwork[5] = self.nsteps
        iwork[6] = 2  # mxhnil
        self.iwork = iwork

        # call_args layout: [rtol, atol, itask, istate, rwork, iwork, mf];
        # step()/run_relax() temporarily change slot 2 (itask) and run()
        # upgrades slot 3 (istate) after the first successful call.
        self.call_args = [self.rtol, self.atol, 1, 1,
                          self.rwork, self.iwork, mf]
        self.success = 1
        self.initialized = False

    def run(self, f, jac, y0, t0, t1, f_params, jac_params):
        if self.initialized:
            self.check_handle()
        else:
            self.initialized = True
            self.acquire_new_handle()

        if self.ml is not None and self.ml > 0:
            # Banded Jacobian.  Wrap the user-provided function with one
            # that pads the Jacobian array with the extra `self.ml` rows
            # required by the f2py-generated wrapper.
            jac = _vode_banded_jac_wrapper(jac, self.ml, jac_params)

        args = ((f, jac, y0, t0, t1) + tuple(self.call_args) +
                (f_params, jac_params))
        y1, t, istate = self.runner(*args)
        self.istate = istate
        if istate < 0:
            unexpected_istate_msg = f'Unexpected istate={istate:d}'
            warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,
                          self.messages.get(istate, unexpected_istate_msg)),
                          stacklevel=2)
            self.success = 0
        else:
            self.call_args[3] = 2  # upgrade istate from 1 to 2
            self.istate = 2
        return y1, t

    def step(self, *args):
        # itask=2: take a single internal step, then restore the old mode.
        itask = self.call_args[2]
        self.call_args[2] = 2
        r = self.run(*args)
        self.call_args[2] = itask
        return r

    def run_relax(self, *args):
        # itask=3: stop at the first internal point at or beyond t1.
        itask = self.call_args[2]
        self.call_args[2] = 3
        r = self.run(*args)
        self.call_args[2] = itask
        return r
1034
+
1035
+
1036
# Register vode only when the compiled Fortran backend is present.
if vode.runner is not None:
    IntegratorBase.integrator_classes.append(vode)
1038
+
1039
+
1040
class zvode(vode):
    """Wrapper for the complex-valued VODE solver (Fortran routine zvode).

    Inherits the method-flag logic, run/step/run_relax and the
    concurrency guard from `vode`; only the workspace layout differs
    (a complex zwork array in addition to rwork/iwork).
    """

    runner = getattr(_vode, 'zvode', None)

    supports_run_relax = 1
    supports_step = 1
    scalar = complex
    active_global_handle = 0

    def reset(self, n, has_jac):
        mf = self._determine_mf_and_set_bands(has_jac)

        # Complex-workspace length per method flag.  The negative-mf
        # entries are kept for completeness even though
        # _determine_mf_and_set_bands only produces positive values.
        if mf in (10,):
            lzw = 15 * n
        elif mf in (11, 12):
            lzw = 15 * n + 2 * n ** 2
        elif mf in (-11, -12):
            lzw = 15 * n + n ** 2
        elif mf in (13,):
            lzw = 16 * n
        elif mf in (14, 15):
            lzw = 17 * n + (3 * self.ml + 2 * self.mu) * n
        elif mf in (-14, -15):
            lzw = 16 * n + (2 * self.ml + self.mu) * n
        elif mf in (20,):
            lzw = 8 * n
        elif mf in (21, 22):
            lzw = 8 * n + 2 * n ** 2
        elif mf in (-21, -22):
            lzw = 8 * n + n ** 2
        elif mf in (23,):
            lzw = 9 * n
        elif mf in (24, 25):
            lzw = 10 * n + (3 * self.ml + 2 * self.mu) * n
        elif mf in (-24, -25):
            lzw = 9 * n + (2 * self.ml + self.mu) * n
        else:
            # FIX: an unexpected mf previously fell through and raised an
            # UnboundLocalError on `lzw`; fail explicitly like vode.reset.
            raise ValueError('Unexpected mf=%s' % mf)

        lrw = 20 + n

        if mf % 10 in (0, 3):
            liw = 30
        else:
            liw = 30 + n

        zwork = zeros((lzw,), complex)
        self.zwork = zwork

        rwork = zeros((lrw,), float)
        rwork[4] = self.first_step
        rwork[5] = self.max_step
        rwork[6] = self.min_step
        self.rwork = rwork

        iwork = zeros((liw,), _vode_int_dtype)
        if self.ml is not None:
            iwork[0] = self.ml
        if self.mu is not None:
            iwork[1] = self.mu
        iwork[4] = self.order
        iwork[5] = self.nsteps
        iwork[6] = 2  # mxhnil
        self.iwork = iwork

        # Same layout as vode.call_args, with the extra complex workspace.
        self.call_args = [self.rtol, self.atol, 1, 1,
                          self.zwork, self.rwork, self.iwork, mf]
        self.success = 1
        self.initialized = False
1106
+
1107
+
1108
# Register zvode only when the compiled Fortran backend is present.
if zvode.runner is not None:
    IntegratorBase.integrator_classes.append(zvode)
1110
+
1111
+
1112
class dopri5(IntegratorBase):
    """Wrapper for Hairer's explicit Runge-Kutta solver dopri5."""

    runner = getattr(_dop, 'dopri5', None)
    name = 'dopri5'
    supports_solout = True

    # Text for the IDID return codes; used by run() for warnings.
    messages = {1: 'computation successful',
                2: 'computation successful (interrupted by solout)',
                -1: 'input is not consistent',
                -2: 'larger nsteps is needed',
                -3: 'step size becomes too small',
                -4: 'problem is probably stiff (interrupted)',
                }

    def __init__(self,
                 rtol=1e-6, atol=1e-12,
                 nsteps=500,
                 max_step=0.0,
                 first_step=0.0,  # determined by solver
                 safety=0.9,
                 ifactor=10.0,
                 dfactor=0.2,
                 beta=0.0,
                 method=None,
                 verbosity=-1,  # no messages if negative
                 ):
        # `method` is accepted only for signature compatibility with the
        # other integrators; it is not used here.
        self.rtol = rtol
        self.atol = atol
        self.nsteps = nsteps
        self.max_step = max_step
        self.first_step = first_step
        self.safety = safety
        self.ifactor = ifactor
        self.dfactor = dfactor
        self.beta = beta
        self.verbosity = verbosity
        self.success = 1
        # Initialize with no per-step callback (iout=0).
        self.set_solout(None)

    def set_solout(self, solout, complex=False):
        # Store the per-step callback; `complex` marks that y must be
        # de-interleaved into a complex vector before calling it.
        self.solout = solout
        self.solout_cmplx = complex
        if solout is None:
            self.iout = 0
        else:
            self.iout = 1

    def reset(self, n, has_jac):
        # Float workspace: slots 1..6 carry the tuning parameters in the
        # order the Fortran code reads them.
        work = zeros((8 * n + 21,), float)
        work[1] = self.safety
        work[2] = self.dfactor
        work[3] = self.ifactor
        work[4] = self.beta
        work[5] = self.max_step
        work[6] = self.first_step
        self.work = work
        iwork = zeros((21,), _dop_int_dtype)
        iwork[0] = self.nsteps
        iwork[2] = self.verbosity
        self.iwork = iwork
        self.call_args = [self.rtol, self.atol, self._solout,
                          self.iout, self.work, self.iwork]
        self.success = 1

    def run(self, f, jac, y0, t0, t1, f_params, jac_params):
        # `jac` and `jac_params` are ignored: dopri5 is an explicit method.
        x, y, iwork, istate = self.runner(*((f, t0, y0, t1) +
                                            tuple(self.call_args) + (f_params,)))
        self.istate = istate
        if istate < 0:
            unexpected_istate_msg = f'Unexpected istate={istate:d}'
            warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,
                          self.messages.get(istate, unexpected_istate_msg)),
                          stacklevel=2)
            self.success = 0
        return y, x

    def _solout(self, nr, xold, x, y, nd, icomp, con):
        # Adapter between the Fortran solout signature and the simple
        # user-facing solout(t, y) callback.
        if self.solout is not None:
            if self.solout_cmplx:
                y = y[::2] + 1j * y[1::2]
            return self.solout(x, y)
        else:
            return 1
1194
+
1195
+
1196
# Register dopri5 only when the compiled Fortran backend is present.
if dopri5.runner is not None:
    IntegratorBase.integrator_classes.append(dopri5)
1198
+
1199
+
1200
class dop853(dopri5):
    """Wrapper for Hairer's 8(5,3) Runge-Kutta solver dop853.

    Shares __init__/run/_solout with dopri5; differs in the runner, the
    default step-size controller factors, and the workspace size.
    """

    runner = getattr(_dop, 'dop853', None)
    name = 'dop853'

    def __init__(self,
                 rtol=1e-6, atol=1e-12,
                 nsteps=500,
                 max_step=0.0,
                 first_step=0.0,  # determined by solver
                 safety=0.9,
                 ifactor=6.0,
                 dfactor=0.3,
                 beta=0.0,
                 method=None,
                 verbosity=-1,  # no messages if negative
                 ):
        super().__init__(rtol, atol, nsteps, max_step, first_step, safety,
                         ifactor, dfactor, beta, method, verbosity)

    def reset(self, n, has_jac):
        # Same slot layout as dopri5.reset, but dop853 needs the larger
        # 11*n (vs 8*n) float workspace.
        work = zeros((11 * n + 21,), float)
        work[1] = self.safety
        work[2] = self.dfactor
        work[3] = self.ifactor
        work[4] = self.beta
        work[5] = self.max_step
        work[6] = self.first_step
        self.work = work
        iwork = zeros((21,), _dop_int_dtype)
        iwork[0] = self.nsteps
        iwork[2] = self.verbosity
        self.iwork = iwork
        self.call_args = [self.rtol, self.atol, self._solout,
                          self.iout, self.work, self.iwork]
        self.success = 1
1235
+
1236
+
1237
# Register dop853 only when the compiled Fortran backend is present.
if dop853.runner is not None:
    IntegratorBase.integrator_classes.append(dop853)
1239
+
1240
+
1241
class lsoda(IntegratorBase):
    """Wrapper for the ODEPACK solver lsoda (automatic stiff/non-stiff
    method switching)."""

    runner = getattr(_lsoda, 'lsoda', None)
    active_global_handle = 0

    # Text for the ISTATE return codes; used by run() for warnings.
    messages = {
        2: "Integration successful.",
        -1: "Excess work done on this call (perhaps wrong Dfun type).",
        -2: "Excess accuracy requested (tolerances too small).",
        -3: "Illegal input detected (internal error).",
        -4: "Repeated error test failures (internal error).",
        -5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).",
        -6: "Error weight became zero during problem.",
        -7: "Internal workspace insufficient to finish (internal error)."
    }

    def __init__(self,
                 with_jacobian=False,
                 rtol=1e-6, atol=1e-12,
                 lband=None, uband=None,
                 nsteps=500,
                 max_step=0.0,  # corresponds to infinite
                 min_step=0.0,
                 first_step=0.0,  # determined by solver
                 ixpr=0,
                 max_hnil=0,
                 max_order_ns=12,
                 max_order_s=5,
                 method=None
                 ):
        # `method` is accepted only for signature compatibility; lsoda
        # switches methods automatically.
        self.with_jacobian = with_jacobian
        self.rtol = rtol
        self.atol = atol
        self.mu = uband  # upper Jacobian bandwidth (None => not banded)
        self.ml = lband  # lower Jacobian bandwidth (None => not banded)

        self.max_order_ns = max_order_ns
        self.max_order_s = max_order_s
        self.nsteps = nsteps
        self.max_step = max_step
        self.min_step = min_step
        self.first_step = first_step
        self.ixpr = ixpr
        self.max_hnil = max_hnil
        self.success = 1

        self.initialized = False

    def reset(self, n, has_jac):
        # Calculate parameters for the Fortran subroutine lsoda.
        # jt (Jacobian type): 1=user full, 2=internal full,
        # 4=user banded, 5=internal banded.
        if has_jac:
            if self.mu is None and self.ml is None:
                jt = 1
            else:
                if self.mu is None:
                    self.mu = 0
                if self.ml is None:
                    self.ml = 0
                jt = 4
        else:
            if self.mu is None and self.ml is None:
                jt = 2
            else:
                if self.mu is None:
                    self.mu = 0
                if self.ml is None:
                    self.ml = 0
                jt = 5
        # Real workspace must cover both the non-stiff and stiff phases.
        lrn = 20 + (self.max_order_ns + 4) * n
        if jt in [1, 2]:
            lrs = 22 + (self.max_order_s + 4) * n + n * n
        elif jt in [4, 5]:
            lrs = 22 + (self.max_order_s + 5 + 2 * self.ml + self.mu) * n
        else:
            raise ValueError('Unexpected jt=%s' % jt)
        lrw = max(lrn, lrs)
        liw = 20 + n
        rwork = zeros((lrw,), float)
        rwork[4] = self.first_step
        rwork[5] = self.max_step
        rwork[6] = self.min_step
        self.rwork = rwork
        iwork = zeros((liw,), _lsoda_int_dtype)
        if self.ml is not None:
            iwork[0] = self.ml
        if self.mu is not None:
            iwork[1] = self.mu
        iwork[4] = self.ixpr
        iwork[5] = self.nsteps
        iwork[6] = self.max_hnil
        iwork[7] = self.max_order_ns
        iwork[8] = self.max_order_s
        self.iwork = iwork
        # call_args layout: [rtol, atol, itask, istate, rwork, iwork, jt];
        # step()/run_relax() toggle slot 2, run() upgrades slot 3.
        self.call_args = [self.rtol, self.atol, 1, 1,
                          self.rwork, self.iwork, jt]
        self.success = 1
        self.initialized = False

    def run(self, f, jac, y0, t0, t1, f_params, jac_params):
        if self.initialized:
            self.check_handle()
        else:
            self.initialized = True
            self.acquire_new_handle()
        # The lsoda runner takes jac and jt after the common arguments;
        # the literal 0 disables column-ordering of the Jacobian.
        args = [f, y0, t0, t1] + self.call_args[:-1] + \
               [jac, self.call_args[-1], f_params, 0, jac_params]
        y1, t, istate = self.runner(*args)
        self.istate = istate
        if istate < 0:
            unexpected_istate_msg = f'Unexpected istate={istate:d}'
            warnings.warn('{:s}: {:s}'.format(self.__class__.__name__,
                          self.messages.get(istate, unexpected_istate_msg)),
                          stacklevel=2)
            self.success = 0
        else:
            self.call_args[3] = 2  # upgrade istate from 1 to 2
            self.istate = 2
        return y1, t

    def step(self, *args):
        # itask=2: take a single internal step, then restore the old mode.
        itask = self.call_args[2]
        self.call_args[2] = 2
        r = self.run(*args)
        self.call_args[2] = itask
        return r

    def run_relax(self, *args):
        # itask=3: stop at the first internal point at or beyond t1.
        itask = self.call_args[2]
        self.call_args[2] = 3
        r = self.run(*args)
        self.call_args[2] = itask
        return r
1373
+
1374
+
1375
# Register lsoda only when the compiled Fortran backend is present.
# Consistency fix: use the explicit `is not None` test like the other
# integrator registrations (vode/zvode/dopri5/dop853) instead of
# truthiness; `runner` is either None or a Fortran function object.
if lsoda.runner is not None:
    IntegratorBase.integrator_classes.append(lsoda)
parrot/lib/python3.10/site-packages/scipy/integrate/_quadpack_py.py ADDED
@@ -0,0 +1,1279 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Author: Travis Oliphant 2001
2
+ # Author: Nathan Woods 2013 (nquad &c)
3
+ import sys
4
+ import warnings
5
+ from functools import partial
6
+
7
+ from . import _quadpack
8
+ import numpy as np
9
+
10
+ __all__ = ["quad", "dblquad", "tplquad", "nquad", "IntegrationWarning"]
11
+
12
+
13
class IntegrationWarning(UserWarning):
    """Warning emitted when an integration routine runs into trouble."""
18
+
19
+
20
def quad(func, a, b, args=(), full_output=0, epsabs=1.49e-8, epsrel=1.49e-8,
         limit=50, points=None, weight=None, wvar=None, wopts=None, maxp1=50,
         limlst=50, complex_func=False):
    """
    Compute a definite integral.

    Integrate func from `a` to `b` (possibly infinite interval) using a
    technique from the Fortran library QUADPACK.

    Parameters
    ----------
    func : {function, scipy.LowLevelCallable}
        A Python function or method to integrate. If `func` takes many
        arguments, it is integrated along the axis corresponding to the
        first argument.

        If the user desires improved integration performance, then `f` may
        be a `scipy.LowLevelCallable` with one of the signatures::

            double func(double x)
            double func(double x, void *user_data)
            double func(int n, double *xx)
            double func(int n, double *xx, void *user_data)

        The ``user_data`` is the data contained in the `scipy.LowLevelCallable`.
        In the call forms with ``xx``,  ``n`` is the length of the ``xx``
        array which contains ``xx[0] == x`` and the rest of the items are
        numbers contained in the ``args`` argument of quad.

        In addition, certain ctypes call signatures are supported for
        backward compatibility, but those should not be used in new code.
    a : float
        Lower limit of integration (use -numpy.inf for -infinity).
    b : float
        Upper limit of integration (use numpy.inf for +infinity).
    args : tuple, optional
        Extra arguments to pass to `func`.
    full_output : int, optional
        Non-zero to return a dictionary of integration information.
        If non-zero, warning messages are also suppressed and the
        message is appended to the output tuple.
    complex_func : bool, optional
        Indicate if the function's (`func`) return type is real
        (``complex_func=False``: default) or complex (``complex_func=True``).
        In both cases, the function's argument is real.
        If full_output is also non-zero, the `infodict`, `message`, and
        `explain` for the real and complex components are returned in
        a dictionary with keys "real output" and "imag output".

    Returns
    -------
    y : float
        The integral of func from `a` to `b`.
    abserr : float
        An estimate of the absolute error in the result.
    infodict : dict
        A dictionary containing additional information.
    message
        A convergence message.
    explain
        Appended only with 'cos' or 'sin' weighting and infinite
        integration limits, it contains an explanation of the codes in
        infodict['ierlst']

    Other Parameters
    ----------------
    epsabs : float or int, optional
        Absolute error tolerance. Default is 1.49e-8. `quad` tries to obtain
        an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
        where ``i`` = integral of `func` from `a` to `b`, and ``result`` is the
        numerical approximation. See `epsrel` below.
    epsrel : float or int, optional
        Relative error tolerance. Default is 1.49e-8.
        If ``epsabs <= 0``, `epsrel` must be greater than both 5e-29
        and ``50 * (machine epsilon)``. See `epsabs` above.
    limit : float or int, optional
        An upper bound on the number of subintervals used in the adaptive
        algorithm.
    points : (sequence of floats,ints), optional
        A sequence of break points in the bounded integration interval
        where local difficulties of the integrand may occur (e.g.,
        singularities, discontinuities). The sequence does not have
        to be sorted. Note that this option cannot be used in conjunction
        with ``weight``.
    weight : float or int, optional
        String indicating weighting function. Full explanation for this
        and the remaining arguments can be found below.
    wvar : optional
        Variables for use with weighting functions.
    wopts : optional
        Optional input for reusing Chebyshev moments.
    maxp1 : float or int, optional
        An upper bound on the number of Chebyshev moments.
    limlst : int, optional
        Upper bound on the number of cycles (>=3) for use with a sinusoidal
        weighting and an infinite end-point.

    See Also
    --------
    dblquad : double integral
    tplquad : triple integral
    nquad : n-dimensional integrals (uses `quad` recursively)
    fixed_quad : fixed-order Gaussian quadrature
    simpson : integrator for sampled data
    romb : integrator for sampled data
    scipy.special : for coefficients and roots of orthogonal polynomials

    Notes
    -----
    For valid results, the integral must converge; behavior for divergent
    integrals is not guaranteed.

    **Extra information for quad() inputs and outputs**

    If full_output is non-zero, then the third output argument
    (infodict) is a dictionary with entries as tabulated below. For
    infinite limits, the range is transformed to (0,1) and the
    optional outputs are given with respect to this transformed range.
    Let M be the input argument limit and let K be infodict['last'].
    The entries are:

    'neval'
        The number of function evaluations.
    'last'
        The number, K, of subintervals produced in the subdivision process.
    'alist'
        A rank-1 array of length M, the first K elements of which are the
        left end points of the subintervals in the partition of the
        integration range.
    'blist'
        A rank-1 array of length M, the first K elements of which are the
        right end points of the subintervals.
    'rlist'
        A rank-1 array of length M, the first K elements of which are the
        integral approximations on the subintervals.
    'elist'
        A rank-1 array of length M, the first K elements of which are the
        moduli of the absolute error estimates on the subintervals.
    'iord'
        A rank-1 integer array of length M, the first L elements of
        which are pointers to the error estimates over the subintervals
        with ``L=K`` if ``K<=M/2+2`` or ``L=M+1-K`` otherwise. Let I be the
        sequence ``infodict['iord']`` and let E be the sequence
        ``infodict['elist']``. Then ``E[I[1]], ..., E[I[L]]`` forms a
        decreasing sequence.

    If the input argument points is provided (i.e., it is not None),
    the following additional outputs are placed in the output
    dictionary. Assume the points sequence is of length P.

    'pts'
        A rank-1 array of length P+2 containing the integration limits
        and the break points of the intervals in ascending order.
        This is an array giving the subintervals over which integration
        will occur.
    'level'
        A rank-1 integer array of length M (=limit), containing the
        subdivision levels of the subintervals, i.e., if (aa,bb) is a
        subinterval of ``(pts[1], pts[2])`` where ``pts[0]`` and ``pts[2]``
        are adjacent elements of ``infodict['pts']``, then (aa,bb) has level l
        if ``|bb-aa| = |pts[2]-pts[1]| * 2**(-l)``.
    'ndin'
        A rank-1 integer array of length P+2. After the first integration
        over the intervals (pts[1], pts[2]), the error estimates over some
        of the intervals may have been increased artificially in order to
        put their subdivision forward. This array has ones in slots
        corresponding to the subintervals for which this happens.

    **Weighting the integrand**

    The input variables, *weight* and *wvar*, are used to weight the
    integrand by a select list of functions. Different integration
    methods are used to compute the integral with these weighting
    functions, and these do not support specifying break points. The
    possible values of weight and the corresponding weighting functions are.

    ==========  ===================================   =====================
    ``weight``  Weight function used                  ``wvar``
    ==========  ===================================   =====================
    'cos'       cos(w*x)                              wvar = w
    'sin'       sin(w*x)                              wvar = w
    'alg'       g(x) = ((x-a)**alpha)*((b-x)**beta)   wvar = (alpha, beta)
    'alg-loga'  g(x)*log(x-a)                         wvar = (alpha, beta)
    'alg-logb'  g(x)*log(b-x)                         wvar = (alpha, beta)
    'alg-log'   g(x)*log(x-a)*log(b-x)                wvar = (alpha, beta)
    'cauchy'    1/(x-c)                               wvar = c
    ==========  ===================================   =====================

    wvar holds the parameter w, (alpha, beta), or c depending on the weight
    selected. In these expressions, a and b are the integration limits.

    For the 'cos' and 'sin' weighting, additional inputs and outputs are
    available.

    For finite integration limits, the integration is performed using a
    Clenshaw-Curtis method which uses Chebyshev moments. For repeated
    calculations, these moments are saved in the output dictionary:

    'momcom'
        The maximum level of Chebyshev moments that have been computed,
        i.e., if ``M_c`` is ``infodict['momcom']`` then the moments have been
        computed for intervals of length ``|b-a| * 2**(-l)``,
        ``l=0,1,...,M_c``.
    'nnlog'
        A rank-1 integer array of length M(=limit), containing the
        subdivision levels of the subintervals, i.e., an element of this
        array is equal to l if the corresponding subinterval is
        ``|b-a|* 2**(-l)``.
    'chebmo'
        A rank-2 array of shape (25, maxp1) containing the computed
        Chebyshev moments. These can be passed on to an integration
        over the same interval by passing this array as the second
        element of the sequence wopts and passing infodict['momcom'] as
        the first element.

    If one of the integration limits is infinite, then a Fourier integral is
    computed (assuming w neq 0). If full_output is 1 and a numerical error
    is encountered, besides the error message attached to the output tuple,
    a dictionary is also appended to the output tuple which translates the
    error codes in the array ``info['ierlst']`` to English messages. The
    output information dictionary contains the following entries instead of
    'last', 'alist', 'blist', 'rlist', and 'elist':

    'lst'
        The number of subintervals needed for the integration (call it ``K_f``).
    'rslst'
        A rank-1 array of length M_f=limlst, whose first ``K_f`` elements
        contain the integral contribution over the interval
        ``(a+(k-1)c, a+kc)`` where ``c = (2*floor(|w|) + 1) * pi / |w|``
        and ``k=1,2,...,K_f``.
    'erlst'
        A rank-1 array of length ``M_f`` containing the error estimate
        corresponding to the interval in the same position in
        ``infodict['rslist']``.
    'ierlst'
        A rank-1 integer array of length ``M_f`` containing an error flag
        corresponding to the interval in the same position in
        ``infodict['rslist']``.  See the explanation dictionary (last entry
        in the output tuple) for the meaning of the codes.


    **Details of QUADPACK level routines**

    `quad` calls routines from the FORTRAN library QUADPACK. This section
    provides details on the conditions for each routine to be called and a
    short description of each routine. The routine called depends on
    `weight`, `points` and the integration limits `a` and `b`.

    ================  ==============  ==========  =====================
    QUADPACK routine  `weight`        `points`    infinite bounds
    ================  ==============  ==========  =====================
    qagse             None            No          No
    qagie             None            No          Yes
    qagpe             None            Yes         No
    qawoe             'sin', 'cos'    No          No
    qawfe             'sin', 'cos'    No          either `a` or `b`
    qawse             'alg*'          No          No
    qawce             'cauchy'        No          No
    ================  ==============  ==========  =====================

    The following provides a short description from [1]_ for each
    routine.

    qagse
        is an integrator based on globally adaptive interval
        subdivision in connection with extrapolation, which will
        eliminate the effects of integrand singularities of
        several types.
    qagie
        handles integration over infinite intervals. The infinite range is
        mapped onto a finite interval and subsequently the same strategy as
        in ``QAGS`` is applied.
    qagpe
        serves the same purposes as QAGS, but also allows the
        user to provide explicit information about the location
        and type of trouble-spots i.e. the abscissae of internal
        singularities, discontinuities and other difficulties of
        the integrand function.
    qawoe
        is an integrator for the evaluation of
        :math:`\\int^b_a \\cos(\\omega x)f(x)dx` or
        :math:`\\int^b_a \\sin(\\omega x)f(x)dx`
        over a finite interval [a,b], where :math:`\\omega` and :math:`f`
        are specified by the user. The rule evaluation component is based
        on the modified Clenshaw-Curtis technique

        An adaptive subdivision scheme is used in connection
        with an extrapolation procedure, which is a modification
        of that in ``QAGS`` and allows the algorithm to deal with
        singularities in :math:`f(x)`.
    qawfe
        calculates the Fourier transform
        :math:`\\int^\\infty_a \\cos(\\omega x)f(x)dx` or
        :math:`\\int^\\infty_a \\sin(\\omega x)f(x)dx`
        for user-provided :math:`\\omega` and :math:`f`. The procedure of
        ``QAWO`` is applied on successive finite intervals, and convergence
        acceleration by means of the :math:`\\varepsilon`-algorithm is applied
        to the series of integral approximations.
    qawse
        approximate :math:`\\int^b_a w(x)f(x)dx`, with :math:`a < b` where
        :math:`w(x) = (x-a)^{\\alpha}(b-x)^{\\beta}v(x)` with
        :math:`\\alpha,\\beta > -1`, where :math:`v(x)` may be one of the
        following functions: :math:`1`, :math:`\\log(x-a)`, :math:`\\log(b-x)`,
        :math:`\\log(x-a)\\log(b-x)`.

        The user specifies :math:`\\alpha`, :math:`\\beta` and the type of the
        function :math:`v`. A globally adaptive subdivision strategy is
        applied, with modified Clenshaw-Curtis integration on those
        subintervals which contain `a` or `b`.
    qawce
        compute :math:`\\int^b_a f(x) / (x-c)dx` where the integral must be
        interpreted as a Cauchy principal value integral, for user specified
        :math:`c` and :math:`f`. The strategy is globally adaptive. Modified
        Clenshaw-Curtis integration is used on those intervals containing the
        point :math:`x = c`.

    **Integration of Complex Function of a Real Variable**

    A complex valued function, :math:`f`, of a real variable can be written as
    :math:`f = g + ih`.  Similarly, the integral of :math:`f` can be
    written as

    .. math::
        \\int_a^b f(x) dx = \\int_a^b g(x) dx + i\\int_a^b h(x) dx

    assuming that the integrals of :math:`g` and :math:`h` exist
    over the interval :math:`[a,b]` [2]_. Therefore, ``quad`` integrates
    complex-valued functions by integrating the real and imaginary components
    separately.


    References
    ----------

    .. [1] Piessens, Robert; de Doncker-Kapenga, Elise;
           Überhuber, Christoph W.; Kahaner, David (1983).
           QUADPACK: A subroutine package for automatic integration.
           Springer-Verlag.
           ISBN 978-3-540-12553-2.

    .. [2] McCullough, Thomas; Phillips, Keith (1973).
           Foundations of Analysis in the Complex Plane.
           Holt Rinehart Winston.
           ISBN 0-03-086370-8

    Examples
    --------
    Calculate :math:`\\int^4_0 x^2 dx` and compare with an analytic result

    >>> from scipy import integrate
    >>> import numpy as np
    >>> x2 = lambda x: x**2
    >>> integrate.quad(x2, 0, 4)
    (21.333333333333332, 2.3684757858670003e-13)
    >>> print(4**3 / 3.)  # analytical result
    21.3333333333

    Calculate :math:`\\int^\\infty_0 e^{-x} dx`

    >>> invexp = lambda x: np.exp(-x)
    >>> integrate.quad(invexp, 0, np.inf)
    (1.0, 5.842605999138044e-11)

    Calculate :math:`\\int^1_0 a x \\,dx` for :math:`a = 1, 3`

    >>> f = lambda x, a: a*x
    >>> y, err = integrate.quad(f, 0, 1, args=(1,))
    >>> y
    0.5
    >>> y, err = integrate.quad(f, 0, 1, args=(3,))
    >>> y
    1.5

    Calculate :math:`\\int^1_0 x^2 + y^2 dx` with ctypes, holding
    y parameter as 1::

        testlib.c =>
            double func(int n, double args[n]){
                return args[0]*args[0] + args[1]*args[1];}
        compile to library testlib.*

    ::

       from scipy import integrate
       import ctypes
       lib = ctypes.CDLL('/home/.../testlib.*') #use absolute path
       lib.func.restype = ctypes.c_double
       lib.func.argtypes = (ctypes.c_int,ctypes.c_double)
       integrate.quad(lib.func,0,1,(1))
       #(1.3333333333333333, 1.4802973661668752e-14)
       print((1.0**3/3.0 + 1.0) - (0.0**3/3.0 + 0.0)) #Analytic result
       # 1.3333333333333333

    Be aware that pulse shapes and other sharp features as compared to the
    size of the integration interval may not be integrated correctly using
    this method. A simplified example of this limitation is integrating a
    y-axis reflected step function with many zero values within the integrals
    bounds.

    >>> y = lambda x: 1 if x<=0 else 0
    >>> integrate.quad(y, -1, 1)
    (1.0, 1.1102230246251565e-14)
    >>> integrate.quad(y, -1, 100)
    (1.0000000002199108, 1.0189464580163188e-08)
    >>> integrate.quad(y, -1, 10000)
    (0.0, 0.0)

    """
    if not isinstance(args, tuple):
        args = (args,)

    # check the limits of integration: \int_a^b, expect a < b
    flip, a, b = b < a, min(a, b), max(a, b)

    if complex_func:
        # Integrate the real and imaginary components separately; each
        # recursive call sees the already-sorted limits, so the sign flip
        # for reversed limits must be applied here.
        def imfunc(x, *args):
            return func(x, *args).imag

        def refunc(x, *args):
            return func(x, *args).real

        re_retval = quad(refunc, a, b, args, full_output, epsabs,
                         epsrel, limit, points, weight, wvar, wopts,
                         maxp1, limlst, complex_func=False)
        im_retval = quad(imfunc, a, b, args, full_output, epsabs,
                         epsrel, limit, points, weight, wvar, wopts,
                         maxp1, limlst, complex_func=False)
        integral = re_retval[0] + 1j*im_retval[0]
        if flip:
            # BUG FIX: the original returned the unsigned integral for
            # reversed limits (a > b) in the complex path, while the real
            # path below negates it. Apply the same convention here.
            integral = -integral
        error_estimate = re_retval[1] + 1j*im_retval[1]
        retval = integral, error_estimate
        if full_output:
            msgexp = {}
            msgexp["real"] = re_retval[2:]
            msgexp["imag"] = im_retval[2:]
            retval = retval + (msgexp,)

        return retval

    if weight is None:
        retval = _quad(func, a, b, args, full_output, epsabs, epsrel, limit,
                       points)
    else:
        if points is not None:
            msg = ("Break points cannot be specified when using weighted integrand.\n"
                   "Continuing, ignoring specified points.")
            warnings.warn(msg, IntegrationWarning, stacklevel=2)
        retval = _quad_weight(func, a, b, args, full_output, epsabs, epsrel,
                              limlst, limit, maxp1, weight, wvar, wopts)

    if flip:
        retval = (-retval[0],) + retval[1:]

    # The last element of the QUADPACK return tuple is the ier status flag;
    # 0 means success.
    ier = retval[-1]
    if ier == 0:
        return retval[:-1]

    msgs = {80: "A Python error occurred possibly while calling the function.",
            1: f"The maximum number of subdivisions ({limit}) has been achieved.\n  "
               f"If increasing the limit yields no improvement it is advised to "
               f"analyze \n  the integrand in order to determine the difficulties. "
               f"If the position of a \n  local difficulty can be determined "
               f"(singularity, discontinuity) one will \n  probably gain from "
               f"splitting up the interval and calling the integrator \n  on the "
               f"subranges. Perhaps a special-purpose integrator should be used.",
            2: "The occurrence of roundoff error is detected, which prevents \n  "
               "the requested tolerance from being achieved. "
               "The error may be \n  underestimated.",
            3: "Extremely bad integrand behavior occurs at some points of the\n  "
               "integration interval.",
            4: "The algorithm does not converge. Roundoff error is detected\n  "
               "in the extrapolation table. It is assumed that the requested "
               "tolerance\n  cannot be achieved, and that the returned result "
               "(if full_output = 1) is \n  the best which can be obtained.",
            5: "The integral is probably divergent, or slowly convergent.",
            6: "The input is invalid.",
            7: "Abnormal termination of the routine. The estimates for result\n  "
               "and error are less reliable. It is assumed that the requested "
               "accuracy\n  has not been achieved.",
            'unknown': "Unknown error."}

    if weight in ['cos', 'sin'] and (b == np.inf or a == -np.inf):
        # Fourier integral path (QAWFE): reinterpret some status codes.
        msgs[1] = (
            # BUG FIX: original message was garbled ("achieved., e.e.").
            "The maximum number of cycles allowed has been achieved, i.e.\n  of "
            "subintervals (a+(k-1)c, a+kc) where c = (2*int(abs(omega)+1))\n  "
            "*pi/abs(omega), for k = 1, 2, ..., lst. "
            "One can allow more cycles by increasing the value of limlst. "
            "Look at info['ierlst'] with full_output=1."
        )
        msgs[4] = (
            "The extrapolation table constructed for convergence acceleration\n  of "
            "the series formed by the integral contributions over the cycles, \n  does "
            "not converge to within the requested accuracy. "
            "Look at \n  info['ierlst'] with full_output=1."
        )
        msgs[7] = (
            "Bad integrand behavior occurs within one or more of the cycles.\n  "
            "Location and type of the difficulty involved can be determined from \n  "
            # BUG FIX: the dictionary key is 'ierlst', not 'ierlist'.
            "the vector info['ierlst'] obtained with full_output=1."
        )
        explain = {1: "The maximum number of subdivisions (= limit) has been \n  "
                      "achieved on this cycle.",
                   2: "The occurrence of roundoff error is detected and prevents\n  "
                      "the tolerance imposed on this cycle from being achieved.",
                   3: "Extremely bad integrand behavior occurs at some points of\n  "
                      "this cycle.",
                   4: "The integral over this cycle does not converge (to within the "
                      "required accuracy) due to roundoff in the extrapolation "
                      "procedure invoked on this cycle. It is assumed that the result "
                      "on this interval is the best which can be obtained.",
                   5: "The integral over this cycle is probably divergent or "
                      "slowly convergent."}

    try:
        msg = msgs[ier]
    except KeyError:
        msg = msgs['unknown']

    if ier in [1, 2, 3, 4, 5, 7]:
        # Soft failures: warn (or return the message with full_output) and
        # still return the best available estimate.
        if full_output:
            if weight in ['cos', 'sin'] and (b == np.inf or a == -np.inf):
                return retval[:-1] + (msg, explain)
            else:
                return retval[:-1] + (msg,)
        else:
            warnings.warn(msg, IntegrationWarning, stacklevel=2)
            return retval[:-1]

    elif ier == 6:  # Forensic decision tree when QUADPACK throws ier=6
        # BUG FIX: default to the generic ier=6 message so that `msg` is
        # always bound even when no specific diagnosis below matches
        # (previously this could raise UnboundLocalError instead of
        # ValueError).
        msg = msgs[6]
        if epsabs <= 0:  # Small error tolerance - applies to all methods
            if epsrel < max(50 * sys.float_info.epsilon, 5e-29):
                msg = ("If 'epsabs'<=0, 'epsrel' must be greater than both"
                       " 5e-29 and 50*(machine epsilon).")
            elif weight in ['sin', 'cos'] and (abs(a) + abs(b) == np.inf):
                msg = ("Sine or cosine weighted integrals with infinite domain"
                       " must have 'epsabs'>0.")

        elif weight is None:
            if points is None:  # QAGSE/QAGIE
                msg = ("Invalid 'limit' argument. There must be"
                       " at least one subinterval")
            else:  # QAGPE
                if not (min(a, b) <= min(points) <= max(points) <= max(a, b)):
                    msg = ("All break points in 'points' must lie within the"
                           " integration limits.")
                elif len(points) >= limit:
                    msg = (f"Number of break points ({len(points):d}) "
                           f"must be less than subinterval limit ({limit:d})")

        else:
            if maxp1 < 1:
                msg = "Chebyshev moment limit maxp1 must be >=1."

            elif weight in ('cos', 'sin') and abs(a+b) == np.inf:  # QAWFE
                msg = "Cycle limit limlst must be >=3."

            elif weight.startswith('alg'):  # QAWSE
                if min(wvar) < -1:
                    msg = "wvar parameters (alpha, beta) must both be >= -1."
                if b < a:
                    # BUG FIX: typo "satistfy" in the original message.
                    msg = "Integration limits a, b must satisfy a<b."

            elif weight == 'cauchy' and wvar in (a, b):
                msg = ("Parameter 'wvar' must not equal"
                       " integration limits 'a' or 'b'.")

    raise ValueError(msg)
586
+
587
+
588
def _quad(func, a, b, args, full_output, epsabs, epsrel, limit, points):
    """Dispatch an unweighted integral to QAGSE, QAGIE or QAGPE.

    ``infbounds`` follows the QUADPACK convention: 0 for a finite
    interval, 1 for ``[a, inf)``, -1 for ``(-inf, b]`` and 2 for
    ``(-inf, inf)``.  ``bound`` is the finite endpoint passed to QAGIE
    (ignored for the doubly-infinite case).
    """
    lower_infinite = (a == -np.inf)
    upper_infinite = (b == np.inf)

    if upper_infinite and lower_infinite:
        infbounds, bound = 2, 0      # bound is ignored by QAGIE here
    elif upper_infinite:
        infbounds, bound = 1, a
    elif lower_infinite:
        infbounds, bound = -1, b
    else:
        infbounds, bound = 0, 0.0    # finite interval: standard integration

    if points is None:
        if infbounds:
            return _quadpack._qagie(func, bound, infbounds, args, full_output,
                                    epsabs, epsrel, limit)
        return _quadpack._qagse(func, a, b, args, full_output, epsabs,
                                epsrel, limit)

    if infbounds != 0:
        raise ValueError("Infinity inputs cannot be used with break points.")

    # Duplicate break points would force function evaluation at singular
    # points, so collapse them; interior points only.  The two trailing
    # zeros are workspace slots required by the Fortran QAGPE interface.
    the_points = np.unique(points)
    the_points = the_points[(a < the_points) & (the_points < b)]
    the_points = np.concatenate((the_points, (0., 0.)))
    return _quadpack._qagpe(func, a, b, the_points, args, full_output,
                            epsabs, epsrel, limit)
621
+
622
+
623
def _quad_weight(func, a, b, args, full_output, epsabs, epsrel,
                 limlst, limit, maxp1,weight, wvar, wopts):
    """Dispatch a weighted integral to the appropriate QUADPACK routine.

    'cos'/'sin' weights go to QAWOE (finite limits) or QAWFE (one infinite
    limit); 'alg*' weights go to QAWSE and 'cauchy' to QAWCE (finite limits
    only).  Raises ValueError for unsupported weight/limit combinations.
    """
    if weight not in ['cos','sin','alg','alg-loga','alg-logb','alg-log','cauchy']:
        raise ValueError("%s not a recognized weighting function." % weight)

    # Map the weight name to the integer `integr` code expected by the
    # Fortran routines (note 'cos'/'sin' and 'alg*' use separate code spaces).
    strdict = {'cos':1,'sin':2,'alg':1,'alg-loga':2,'alg-logb':3,'alg-log':4}

    if weight in ['cos','sin']:
        integr = strdict[weight]
        if (b != np.inf and a != -np.inf):  # finite limits
            if wopts is None:         # no precomputed Chebyshev moments
                return _quadpack._qawoe(func, a, b, wvar, integr, args, full_output,
                                        epsabs, epsrel, limit, maxp1,1)
            else:                     # precomputed Chebyshev moments
                momcom = wopts[0]
                chebcom = wopts[1]
                # icall=2 tells QAWOE to reuse the supplied moments.
                return _quadpack._qawoe(func, a, b, wvar, integr, args,
                                        full_output,epsabs, epsrel, limit, maxp1, 2,
                                        momcom, chebcom)

        elif (b == np.inf and a != -np.inf):
            return _quadpack._qawfe(func, a, wvar, integr, args, full_output,
                                    epsabs, limlst, limit, maxp1)
        elif (b != np.inf and a == -np.inf):  # remap function and interval
            # Reflect x -> -x so the (-inf, b] integral becomes [-b, inf).
            # cos is even under this reflection; sin is odd, hence the
            # negation in the second wrapper.  The original `func` is
            # smuggled through as the first element of `args` so that the
            # wrapper stays compatible with the extra-argument calling
            # convention.
            if weight == 'cos':
                def thefunc(x,*myargs):
                    y = -x
                    func = myargs[0]
                    myargs = (y,) + myargs[1:]
                    return func(*myargs)
            else:
                def thefunc(x,*myargs):
                    y = -x
                    func = myargs[0]
                    myargs = (y,) + myargs[1:]
                    return -func(*myargs)
            args = (func,) + args
            return _quadpack._qawfe(thefunc, -b, wvar, integr, args,
                                    full_output, epsabs, limlst, limit, maxp1)
        else:
            raise ValueError("Cannot integrate with this weight from -Inf to +Inf.")
    else:
        # 'alg*' and 'cauchy' weights require a finite interval.
        if a in [-np.inf, np.inf] or b in [-np.inf, np.inf]:
            message = "Cannot integrate with this weight over an infinite interval."
            raise ValueError(message)

        if weight.startswith('alg'):
            integr = strdict[weight]
            return _quadpack._qawse(func, a, b, wvar, integr, args,
                                    full_output, epsabs, epsrel, limit)
        else:  # weight == 'cauchy'
            return _quadpack._qawce(func, a, b, wvar, args, full_output,
                                    epsabs, epsrel, limit)
676
+
677
+
678
+ def dblquad(func, a, b, gfun, hfun, args=(), epsabs=1.49e-8, epsrel=1.49e-8):
679
+ """
680
+ Compute a double integral.
681
+
682
+ Return the double (definite) integral of ``func(y, x)`` from ``x = a..b``
683
+ and ``y = gfun(x)..hfun(x)``.
684
+
685
+ Parameters
686
+ ----------
687
+ func : callable
688
+ A Python function or method of at least two variables: y must be the
689
+ first argument and x the second argument.
690
+ a, b : float
691
+ The limits of integration in x: `a` < `b`
692
+ gfun : callable or float
693
+ The lower boundary curve in y which is a function taking a single
694
+ floating point argument (x) and returning a floating point result
695
+ or a float indicating a constant boundary curve.
696
+ hfun : callable or float
697
+ The upper boundary curve in y (same requirements as `gfun`).
698
+ args : sequence, optional
699
+ Extra arguments to pass to `func`.
700
+ epsabs : float, optional
701
+ Absolute tolerance passed directly to the inner 1-D quadrature
702
+ integration. Default is 1.49e-8. ``dblquad`` tries to obtain
703
+ an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))``
704
+ where ``i`` = inner integral of ``func(y, x)`` from ``gfun(x)``
705
+ to ``hfun(x)``, and ``result`` is the numerical approximation.
706
+ See `epsrel` below.
707
+ epsrel : float, optional
708
+ Relative tolerance of the inner 1-D integrals. Default is 1.49e-8.
709
+ If ``epsabs <= 0``, `epsrel` must be greater than both 5e-29
710
+ and ``50 * (machine epsilon)``. See `epsabs` above.
711
+
712
+ Returns
713
+ -------
714
+ y : float
715
+ The resultant integral.
716
+ abserr : float
717
+ An estimate of the error.
718
+
719
+ See Also
720
+ --------
721
+ quad : single integral
722
+ tplquad : triple integral
723
+ nquad : N-dimensional integrals
724
+ fixed_quad : fixed-order Gaussian quadrature
725
+ simpson : integrator for sampled data
726
+ romb : integrator for sampled data
727
+ scipy.special : for coefficients and roots of orthogonal polynomials
728
+
729
+
730
+ Notes
731
+ -----
732
+ For valid results, the integral must converge; behavior for divergent
733
+ integrals is not guaranteed.
734
+
735
+ **Details of QUADPACK level routines**
736
+
737
+ `quad` calls routines from the FORTRAN library QUADPACK. This section
738
+ provides details on the conditions for each routine to be called and a
739
+ short description of each routine. For each level of integration, ``qagse``
740
+ is used for finite limits or ``qagie`` is used if either limit (or both!)
741
+ are infinite. The following provides a short description from [1]_ for each
742
+ routine.
743
+
744
+ qagse
745
+ is an integrator based on globally adaptive interval
746
+ subdivision in connection with extrapolation, which will
747
+ eliminate the effects of integrand singularities of
748
+ several types.
749
+ qagie
750
+ handles integration over infinite intervals. The infinite range is
751
+ mapped onto a finite interval and subsequently the same strategy as
752
+ in ``QAGS`` is applied.
753
+
754
+ References
755
+ ----------
756
+
757
+ .. [1] Piessens, Robert; de Doncker-Kapenga, Elise;
758
+ Überhuber, Christoph W.; Kahaner, David (1983).
759
+ QUADPACK: A subroutine package for automatic integration.
760
+ Springer-Verlag.
761
+ ISBN 978-3-540-12553-2.
762
+
763
+ Examples
764
+ --------
765
+ Compute the double integral of ``x * y**2`` over the box
766
+ ``x`` ranging from 0 to 2 and ``y`` ranging from 0 to 1.
767
+ That is, :math:`\\int^{x=2}_{x=0} \\int^{y=1}_{y=0} x y^2 \\,dy \\,dx`.
768
+
769
+ >>> import numpy as np
770
+ >>> from scipy import integrate
771
+ >>> f = lambda y, x: x*y**2
772
+ >>> integrate.dblquad(f, 0, 2, 0, 1)
773
+ (0.6666666666666667, 7.401486830834377e-15)
774
+
775
+ Calculate :math:`\\int^{x=\\pi/4}_{x=0} \\int^{y=\\cos(x)}_{y=\\sin(x)} 1
776
+ \\,dy \\,dx`.
777
+
778
+ >>> f = lambda y, x: 1
779
+ >>> integrate.dblquad(f, 0, np.pi/4, np.sin, np.cos)
780
+ (0.41421356237309503, 1.1083280054755938e-14)
781
+
782
+ Calculate :math:`\\int^{x=1}_{x=0} \\int^{y=2-x}_{y=x} a x y \\,dy \\,dx`
783
+ for :math:`a=1, 3`.
784
+
785
+ >>> f = lambda y, x, a: a*x*y
786
+ >>> integrate.dblquad(f, 0, 1, lambda x: x, lambda x: 2-x, args=(1,))
787
+ (0.33333333333333337, 5.551115123125783e-15)
788
+ >>> integrate.dblquad(f, 0, 1, lambda x: x, lambda x: 2-x, args=(3,))
789
+ (0.9999999999999999, 1.6653345369377348e-14)
790
+
791
+ Compute the two-dimensional Gaussian Integral, which is the integral of the
792
+ Gaussian function :math:`f(x,y) = e^{-(x^{2} + y^{2})}`, over
793
+ :math:`(-\\infty,+\\infty)`. That is, compute the integral
794
+ :math:`\\iint^{+\\infty}_{-\\infty} e^{-(x^{2} + y^{2})} \\,dy\\,dx`.
795
+
796
+ >>> f = lambda x, y: np.exp(-(x ** 2 + y ** 2))
797
+ >>> integrate.dblquad(f, -np.inf, np.inf, -np.inf, np.inf)
798
+ (3.141592653589777, 2.5173086737433208e-08)
799
+
800
+ """
801
+
802
+ def temp_ranges(*args):
803
+ return [gfun(args[0]) if callable(gfun) else gfun,
804
+ hfun(args[0]) if callable(hfun) else hfun]
805
+
806
+ return nquad(func, [temp_ranges, [a, b]], args=args,
807
+ opts={"epsabs": epsabs, "epsrel": epsrel})
808
+
809
+
810
def tplquad(func, a, b, gfun, hfun, qfun, rfun, args=(), epsabs=1.49e-8,
            epsrel=1.49e-8):
    """
    Compute a triple (definite) integral.

    Return the triple integral of ``func(z, y, x)`` from ``x = a..b``,
    ``y = gfun(x)..hfun(x)``, and ``z = qfun(x, y)..rfun(x, y)``.

    Parameters
    ----------
    func : function
        A Python function or method of at least three variables in the
        order (z, y, x).
    a, b : float
        The limits of integration in x: `a` < `b`.
    gfun, hfun : function or float
        The lower and upper boundary curves in y. Each is either a
        callable taking a single float (x) and returning a float, or a
        float denoting a constant boundary curve.
    qfun, rfun : function or float
        The lower and upper boundary surfaces in z. Each is either a
        callable taking two floats in the order (x, y) and returning a
        float, or a float denoting a constant boundary surface.
    args : tuple, optional
        Extra arguments to pass to `func`.
    epsabs : float, optional
        Absolute tolerance passed directly to the innermost 1-D quadrature
        integration. Default is 1.49e-8.
    epsrel : float, optional
        Relative tolerance of the innermost 1-D integrals. Default is 1.49e-8.

    Returns
    -------
    y : float
        The resultant integral.
    abserr : float
        An estimate of the error.

    See Also
    --------
    quad : Adaptive quadrature using QUADPACK
    fixed_quad : Fixed-order Gaussian quadrature
    dblquad : Double integrals
    nquad : N-dimensional integrals

    Notes
    -----
    For valid results, the integral must converge; behavior for divergent
    integrals is not guaranteed.

    This is a thin wrapper around `nquad`; each level of integration uses
    the QUADPACK routines ``qagse`` (finite limits) or ``qagie`` (one or
    both limits infinite).

    Examples
    --------
    Compute the triple integral of ``x * y * z`` over ``x`` in [1, 2],
    ``y`` in [2, 3] and ``z`` in [0, 1]:

    >>> import numpy as np
    >>> from scipy import integrate
    >>> f = lambda z, y, x: x*y*z
    >>> integrate.tplquad(f, 1, 2, 2, 3, 0, 1)
    (1.8749999999999998, 3.3246447942574074e-14)

    Note that `qfun`/`rfun` take arguments in the order (x, y), even though
    ``f`` takes arguments in the order (z, y, x):

    >>> integrate.tplquad(f, 0, 1, 0, lambda x: 1-2*x, 0, lambda x, y: 1-x-2*y)
    (0.05416666666666668, 2.1774196738157757e-14)
    """
    # nquad integrates innermost-first: the first range callable receives
    # (y, x, *args) and supplies the z limits, while the second receives
    # (x, *args) and supplies the y limits.  qfun/rfun use the (x, y)
    # calling convention, hence the argument swap in z_limits.
    def z_limits(*vals):
        lo = qfun(vals[1], vals[0]) if callable(qfun) else qfun
        hi = rfun(vals[1], vals[0]) if callable(rfun) else rfun
        return [lo, hi]

    def y_limits(*vals):
        lo = gfun(vals[0]) if callable(gfun) else gfun
        hi = hfun(vals[0]) if callable(hfun) else hfun
        return [lo, hi]

    return nquad(func, [z_limits, y_limits, [a, b]], args=args,
                 opts={"epsabs": epsabs, "epsrel": epsrel})
955
+
956
+
957
def nquad(func, ranges, args=None, opts=None, full_output=False):
    r"""
    Integration over multiple variables.

    Wraps `quad` to enable integration over multiple variables.
    Various options allow improved integration of discontinuous functions,
    as well as the use of weighted integration, and generally finer control
    of the integration process.

    Parameters
    ----------
    func : {callable, scipy.LowLevelCallable}
        The function to be integrated. Has arguments of ``x0, ... xn``,
        ``t0, ... tm``, where integration is carried out over
        ``x0, ... xn``, which must be floats, and ``t0, ... tm`` are the
        extra arguments passed in `args`. The signature should be
        ``func(x0, x1, ..., xn, t0, t1, ..., tm)``. Integration is carried
        out in order: ``x0`` is the innermost integral and ``xn`` the
        outermost.

        For improved performance, `func` may be a `scipy.LowLevelCallable`
        with one of the signatures::

            double func(int n, double *xx)
            double func(int n, double *xx, void *user_data)

        where ``n`` is the number of variables and args, ``xx`` holds the
        coordinates and extra arguments, and ``user_data`` is the data
        contained in the `scipy.LowLevelCallable`.
    ranges : iterable object
        Each element may be either a sequence of 2 numbers, or a callable
        returning such a sequence. ``ranges[0]`` corresponds to integration
        over ``x0``, and so on. A callable element is invoked with all of
        the integration variables available at that level as well as any
        parametric arguments, e.g. for ``func = f(x0, x1, x2, t0, t1)``,
        ``ranges[0]`` may be ``(a, b)`` or ``(a, b) = range0(x1, x2, t0, t1)``.
    args : iterable object, optional
        Additional arguments ``t0, ... tn``, required by `func`, `ranges`,
        and `opts`.
    opts : iterable object or dict, optional
        Options to be passed to `quad`. May be empty, a dict, or a sequence
        of dicts or functions that return a dict. If empty, the default
        options from `quad` are used. If a dict, the same options are used
        for all levels of integration. If a sequence, ``opts[0]``
        corresponds to integration over ``x0``, and so on. A callable
        element has the same signature as the corresponding `ranges`
        element. The available options with their defaults are:

        - epsabs = 1.49e-08
        - epsrel = 1.49e-08
        - limit  = 50
        - points = None
        - weight = None
        - wvar   = None
        - wopts  = None

        For more information on these options, see `quad`.
    full_output : bool, optional
        Partial implementation of ``full_output`` from `quad`. The number
        of integrand function evaluations ``neval`` can be obtained by
        setting ``full_output=True``.

    Returns
    -------
    result : float
        The result of the integration.
    abserr : float
        The maximum of the estimates of the absolute error in the various
        integration results.
    out_dict : dict, optional
        A dict containing additional information on the integration.

    See Also
    --------
    quad : 1-D numerical integration
    dblquad, tplquad : double and triple integrals
    fixed_quad : fixed-order Gaussian quadrature

    Notes
    -----
    For valid results, the integral must converge; behavior for divergent
    integrals is not guaranteed.

    Each 1-D level is handled by a QUADPACK routine chosen from ``qagse``,
    ``qagie``, ``qagpe``, ``qawoe``, ``qawfe``, ``qawse`` or ``qawce``,
    depending on the ``weight`` and ``points`` options and on whether the
    limits are finite; see `quad` for details.

    References
    ----------
    .. [1] Piessens, Robert; de Doncker-Kapenga, Elise;
           Überhuber, Christoph W.; Kahaner, David (1983).
           QUADPACK: A subroutine package for automatic integration.
           Springer-Verlag.
           ISBN 978-3-540-12553-2.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import integrate
    >>> func = lambda x0,x1,x2,x3 : x0**2 + x1*x2 - x3**3 + np.sin(x0) + (
    ...                                 1 if (x0-.2*x3-.5-.25*x1>0) else 0)
    >>> def opts0(*args, **kwargs):
    ...     return {'points':[0.2*args[2] + 0.5 + 0.25*args[0]]}
    >>> integrate.nquad(func, [[0,1], [-1,1], [.13,.8], [-.15,1]],
    ...                 opts=[opts0,{},{},{}], full_output=True)
    (1.5267454070738633, 2.9437360001402324e-14, {'neval': 388962})
    """
    depth = len(ranges)
    # Normalise every range and every option to a callable, so _NQuad can
    # treat constant and parametric specifications uniformly.
    range_fns = [rng if callable(rng) else _RangeFunc(rng) for rng in ranges]
    if args is None:
        args = ()
    if opts is None:
        opts = [{}] * depth
    if isinstance(opts, dict):
        opt_fns = [_OptFunc(opts)] * depth
    else:
        opt_fns = [opt if callable(opt) else _OptFunc(opt) for opt in opts]
    return _NQuad(func, range_fns, opt_fns, full_output).integrate(*args)
1207
+
1208
+
1209
+ class _RangeFunc:
1210
+ def __init__(self, range_):
1211
+ self.range_ = range_
1212
+
1213
+ def __call__(self, *args):
1214
+ """Return stored value.
1215
+
1216
+ *args needed because range_ can be float or func, and is called with
1217
+ variable number of parameters.
1218
+ """
1219
+ return self.range_
1220
+
1221
+
1222
+ class _OptFunc:
1223
+ def __init__(self, opt):
1224
+ self.opt = opt
1225
+
1226
+ def __call__(self, *args):
1227
+ """Return stored dict."""
1228
+ return self.opt
1229
+
1230
+
1231
class _NQuad:
    """Helper performing the recursive 1-D integrations for `nquad`.

    Holds the integrand, per-level range/option callables, and the running
    maximum absolute-error estimate across all `quad` calls.
    """

    def __init__(self, func, ranges, opts, full_output):
        # Running maximum of the per-level absolute error estimates.
        self.abserr = 0
        self.func = func
        # `ranges` and `opts` are lists of callables, one per level,
        # ordered innermost-first (see `nquad`).
        self.ranges = ranges
        self.opts = opts
        self.maxdepth = len(ranges)
        self.full_output = full_output
        if self.full_output:
            # Accumulates the innermost-level integrand evaluation count.
            self.out_dict = {'neval': 0}

    def integrate(self, *args, **kwargs):
        """Recursively integrate one level, dispatching inner levels to
        further `integrate` calls via `quad`.

        `depth` is passed as a keyword by the recursive calls; 0 means the
        outermost level.  *args carries the outer integration variables
        followed by the user-supplied parametric arguments.
        """
        depth = kwargs.pop('depth', 0)
        if kwargs:
            raise ValueError('unexpected kwargs')

        # Get the integration range and options for this depth.
        # Levels are stored innermost-first, so index from the end.
        ind = -(depth + 1)
        fn_range = self.ranges[ind]
        low, high = fn_range(*args)
        fn_opt = self.opts[ind]
        # Copy before mutating: the same opt dict may be shared by levels.
        opt = dict(fn_opt(*args))

        if 'points' in opt:
            # quad rejects break points outside [low, high]; drop them.
            opt['points'] = [x for x in opt['points'] if low <= x <= high]
        if depth + 1 == self.maxdepth:
            # Innermost level: integrate the user's function directly.
            f = self.func
        else:
            # Otherwise integrate the next level as a function of one
            # variable; quad prepends that variable to `args`.
            f = partial(self.integrate, depth=depth+1)
        quad_r = quad(f, low, high, args=args, full_output=self.full_output,
                      **opt)
        value = quad_r[0]
        abserr = quad_r[1]
        if self.full_output:
            infodict = quad_r[2]
            # The 'neval' parameter in full_output returns the total
            # number of times the integrand function was evaluated.
            # Therefore, only the innermost integration loop counts.
            if depth + 1 == self.maxdepth:
                self.out_dict['neval'] += infodict['neval']
        self.abserr = max(self.abserr, abserr)
        if depth > 0:
            return value
        else:
            # Final result of N-D integration with error
            if self.full_output:
                return value, self.abserr, self.out_dict
            else:
                return value, self.abserr
parrot/lib/python3.10/site-packages/scipy/integrate/_quadrature.py ADDED
@@ -0,0 +1,1684 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from __future__ import annotations
2
+ from typing import TYPE_CHECKING, Callable, Any, cast
3
+ import numpy as np
4
+ import numpy.typing as npt
5
+ import math
6
+ import warnings
7
+ from collections import namedtuple
8
+
9
+ from scipy.special import roots_legendre
10
+ from scipy.special import gammaln, logsumexp
11
+ from scipy._lib._util import _rng_spawn
12
+ from scipy._lib.deprecation import _deprecated
13
+
14
+
15
+ __all__ = ['fixed_quad', 'quadrature', 'romberg', 'romb',
16
+ 'trapezoid', 'simpson',
17
+ 'cumulative_trapezoid', 'newton_cotes',
18
+ 'qmc_quad', 'AccuracyWarning', 'cumulative_simpson']
19
+
20
+
21
def trapezoid(y, x=None, dx=1.0, axis=-1):
    r"""
    Integrate along the given axis using the composite trapezoidal rule.

    If `x` is provided, the integration happens in sequence along its
    elements - they are not sorted.

    Integrate `y` (`x`) along each 1d slice on the given axis, compute
    :math:`\int y(x) dx`.
    When `x` is specified, this integrates along the parametric curve,
    computing :math:`\int_t y(t) dt =
    \int_t y(t) \left.\frac{dx}{dt}\right|_{x=x(t)} dt`.

    Parameters
    ----------
    y : array_like
        Input array to integrate.
    x : array_like, optional
        The sample points corresponding to the `y` values. If `x` is None,
        the sample points are assumed to be evenly spaced `dx` apart. The
        default is None.
    dx : scalar, optional
        The spacing between sample points when `x` is None. The default is 1.
    axis : int, optional
        The axis along which to integrate.

    Returns
    -------
    trapezoid : float or ndarray
        Definite integral of `y` = n-dimensional array as approximated
        along a single axis by the trapezoidal rule. If `y` is 1-D the
        result is a float; otherwise the result has one dimension fewer
        than `y`.

    See Also
    --------
    cumulative_trapezoid, simpson, romb

    References
    ----------
    .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Trapezoidal_rule

    Examples
    --------
    >>> import numpy as np
    >>> from scipy import integrate
    >>> integrate.trapezoid([1, 2, 3])
    4.0
    >>> integrate.trapezoid([1, 2, 3], x=[4, 6, 8])
    8.0
    >>> integrate.trapezoid([1, 2, 3], dx=2)
    8.0

    Using a decreasing ``x`` corresponds to integrating in reverse:

    >>> integrate.trapezoid([1, 2, 3], x=[8, 6, 4])
    -8.0

    ``trapezoid`` can be applied along a specified axis:

    >>> a = np.arange(6).reshape(2, 3)
    >>> integrate.trapezoid(a, axis=0)
    array([1.5, 2.5, 3.5])
    >>> integrate.trapezoid(a, axis=1)
    array([2., 8.])
    """
    y = np.asanyarray(y)
    if x is None:
        spacing = dx
    else:
        x = np.asanyarray(x)
        if x.ndim == 1:
            spacing = np.diff(x)
            # Reshape the 1-D spacings so they broadcast against y along
            # the integration axis.
            bcast_shape = [1] * y.ndim
            bcast_shape[axis] = spacing.shape[0]
            spacing = spacing.reshape(bcast_shape)
        else:
            spacing = np.diff(x, axis=axis)
    upper = [slice(None)] * y.ndim
    lower = [slice(None)] * y.ndim
    upper[axis] = slice(1, None)
    lower[axis] = slice(None, -1)
    upper = tuple(upper)
    lower = tuple(lower)
    try:
        ret = (spacing * (y[upper] + y[lower]) / 2.0).sum(axis)
    except ValueError:
        # The operands are array-like subclasses whose arithmetic failed;
        # retry after casting both to plain ndarrays.
        spacing = np.asarray(spacing)
        y = np.asarray(y)
        ret = np.add.reduce(spacing * (y[upper] + y[lower]) / 2.0, axis)
    return ret
149
+
150
+
151
class AccuracyWarning(Warning):
    """Warning category for the quadrature routines in this module.

    NOTE(review): the emitting call sites are outside this chunk;
    presumably issued when a routine cannot reach the requested
    tolerance -- confirm against the iterative integrators below.
    """
    pass
153
+
154
+
155
# Static-typing shim: at type-check time, describe a callable that also
# carries a `.cache` dict attribute (used by `_cached_roots_legendre`);
# at runtime, fall back to plain `Callable` so nothing is imported.
if TYPE_CHECKING:
    # workaround for mypy function attributes see:
    # https://github.com/python/mypy/issues/2087#issuecomment-462726600
    from typing import Protocol

    class CacheAttributes(Protocol):
        cache: dict[int, tuple[Any, Any]]
else:
    CacheAttributes = Callable
164
+
165
+
166
def cache_decorator(func: Callable) -> CacheAttributes:
    # No-op at runtime: only narrows the static type so mypy accepts the
    # `.cache` attribute attached to the decorated function below.
    return cast(CacheAttributes, func)
168
+
169
+
170
@cache_decorator
def _cached_roots_legendre(n):
    """
    Cache roots_legendre results to speed up calls of the fixed_quad
    function.
    """
    # EAFP: hit the cache first, compute and memoize on a miss.  The
    # cache lives as a function attribute (see assignment below) so the
    # `cache_decorator` typing shim can describe it.
    try:
        return _cached_roots_legendre.cache[n]
    except KeyError:
        roots = _cached_roots_legendre.cache[n] = roots_legendre(n)
        return roots


_cached_roots_legendre.cache = dict()
184
+
185
+
186
def fixed_quad(func, a, b, args=(), n=5):
    """
    Compute a definite integral using fixed-order Gaussian quadrature.

    Integrate `func` from `a` to `b` using Gaussian quadrature of
    order `n`.

    Parameters
    ----------
    func : callable
        A Python function or method to integrate (must accept vector
        inputs). If integrating a vector-valued function, the returned
        array must have shape ``(..., len(x))``.
    a : float
        Lower limit of integration.
    b : float
        Upper limit of integration.
    args : tuple, optional
        Extra arguments to pass to function, if any.
    n : int, optional
        Order of quadrature integration. Default is 5.

    Returns
    -------
    val : float
        Gaussian quadrature approximation to the integral
    none : None
        Statically returned value of None

    See Also
    --------
    quad : adaptive quadrature using QUADPACK
    dblquad, tplquad : double and triple integrals
    romb, simpson : integrators for sampled data
    cumulative_trapezoid : cumulative integration for sampled data

    Examples
    --------
    >>> from scipy import integrate
    >>> import numpy as np
    >>> f = lambda x: x**8
    >>> integrate.fixed_quad(f, 0.0, 1.0, n=5)
    (0.11111111111111102, None)
    >>> print(1/9.0)  # analytical result
    0.1111111111111111
    """
    # Gauss-Legendre quadrature is defined on a finite interval only.
    if np.isinf(a) or np.isinf(b):
        raise ValueError("Gaussian quadrature is only available for "
                         "finite limits.")
    nodes, weights = _cached_roots_legendre(n)
    nodes = np.real(nodes)
    # Affine map of the Legendre nodes from [-1, 1] onto [a, b].
    samples = (b-a)*(nodes+1)/2.0 + a
    return (b-a)/2.0 * np.sum(weights*func(samples, *args), axis=-1), None
251
+
252
+
253
def vectorize1(func, args=(), vec_func=False):
    """Vectorize the call to a function.

    Internal utility used by `romberg` and `quadrature` to wrap a
    user-supplied integrand so it can be evaluated on arrays of points.

    Parameters
    ----------
    func : callable
        User defined function.
    args : tuple, optional
        Extra arguments for the function.
    vec_func : bool, optional
        True if `func` already accepts vector arguments.

    Returns
    -------
    vfunc : callable
        A function that takes a scalar or vector argument and returns
        the corresponding result.
    """
    if vec_func:
        # func already handles arrays; just bind the extra arguments.
        def vfunc(x):
            return func(x, *args)
        return vfunc

    def vfunc(x):
        if np.isscalar(x):
            return func(x, *args)
        x = np.asarray(x)
        # Evaluate the first point separately to discover the output
        # dtype, then fill a pre-allocated array point by point.
        first = func(x[0], *args)
        out = np.empty(len(x), dtype=getattr(first, 'dtype', type(first)))
        out[0] = first
        for idx in range(1, len(x)):
            out[idx] = func(x[idx], *args)
        return out

    return vfunc
296
+
297
+
298
@_deprecated("`scipy.integrate.quadrature` is deprecated as of SciPy 1.12.0 "
             "and will be removed in SciPy 1.15.0. Please use "
             "`scipy.integrate.quad` instead.")
def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50,
               vec_func=True, miniter=1):
    """
    Compute a definite integral using fixed-tolerance Gaussian quadrature.

    .. deprecated:: 1.12.0

        This function is deprecated as of SciPy 1.12.0 and will be removed
        in SciPy 1.15.0. Please use `scipy.integrate.quad` instead.

    Integrate `func` from `a` to `b` using Gaussian quadrature
    with absolute tolerance `tol`.

    Parameters
    ----------
    func : function
        A Python function or method to integrate.
    a : float
        Lower limit of integration.
    b : float
        Upper limit of integration.
    args : tuple, optional
        Extra arguments to pass to function.
    tol, rtol : float, optional
        Iteration stops when error between last two iterates is less than
        `tol` OR the relative change is less than `rtol`.
    maxiter : int, optional
        Maximum order of Gaussian quadrature.
    vec_func : bool, optional
        True or False if func handles arrays as arguments (is
        a "vector" function). Default is True.
    miniter : int, optional
        Minimum order of Gaussian quadrature.

    Returns
    -------
    val : float
        Gaussian quadrature approximation (within tolerance) to integral.
    err : float
        Difference between last two estimates of the integral.

    See Also
    --------
    fixed_quad : fixed-order Gaussian quadrature
    quad : adaptive quadrature using QUADPACK
    dblquad : double integrals
    tplquad : triple integrals
    romb : integrator for sampled data
    simpson : integrator for sampled data
    cumulative_trapezoid : cumulative integration for sampled data

    Examples
    --------
    >>> from scipy import integrate
    >>> import numpy as np
    >>> f = lambda x: x**8
    >>> integrate.quadrature(f, 0.0, 1.0)
    (0.11111111111111106, 4.163336342344337e-17)
    >>> print(1/9.0)  # analytical result
    0.1111111111111111

    >>> integrate.quadrature(np.cos, 0.0, np.pi/2)
    (0.9999999999999536, 3.9611425250996035e-11)
    >>> np.sin(np.pi/2)-np.sin(0)  # analytical result
    1.0

    """
    if not isinstance(args, tuple):
        args = (args,)
    vfunc = vectorize1(func, args, vec_func=vec_func)
    val = np.inf
    err = np.inf
    # Guarantee at least one refinement step so `err` is meaningful.
    maxiter = max(miniter+1, maxiter)
    for n in range(miniter, maxiter+1):
        newval = fixed_quad(vfunc, a, b, (), n)[0]
        err = abs(newval-val)
        val = newval

        if err < tol or err < rtol*abs(val):
            break
    else:
        # Loop exhausted without hitting either tolerance.
        warnings.warn(
            "maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err),
            AccuracyWarning, stacklevel=2
        )
    return val, err
387
+
388
+
389
def tupleset(t, i, value):
    """Return a copy of tuple ``t`` with the element at index ``i``
    (negative indices allowed) replaced by ``value``."""
    items = list(t)
    items[i] = value
    return tuple(items)
393
+
394
+
395
def cumulative_trapezoid(y, x=None, dx=1.0, axis=-1, initial=None):
    """
    Cumulatively integrate y(x) using the composite trapezoidal rule.

    Parameters
    ----------
    y : array_like
        Values to integrate.
    x : array_like, optional
        The coordinate to integrate along. If None (default), use spacing `dx`
        between consecutive elements in `y`.
    dx : float, optional
        Spacing between elements of `y`. Only used if `x` is None.
    axis : int, optional
        Specifies the axis to cumulate. Default is -1 (last axis).
    initial : scalar, optional
        If given, insert this value at the beginning of the returned result.
        0 or None are the only values accepted. Default is None, which means
        `res` has one element less than `y` along the axis of integration.

        .. deprecated:: 1.12.0
            The option for non-zero inputs for `initial` will be deprecated in
            SciPy 1.15.0. After this time, a ValueError will be raised if
            `initial` is not None or 0.

    Returns
    -------
    res : ndarray
        The result of cumulative integration of `y` along `axis`.
        If `initial` is None, the shape is such that the axis of integration
        has one less value than `y`. If `initial` is given, the shape is equal
        to that of `y`.

    See Also
    --------
    numpy.cumsum, numpy.cumprod
    cumulative_simpson : cumulative integration using Simpson's 1/3 rule
    quad : adaptive quadrature using QUADPACK
    fixed_quad : fixed-order Gaussian quadrature
    dblquad : double integrals
    tplquad : triple integrals
    romb : integrators for sampled data

    Examples
    --------
    >>> from scipy import integrate
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt

    >>> x = np.linspace(-2, 2, num=20)
    >>> y = x
    >>> y_int = integrate.cumulative_trapezoid(y, x, initial=0)
    >>> plt.plot(x, y_int, 'ro', x, y[0] + 0.5 * x**2, 'b-')
    >>> plt.show()

    """
    y = np.asarray(y)
    if y.shape[axis] == 0:
        raise ValueError("At least one point is required along `axis`.")
    if x is None:
        # Uniform spacing: the scalar `dx` broadcasts against the pairwise sums.
        d = dx
    else:
        x = np.asarray(x)
        if x.ndim == 1:
            d = np.diff(x)
            # reshape to correct shape so `d` broadcasts along `axis` of `y`
            shape = [1] * y.ndim
            shape[axis] = -1
            d = d.reshape(shape)
        elif len(x.shape) != len(y.shape):
            raise ValueError("If given, shape of x must be 1-D or the "
                             "same as y.")
        else:
            d = np.diff(x, axis=axis)

        if d.shape[axis] != y.shape[axis] - 1:
            raise ValueError("If given, length of x along axis must be the "
                             "same as y.")

    # Trapezoid areas: spacing * mean of adjacent samples, then running sum.
    nd = len(y.shape)
    slice1 = tupleset((slice(None),)*nd, axis, slice(1, None))
    slice2 = tupleset((slice(None),)*nd, axis, slice(None, -1))
    res = np.cumsum(d * (y[slice1] + y[slice2]) / 2.0, axis=axis)

    if initial is not None:
        if initial != 0:
            # Non-zero `initial` still works but is deprecated (see docstring).
            warnings.warn(
                "The option for values for `initial` other than None or 0 is "
                "deprecated as of SciPy 1.12.0 and will raise a value error in"
                " SciPy 1.15.0.",
                DeprecationWarning, stacklevel=2
            )
        if not np.isscalar(initial):
            raise ValueError("`initial` parameter should be a scalar.")

        # Prepend `initial` so the output has the same length as `y`.
        shape = list(res.shape)
        shape[axis] = 1
        res = np.concatenate([np.full(shape, initial, dtype=res.dtype), res],
                             axis=axis)

    return res
496
+
497
+
498
def _basic_simpson(y, start, stop, x, dx, axis):
    # Composite Simpson's rule over consecutive interval pairs
    # [start, start+2), [start+2, start+4), ... along `axis`.
    # `x is None` selects the uniform-spacing formula with step `dx`;
    # otherwise sample spacings are taken from `x`.
    nd = len(y.shape)
    if start is None:
        start = 0
    step = 2
    slice_all = (slice(None),)*nd
    # slice0/slice1/slice2 pick the left, middle and right sample of each
    # parabolic segment.
    slice0 = tupleset(slice_all, axis, slice(start, stop, step))
    slice1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
    slice2 = tupleset(slice_all, axis, slice(start+2, stop+2, step))

    if x is None:  # Even-spaced Simpson's rule.
        result = np.sum(y[slice0] + 4.0*y[slice1] + y[slice2], axis=axis)
        result *= dx / 3.0
    else:
        # Account for possibly different spacings.
        #    Simpson's rule changes a bit.
        h = np.diff(x, axis=axis)
        sl0 = tupleset(slice_all, axis, slice(start, stop, step))
        sl1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
        h0 = h[sl0].astype(float, copy=False)
        h1 = h[sl1].astype(float, copy=False)
        hsum = h0 + h1
        hprod = h0 * h1
        # Masked divisions (`where=`) avoid 0/0 warnings for degenerate
        # (zero-width) intervals; those terms contribute 0 instead of nan.
        h0divh1 = np.true_divide(h0, h1, out=np.zeros_like(h0), where=h1 != 0)
        tmp = hsum/6.0 * (y[slice0] *
                          (2.0 - np.true_divide(1.0, h0divh1,
                                                out=np.zeros_like(h0divh1),
                                                where=h0divh1 != 0)) +
                          y[slice1] * (hsum *
                                       np.true_divide(hsum, hprod,
                                                      out=np.zeros_like(hsum),
                                                      where=hprod != 0)) +
                          y[slice2] * (2.0 - h0divh1))
        result = np.sum(tmp, axis=axis)
    return result
533
+
534
+
535
def simpson(y, *, x=None, dx=1.0, axis=-1):
    """
    Integrate y(x) using samples along the given axis and the composite
    Simpson's rule. If x is None, spacing of dx is assumed.

    If there are an even number of samples, N, then there are an odd
    number of intervals (N-1), but Simpson's rule requires an even number
    of intervals. The parameter 'even' controls how this is handled.

    Parameters
    ----------
    y : array_like
        Array to be integrated.
    x : array_like, optional
        If given, the points at which `y` is sampled.
    dx : float, optional
        Spacing of integration points along axis of `x`. Only used when
        `x` is None. Default is 1.
    axis : int, optional
        Axis along which to integrate. Default is the last axis.

    Returns
    -------
    float
        The estimated integral computed with the composite Simpson's rule.

    See Also
    --------
    quad : adaptive quadrature using QUADPACK
    fixed_quad : fixed-order Gaussian quadrature
    dblquad : double integrals
    tplquad : triple integrals
    romb : integrators for sampled data
    cumulative_trapezoid : cumulative integration for sampled data
    cumulative_simpson : cumulative integration using Simpson's 1/3 rule

    Notes
    -----
    For an odd number of samples that are equally spaced the result is
    exact if the function is a polynomial of order 3 or less. If
    the samples are not equally spaced, then the result is exact only
    if the function is a polynomial of order 2 or less.

    References
    ----------
    .. [1] Cartwright, Kenneth V. Simpson's Rule Cumulative Integration with
           MS Excel and Irregularly-spaced Data. Journal of Mathematical
           Sciences and Mathematics Education. 12 (2): 1-9

    Examples
    --------
    >>> from scipy import integrate
    >>> import numpy as np
    >>> x = np.arange(0, 10)
    >>> y = np.arange(0, 10)

    >>> integrate.simpson(y, x=x)
    40.5

    >>> y = np.power(x, 3)
    >>> integrate.simpson(y, x=x)
    1640.5
    >>> integrate.quad(lambda x: x**3, 0, 9)[0]
    1640.25

    """
    y = np.asarray(y)
    nd = len(y.shape)
    N = y.shape[axis]
    last_dx = dx
    returnshape = 0
    if x is not None:
        x = np.asarray(x)
        if len(x.shape) == 1:
            # Broadcast 1-D `x` along `axis` of `y`; remember the original
            # shape so it can be restored before returning.
            shapex = [1] * nd
            shapex[axis] = x.shape[0]
            saveshape = x.shape
            returnshape = 1
            x = x.reshape(tuple(shapex))
        elif len(x.shape) != len(y.shape):
            raise ValueError("If given, shape of x must be 1-D or the "
                             "same as y.")
        if x.shape[axis] != N:
            raise ValueError("If given, length of x along axis must be the "
                             "same as y.")

    if N % 2 == 0:
        # Even N means an odd number of intervals: handle the last interval
        # with a special correction term.
        val = 0.0
        result = 0.0
        slice_all = (slice(None),) * nd

        if N == 2:
            # need at least 3 points in integration axis to form parabolic
            # segment. If there are two points then any of 'avg', 'first',
            # 'last' should give the same result.
            slice1 = tupleset(slice_all, axis, -1)
            slice2 = tupleset(slice_all, axis, -2)
            if x is not None:
                last_dx = x[slice1] - x[slice2]
            val += 0.5 * last_dx * (y[slice1] + y[slice2])
        else:
            # use Simpson's rule on first intervals
            result = _basic_simpson(y, 0, N-3, x, dx, axis)

            slice1 = tupleset(slice_all, axis, -1)
            slice2 = tupleset(slice_all, axis, -2)
            slice3 = tupleset(slice_all, axis, -3)

            h = np.asarray([dx, dx], dtype=np.float64)
            if x is not None:
                # grab the last two spacings from the appropriate axis
                hm2 = tupleset(slice_all, axis, slice(-2, -1, 1))
                hm1 = tupleset(slice_all, axis, slice(-1, None, 1))

                diffs = np.float64(np.diff(x, axis=axis))
                h = [np.squeeze(diffs[hm2], axis=axis),
                     np.squeeze(diffs[hm1], axis=axis)]

            # This is the correction for the last interval according to
            # Cartwright.
            # However, I used the equations given at
            # https://en.wikipedia.org/wiki/Simpson%27s_rule#Composite_Simpson's_rule_for_irregularly_spaced_data
            # A footnote on Wikipedia says:
            # Cartwright 2017, Equation 8. The equation in Cartwright is
            # calculating the first interval whereas the equations in the
            # Wikipedia article are adjusting for the last integral. If the
            # proper algebraic substitutions are made, the equation results in
            # the values shown.
            num = 2 * h[1] ** 2 + 3 * h[0] * h[1]
            den = 6 * (h[1] + h[0])
            alpha = np.true_divide(
                num,
                den,
                out=np.zeros_like(den),
                where=den != 0
            )

            num = h[1] ** 2 + 3.0 * h[0] * h[1]
            den = 6 * h[0]
            beta = np.true_divide(
                num,
                den,
                out=np.zeros_like(den),
                where=den != 0
            )

            num = 1 * h[1] ** 3
            den = 6 * h[0] * (h[0] + h[1])
            eta = np.true_divide(
                num,
                den,
                out=np.zeros_like(den),
                where=den != 0
            )

            result += alpha*y[slice1] + beta*y[slice2] - eta*y[slice3]

        result += val
    else:
        # Odd N: an even number of intervals, plain composite Simpson.
        result = _basic_simpson(y, 0, N-2, x, dx, axis)
    if returnshape:
        x = x.reshape(saveshape)
    return result
698
+
699
+
700
+ def _cumulatively_sum_simpson_integrals(
701
+ y: np.ndarray,
702
+ dx: np.ndarray,
703
+ integration_func: Callable[[np.ndarray, np.ndarray], np.ndarray],
704
+ ) -> np.ndarray:
705
+ """Calculate cumulative sum of Simpson integrals.
706
+ Takes as input the integration function to be used.
707
+ The integration_func is assumed to return the cumulative sum using
708
+ composite Simpson's rule. Assumes the axis of summation is -1.
709
+ """
710
+ sub_integrals_h1 = integration_func(y, dx)
711
+ sub_integrals_h2 = integration_func(y[..., ::-1], dx[..., ::-1])[..., ::-1]
712
+
713
+ shape = list(sub_integrals_h1.shape)
714
+ shape[-1] += 1
715
+ sub_integrals = np.empty(shape)
716
+ sub_integrals[..., :-1:2] = sub_integrals_h1[..., ::2]
717
+ sub_integrals[..., 1::2] = sub_integrals_h2[..., ::2]
718
+ # Integral over last subinterval can only be calculated from
719
+ # formula for h2
720
+ sub_integrals[..., -1] = sub_integrals_h2[..., -1]
721
+ res = np.cumsum(sub_integrals, axis=-1)
722
+ return res
723
+
724
+
725
+ def _cumulative_simpson_equal_intervals(y: np.ndarray, dx: np.ndarray) -> np.ndarray:
726
+ """Calculate the Simpson integrals for all h1 intervals assuming equal interval
727
+ widths. The function can also be used to calculate the integral for all
728
+ h2 intervals by reversing the inputs, `y` and `dx`.
729
+ """
730
+ d = dx[..., :-1]
731
+ f1 = y[..., :-2]
732
+ f2 = y[..., 1:-1]
733
+ f3 = y[..., 2:]
734
+
735
+ # Calculate integral over the subintervals (eqn (10) of Reference [2])
736
+ return d / 3 * (5 * f1 / 4 + 2 * f2 - f3 / 4)
737
+
738
+
739
+ def _cumulative_simpson_unequal_intervals(y: np.ndarray, dx: np.ndarray) -> np.ndarray:
740
+ """Calculate the Simpson integrals for all h1 intervals assuming unequal interval
741
+ widths. The function can also be used to calculate the integral for all
742
+ h2 intervals by reversing the inputs, `y` and `dx`.
743
+ """
744
+ x21 = dx[..., :-1]
745
+ x32 = dx[..., 1:]
746
+ f1 = y[..., :-2]
747
+ f2 = y[..., 1:-1]
748
+ f3 = y[..., 2:]
749
+
750
+ x31 = x21 + x32
751
+ x21_x31 = x21/x31
752
+ x21_x32 = x21/x32
753
+ x21x21_x31x32 = x21_x31 * x21_x32
754
+
755
+ # Calculate integral over the subintervals (eqn (8) of Reference [2])
756
+ coeff1 = 3 - x21_x31
757
+ coeff2 = 3 + x21x21_x31x32 + x21_x31
758
+ coeff3 = -x21x21_x31x32
759
+
760
+ return x21/6 * (coeff1*f1 + coeff2*f2 + coeff3*f3)
761
+
762
+
763
+ def _ensure_float_array(arr: npt.ArrayLike) -> np.ndarray:
764
+ arr = np.asarray(arr)
765
+ if np.issubdtype(arr.dtype, np.integer):
766
+ arr = arr.astype(float, copy=False)
767
+ return arr
768
+
769
+
770
def cumulative_simpson(y, *, x=None, dx=1.0, axis=-1, initial=None):
    r"""
    Cumulatively integrate y(x) using the composite Simpson's 1/3 rule.
    The integral of the samples at every point is calculated by assuming a
    quadratic relationship between each point and the two adjacent points.

    Parameters
    ----------
    y : array_like
        Values to integrate. Requires at least one point along `axis`. If two or fewer
        points are provided along `axis`, Simpson's integration is not possible and the
        result is calculated with `cumulative_trapezoid`.
    x : array_like, optional
        The coordinate to integrate along. Must have the same shape as `y` or
        must be 1D with the same length as `y` along `axis`. `x` must also be
        strictly increasing along `axis`.
        If `x` is None (default), integration is performed using spacing `dx`
        between consecutive elements in `y`.
    dx : scalar or array_like, optional
        Spacing between elements of `y`. Only used if `x` is None. Can either
        be a float, or an array with the same shape as `y`, but of length one along
        `axis`. Default is 1.0.
    axis : int, optional
        Specifies the axis to integrate along. Default is -1 (last axis).
    initial : scalar or array_like, optional
        If given, insert this value at the beginning of the returned result,
        and add it to the rest of the result. Default is None, which means no
        value at ``x[0]`` is returned and `res` has one element less than `y`
        along the axis of integration. Can either be a float, or an array with
        the same shape as `y`, but of length one along `axis`.

    Returns
    -------
    res : ndarray
        The result of cumulative integration of `y` along `axis`.
        If `initial` is None, the shape is such that the axis of integration
        has one less value than `y`. If `initial` is given, the shape is equal
        to that of `y`.

    See Also
    --------
    numpy.cumsum
    cumulative_trapezoid : cumulative integration using the composite
        trapezoidal rule
    simpson : integrator for sampled data using the Composite Simpson's Rule

    Notes
    -----

    .. versionadded:: 1.12.0

    The composite Simpson's 1/3 method can be used to approximate the definite
    integral of a sampled input function :math:`y(x)` [1]_. The method assumes
    a quadratic relationship over the interval containing any three consecutive
    sampled points.

    Consider three consecutive points:
    :math:`(x_1, y_1), (x_2, y_2), (x_3, y_3)`.

    Assuming a quadratic relationship over the three points, the integral over
    the subinterval between :math:`x_1` and :math:`x_2` is given by formula
    (8) of [2]_:

    .. math::
        \int_{x_1}^{x_2} y(x) dx\ &= \frac{x_2-x_1}{6}\left[\
        \left\{3-\frac{x_2-x_1}{x_3-x_1}\right\} y_1 + \
        \left\{3 + \frac{(x_2-x_1)^2}{(x_3-x_2)(x_3-x_1)} + \
        \frac{x_2-x_1}{x_3-x_1}\right\} y_2\\
        - \frac{(x_2-x_1)^2}{(x_3-x_2)(x_3-x_1)} y_3\right]

    The integral between :math:`x_2` and :math:`x_3` is given by swapping
    appearances of :math:`x_1` and :math:`x_3`. The integral is estimated
    separately for each subinterval and then cumulatively summed to obtain
    the final result.

    For samples that are equally spaced, the result is exact if the function
    is a polynomial of order three or less [1]_ and the number of subintervals
    is even. Otherwise, the integral is exact for polynomials of order two or
    less.

    References
    ----------
    .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Simpson's_rule
    .. [2] Cartwright, Kenneth V. Simpson's Rule Cumulative Integration with
            MS Excel and Irregularly-spaced Data. Journal of Mathematical
            Sciences and Mathematics Education. 12 (2): 1-9

    Examples
    --------
    >>> from scipy import integrate
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> x = np.linspace(-2, 2, num=20)
    >>> y = x**2
    >>> y_int = integrate.cumulative_simpson(y, x=x, initial=0)
    >>> fig, ax = plt.subplots()
    >>> ax.plot(x, y_int, 'ro', x, x**3/3 - (x[0])**3/3, 'b-')
    >>> ax.grid()
    >>> plt.show()

    The output of `cumulative_simpson` is similar to that of iteratively
    calling `simpson` with successively higher upper limits of integration, but
    not identical.

    >>> def cumulative_simpson_reference(y, x):
    ...     return np.asarray([integrate.simpson(y[:i], x=x[:i])
    ...                        for i in range(2, len(y) + 1)])
    >>>
    >>> rng = np.random.default_rng(354673834679465)
    >>> x, y = rng.random(size=(2, 10))
    >>> x.sort()
    >>>
    >>> res = integrate.cumulative_simpson(y, x=x)
    >>> ref = cumulative_simpson_reference(y, x)
    >>> equal = np.abs(res - ref) < 1e-15
    >>> equal  # not equal when `simpson` has even number of subintervals
    array([False,  True, False,  True, False,  True, False,  True,  True])

    This is expected: because `cumulative_simpson` has access to more
    information than `simpson`, it can typically produce more accurate
    estimates of the underlying integral over subintervals.

    """
    y = _ensure_float_array(y)

    # validate `axis` and standardize to work along the last axis
    original_y = y
    original_shape = y.shape
    try:
        y = np.swapaxes(y, axis, -1)
    except IndexError as e:
        message = f"`axis={axis}` is not valid for `y` with `y.ndim={y.ndim}`."
        raise ValueError(message) from e
    if y.shape[-1] < 3:
        # Too few points for a parabola: fall back to the trapezoidal rule.
        res = cumulative_trapezoid(original_y, x, dx=dx, axis=axis, initial=None)
        res = np.swapaxes(res, axis, -1)

    elif x is not None:
        # Irregular spacing taken from `x`.
        x = _ensure_float_array(x)
        message = ("If given, shape of `x` must be the same as `y` or 1-D with "
                   "the same length as `y` along `axis`.")
        if not (x.shape == original_shape
                or (x.ndim == 1 and len(x) == original_shape[axis])):
            raise ValueError(message)

        x = np.broadcast_to(x, y.shape) if x.ndim == 1 else np.swapaxes(x, axis, -1)
        dx = np.diff(x, axis=-1)
        if np.any(dx <= 0):
            raise ValueError("Input x must be strictly increasing.")
        res = _cumulatively_sum_simpson_integrals(
            y, dx, _cumulative_simpson_unequal_intervals
        )

    else:
        # Uniform spacing given by `dx` (scalar or per-series array).
        dx = _ensure_float_array(dx)
        final_dx_shape = tupleset(original_shape, axis, original_shape[axis] - 1)
        alt_input_dx_shape = tupleset(original_shape, axis, 1)
        message = ("If provided, `dx` must either be a scalar or have the same "
                   "shape as `y` but with only 1 point along `axis`.")
        if not (dx.ndim == 0 or dx.shape == alt_input_dx_shape):
            raise ValueError(message)
        dx = np.broadcast_to(dx, final_dx_shape)
        dx = np.swapaxes(dx, axis, -1)
        res = _cumulatively_sum_simpson_integrals(
            y, dx, _cumulative_simpson_equal_intervals
        )

    if initial is not None:
        initial = _ensure_float_array(initial)
        alt_initial_input_shape = tupleset(original_shape, axis, 1)
        message = ("If provided, `initial` must either be a scalar or have the "
                   "same shape as `y` but with only 1 point along `axis`.")
        if not (initial.ndim == 0 or initial.shape == alt_initial_input_shape):
            raise ValueError(message)
        initial = np.broadcast_to(initial, alt_initial_input_shape)
        initial = np.swapaxes(initial, axis, -1)

        # Offset the running sum and prepend the starting value.
        res += initial
        res = np.concatenate((initial, res), axis=-1)

    # Undo the axis standardization before returning.
    res = np.swapaxes(res, -1, axis)
    return res
952
+
953
+
954
def romb(y, dx=1.0, axis=-1, show=False):
    """
    Romberg integration using samples of a function.

    Parameters
    ----------
    y : array_like
        A vector of ``2**k + 1`` equally-spaced samples of a function.
    dx : float, optional
        The sample spacing. Default is 1.
    axis : int, optional
        The axis along which to integrate. Default is -1 (last axis).
    show : bool, optional
        When `y` is a single 1-D array, then if this argument is True
        print the table showing Richardson extrapolation from the
        samples. Default is False.

    Returns
    -------
    romb : ndarray
        The integrated result for `axis`.

    See Also
    --------
    quad : adaptive quadrature using QUADPACK
    fixed_quad : fixed-order Gaussian quadrature
    dblquad : double integrals
    tplquad : triple integrals
    simpson : integrators for sampled data
    cumulative_trapezoid : cumulative integration for sampled data

    Examples
    --------
    >>> from scipy import integrate
    >>> import numpy as np
    >>> x = np.arange(10, 14.25, 0.25)
    >>> y = np.arange(3, 12)

    >>> integrate.romb(y)
    56.0

    >>> y = np.sin(np.power(x, 2.5))
    >>> integrate.romb(y)
    -0.742561336672229

    >>> integrate.romb(y, show=True)
    Richardson Extrapolation Table for Romberg Integration
    ======================================================
    -0.81576
     4.63862  6.45674
    -1.10581 -3.02062 -3.65245
    -2.57379 -3.06311 -3.06595 -3.05664
    -1.34093 -0.92997 -0.78776 -0.75160 -0.74256
    ======================================================
    -0.742561336672229 # may vary

    """
    y = np.asarray(y)
    nd = len(y.shape)
    Nsamps = y.shape[axis]
    Ninterv = Nsamps-1
    # Find k such that Ninterv == 2**k; reject any other sample count.
    n = 1
    k = 0
    while n < Ninterv:
        n <<= 1
        k += 1
    if n != Ninterv:
        raise ValueError("Number of samples must be one plus a "
                         "non-negative power of 2.")

    # R[(i, j)] is the Richardson extrapolation table: row i uses 2**i
    # intervals, column j is the j-th extrapolation refinement.
    R = {}
    slice_all = (slice(None),) * nd
    slice0 = tupleset(slice_all, axis, 0)
    slicem1 = tupleset(slice_all, axis, -1)
    h = Ninterv * np.asarray(dx, dtype=float)
    R[(0, 0)] = (y[slice0] + y[slicem1])/2.0*h
    slice_R = slice_all
    start = stop = step = Ninterv
    for i in range(1, k+1):
        start >>= 1
        # slice_R selects only the *new* midpoints introduced at this level.
        slice_R = tupleset(slice_R, axis, slice(start, stop, step))
        step >>= 1
        R[(i, 0)] = 0.5*(R[(i-1, 0)] + h*y[slice_R].sum(axis=axis))
        for j in range(1, i+1):
            prev = R[(i, j-1)]
            # Richardson extrapolation: divisor is 4**j - 1.
            R[(i, j)] = prev + (prev-R[(i-1, j-1)]) / ((1 << (2*j))-1)
        h /= 2.0

    if show:
        if not np.isscalar(R[(0, 0)]):
            print("*** Printing table only supported for integrals" +
                  " of a single data set.")
        else:
            # `show` may be a (precision, width) sequence; fall back to
            # defaults when it is a plain truthy scalar.
            try:
                precis = show[0]
            except (TypeError, IndexError):
                precis = 5
            try:
                width = show[1]
            except (TypeError, IndexError):
                width = 8
            formstr = "%%%d.%df" % (width, precis)

            title = "Richardson Extrapolation Table for Romberg Integration"
            print(title, "=" * len(title), sep="\n", end="\n")
            for i in range(k+1):
                for j in range(i+1):
                    print(formstr % R[(i, j)], end=" ")
                print()
            print("=" * len(title))

    # Most-refined estimate is the bottom-right table entry.
    return R[(k, k)]
1066
+
1067
+ # Romberg quadratures for numeric integration.
1068
+ #
1069
+ # Written by Scott M. Ransom <ransom@cfa.harvard.edu>
1070
+ # last revision: 14 Nov 98
1071
+ #
1072
+ # Cosmetic changes by Konrad Hinsen <hinsen@cnrs-orleans.fr>
1073
+ # last revision: 1999-7-21
1074
+ #
1075
+ # Adapted to SciPy by Travis Oliphant <oliphant.travis@ieee.org>
1076
+ # last revision: Dec 2001
1077
+
1078
+
1079
+ def _difftrap(function, interval, numtraps):
1080
+ """
1081
+ Perform part of the trapezoidal rule to integrate a function.
1082
+ Assume that we had called difftrap with all lower powers-of-2
1083
+ starting with 1. Calling difftrap only returns the summation
1084
+ of the new ordinates. It does _not_ multiply by the width
1085
+ of the trapezoids. This must be performed by the caller.
1086
+ 'function' is the function to evaluate (must accept vector arguments).
1087
+ 'interval' is a sequence with lower and upper limits
1088
+ of integration.
1089
+ 'numtraps' is the number of trapezoids to use (must be a
1090
+ power-of-2).
1091
+ """
1092
+ if numtraps <= 0:
1093
+ raise ValueError("numtraps must be > 0 in difftrap().")
1094
+ elif numtraps == 1:
1095
+ return 0.5*(function(interval[0])+function(interval[1]))
1096
+ else:
1097
+ numtosum = numtraps/2
1098
+ h = float(interval[1]-interval[0])/numtosum
1099
+ lox = interval[0] + 0.5 * h
1100
+ points = lox + h * np.arange(numtosum)
1101
+ s = np.sum(function(points), axis=0)
1102
+ return s
1103
+
1104
+
1105
+ def _romberg_diff(b, c, k):
1106
+ """
1107
+ Compute the differences for the Romberg quadrature corrections.
1108
+ See Forman Acton's "Real Computing Made Real," p 143.
1109
+ """
1110
+ tmp = 4.0**k
1111
+ return (tmp * c - b)/(tmp - 1.0)
1112
+
1113
+
1114
def _printresmat(function, interval, resmat):
    # Print the Romberg result matrix.
    # `resmat` is a lower-triangular list of lists: row i holds the
    # estimates from 2**i trapezoids at successive extrapolation levels.
    # NOTE: the final-result line below relies on `i` and `j` retaining
    # their last loop values (hence the i = j = 0 initialization for the
    # degenerate empty-matrix case).
    i = j = 0
    print('Romberg integration of', repr(function), end=' ')
    print('from', interval)
    print('')
    print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results'))
    for i in range(len(resmat)):
        print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ')
        for j in range(i+1):
            print('%9f' % (resmat[i][j]), end=' ')
        print('')
    print('')
    # resmat[i][j] is the bottom-right (most refined) estimate.
    print('The final result is', resmat[i][j], end=' ')
    print('after', 2**(len(resmat)-1)+1, 'function evaluations.')
1129
+
1130
+
1131
@_deprecated("`scipy.integrate.romberg` is deprecated as of SciPy 1.12.0 "
             "and will be removed in SciPy 1.15.0. Please use "
             "`scipy.integrate.quad` instead.")
def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False,
            divmax=10, vec_func=False):
    """
    Romberg integration of a callable function or method.

    .. deprecated:: 1.12.0

        This function is deprecated as of SciPy 1.12.0 and will be removed
        in SciPy 1.15.0. Please use `scipy.integrate.quad` instead.

    Returns the integral of `function` (a function of one variable)
    over the interval (`a`, `b`).

    If `show` is 1, the triangular array of the intermediate results
    will be printed. If `vec_func` is True (default is False), then
    `function` is assumed to support vector arguments.

    Parameters
    ----------
    function : callable
        Function to be integrated.
    a : float
        Lower limit of integration.
    b : float
        Upper limit of integration.

    Returns
    -------
    results : float
        Result of the integration.

    Other Parameters
    ----------------
    args : tuple, optional
        Extra arguments to pass to function. Each element of `args` will
        be passed as a single argument to `func`. Default is to pass no
        extra arguments.
    tol, rtol : float, optional
        The desired absolute and relative tolerances. Defaults are 1.48e-8.
    show : bool, optional
        Whether to print the results. Default is False.
    divmax : int, optional
        Maximum order of extrapolation. Default is 10.
    vec_func : bool, optional
        Whether `func` handles arrays as arguments (i.e., whether it is a
        "vector" function). Default is False.

    See Also
    --------
    fixed_quad : Fixed-order Gaussian quadrature.
    quad : Adaptive quadrature using QUADPACK.
    dblquad : Double integrals.
    tplquad : Triple integrals.
    romb : Integrators for sampled data.
    simpson : Integrators for sampled data.
    cumulative_trapezoid : Cumulative integration for sampled data.

    References
    ----------
    .. [1] 'Romberg's method' https://en.wikipedia.org/wiki/Romberg%27s_method

    Examples
    --------
    Integrate a gaussian from 0 to 1 and compare to the error function.

    >>> from scipy import integrate
    >>> from scipy.special import erf
    >>> import numpy as np
    >>> gaussian = lambda x: 1/np.sqrt(np.pi) * np.exp(-x**2)
    >>> result = integrate.romberg(gaussian, 0, 1, show=True)
    Romberg integration of <function vfunc at ...> from [0, 1]

    ::

        Steps  StepSize  Results
            1  1.000000  0.385872
            2  0.500000  0.412631  0.421551
            4  0.250000  0.419184  0.421368  0.421356
            8  0.125000  0.420810  0.421352  0.421350  0.421350
           16  0.062500  0.421215  0.421350  0.421350  0.421350  0.421350
           32  0.031250  0.421317  0.421350  0.421350  0.421350  0.421350  0.421350

    The final result is 0.421350396475 after 33 function evaluations.

    >>> print("%g %g" % (2*result, erf(1)))
    0.842701 0.842701

    """
    if np.isinf(a) or np.isinf(b):
        raise ValueError("Romberg integration only available "
                         "for finite limits.")
    # Wrap `function` so it can always be called with an array argument.
    vfunc = vectorize1(function, args, vec_func=vec_func)
    n = 1
    interval = [a, b]
    intrange = b - a
    # `ordsum` accumulates trapezoid-rule contributions; level 0 is a
    # single trapezoid over the whole interval.
    ordsum = _difftrap(vfunc, interval, n)
    result = intrange * ordsum
    resmat = [[result]]
    err = np.inf
    last_row = resmat[0]
    for i in range(1, divmax+1):
        # Double the number of panels and add only the new midpoint samples.
        n *= 2
        ordsum += _difftrap(vfunc, interval, n)
        # Richardson extrapolation builds row `i` of the Romberg tableau
        # from the refined trapezoid estimate and the previous row.
        row = [intrange * ordsum / n]
        for k in range(i):
            row.append(_romberg_diff(last_row[k], row[k], k+1))
        result = row[i]
        lastresult = last_row[i-1]
        if show:
            resmat.append(row)
        err = abs(result - lastresult)
        # Converged when the change between successive diagonal entries is
        # within either tolerance.
        if err < tol or err < rtol * abs(result):
            break
        last_row = row
    else:
        # Loop exhausted `divmax` levels without meeting the tolerances.
        warnings.warn(
            "divmax (%d) exceeded. Latest difference = %e" % (divmax, err),
            AccuracyWarning, stacklevel=2)

    if show:
        _printresmat(vfunc, interval, resmat)
    return result
1256
+
1257
+
1258
+ # Coefficients for Newton-Cotes quadrature
1259
+ #
1260
+ # These are the points being used
1261
+ # to construct the local interpolating polynomial
1262
+ # a are the weights for Newton-Cotes integration
1263
+ # B is the error coefficient.
1264
+ # error in these coefficients grows as N gets larger.
1265
+ # or as samples are closer and closer together
1266
+
1267
+ # You can use maxima to find these rational coefficients
1268
+ # for equally spaced data using the commands
1269
+ # a(i,N) := (integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N)
1270
+ # / ((N-i)! * i!) * (-1)^(N-i));
1271
+ # Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N));
1272
+ # Bo(N) := N^(N+1)/(N+1)! * (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N));
1273
+ # B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N));
1274
+ #
1275
+ # pre-computed for equally-spaced weights
1276
+ #
1277
+ # num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N]
1278
+ #
1279
+ # a = num_a*array(int_a)/den_a
1280
+ # B = num_B*1.0 / den_B
1281
+ #
1282
+ # integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*)
1283
+ # where k = N // 2
1284
+ #
1285
# Exact rational Newton-Cotes weights for equally-spaced samples of
# order N = 1..14, keyed by N. Each value is
# (num_a, den_a, int_a, num_B, den_B); see the derivation notes above
# for how the weights `a` and error coefficient `B` are reconstructed.
_builtincoeffs = {
    1: (1,2,[1,1],-1,12),
    2: (1,3,[1,4,1],-1,90),
    3: (3,8,[1,3,3,1],-3,80),
    4: (2,45,[7,32,12,32,7],-8,945),
    5: (5,288,[19,75,50,50,75,19],-275,12096),
    6: (1,140,[41,216,27,272,27,216,41],-9,1400),
    7: (7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400),
    8: (4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989],
        -2368,467775),
    9: (9,89600,[2857,15741,1080,19344,5778,5778,19344,1080,
                 15741,2857], -4671, 394240),
    10: (5,299376,[16067,106300,-48525,272400,-260550,427368,
                   -260550,272400,-48525,106300,16067],
         -673175, 163459296),
    11: (11,87091200,[2171465,13486539,-3237113, 25226685,-9595542,
                      15493566,15493566,-9595542,25226685,-3237113,
                      13486539,2171465], -2224234463, 237758976000),
    12: (1, 5255250, [1364651,9903168,-7587864,35725120,-51491295,
                      87516288,-87797136,87516288,-51491295,35725120,
                      -7587864,9903168,1364651], -3012, 875875),
    13: (13, 402361344000,[8181904909, 56280729661, -31268252574,
                           156074417954,-151659573325,206683437987,
                           -43111992612,-43111992612,206683437987,
                           -151659573325,156074417954,-31268252574,
                           56280729661,8181904909], -2639651053,
                           344881152000),
    14: (7, 2501928000, [90241897,710986864,-770720657,3501442784,
                         -6625093363,12630121616,-16802270373,19534438464,
                         -16802270373,12630121616,-6625093363,3501442784,
                         -770720657,710986864,90241897], -3740727473,
                         1275983280000)
    }
1318
+
1319
+
1320
+ def newton_cotes(rn, equal=0):
1321
+ r"""
1322
+ Return weights and error coefficient for Newton-Cotes integration.
1323
+
1324
+ Suppose we have (N+1) samples of f at the positions
1325
+ x_0, x_1, ..., x_N. Then an N-point Newton-Cotes formula for the
1326
+ integral between x_0 and x_N is:
1327
+
1328
+ :math:`\int_{x_0}^{x_N} f(x)dx = \Delta x \sum_{i=0}^{N} a_i f(x_i)
1329
+ + B_N (\Delta x)^{N+2} f^{N+1} (\xi)`
1330
+
1331
+ where :math:`\xi \in [x_0,x_N]`
1332
+ and :math:`\Delta x = \frac{x_N-x_0}{N}` is the average samples spacing.
1333
+
1334
+ If the samples are equally-spaced and N is even, then the error
1335
+ term is :math:`B_N (\Delta x)^{N+3} f^{N+2}(\xi)`.
1336
+
1337
+ Parameters
1338
+ ----------
1339
+ rn : int
1340
+ The integer order for equally-spaced data or the relative positions of
1341
+ the samples with the first sample at 0 and the last at N, where N+1 is
1342
+ the length of `rn`. N is the order of the Newton-Cotes integration.
1343
+ equal : int, optional
1344
+ Set to 1 to enforce equally spaced data.
1345
+
1346
+ Returns
1347
+ -------
1348
+ an : ndarray
1349
+ 1-D array of weights to apply to the function at the provided sample
1350
+ positions.
1351
+ B : float
1352
+ Error coefficient.
1353
+
1354
+ Notes
1355
+ -----
1356
+ Normally, the Newton-Cotes rules are used on smaller integration
1357
+ regions and a composite rule is used to return the total integral.
1358
+
1359
+ Examples
1360
+ --------
1361
+ Compute the integral of sin(x) in [0, :math:`\pi`]:
1362
+
1363
+ >>> from scipy.integrate import newton_cotes
1364
+ >>> import numpy as np
1365
+ >>> def f(x):
1366
+ ... return np.sin(x)
1367
+ >>> a = 0
1368
+ >>> b = np.pi
1369
+ >>> exact = 2
1370
+ >>> for N in [2, 4, 6, 8, 10]:
1371
+ ... x = np.linspace(a, b, N + 1)
1372
+ ... an, B = newton_cotes(N, 1)
1373
+ ... dx = (b - a) / N
1374
+ ... quad = dx * np.sum(an * f(x))
1375
+ ... error = abs(quad - exact)
1376
+ ... print('{:2d} {:10.9f} {:.5e}'.format(N, quad, error))
1377
+ ...
1378
+ 2 2.094395102 9.43951e-02
1379
+ 4 1.998570732 1.42927e-03
1380
+ 6 2.000017814 1.78136e-05
1381
+ 8 1.999999835 1.64725e-07
1382
+ 10 2.000000001 1.14677e-09
1383
+
1384
+ """
1385
+ try:
1386
+ N = len(rn)-1
1387
+ if equal:
1388
+ rn = np.arange(N+1)
1389
+ elif np.all(np.diff(rn) == 1):
1390
+ equal = 1
1391
+ except Exception:
1392
+ N = rn
1393
+ rn = np.arange(N+1)
1394
+ equal = 1
1395
+
1396
+ if equal and N in _builtincoeffs:
1397
+ na, da, vi, nb, db = _builtincoeffs[N]
1398
+ an = na * np.array(vi, dtype=float) / da
1399
+ return an, float(nb)/db
1400
+
1401
+ if (rn[0] != 0) or (rn[-1] != N):
1402
+ raise ValueError("The sample positions must start at 0"
1403
+ " and end at N")
1404
+ yi = rn / float(N)
1405
+ ti = 2 * yi - 1
1406
+ nvec = np.arange(N+1)
1407
+ C = ti ** nvec[:, np.newaxis]
1408
+ Cinv = np.linalg.inv(C)
1409
+ # improve precision of result
1410
+ for i in range(2):
1411
+ Cinv = 2*Cinv - Cinv.dot(C).dot(Cinv)
1412
+ vec = 2.0 / (nvec[::2]+1)
1413
+ ai = Cinv[:, ::2].dot(vec) * (N / 2.)
1414
+
1415
+ if (N % 2 == 0) and equal:
1416
+ BN = N/(N+3.)
1417
+ power = N+2
1418
+ else:
1419
+ BN = N/(N+2.)
1420
+ power = N+1
1421
+
1422
+ BN = BN - np.dot(yi**power, ai)
1423
+ p1 = power+1
1424
+ fac = power*math.log(N) - gammaln(p1)
1425
+ fac = math.exp(fac)
1426
+ return ai, BN*fac
1427
+
1428
+
1429
def _qmc_quad_iv(func, a, b, n_points, n_estimates, qrng, log):
    """Input validation for `qmc_quad`.

    Validates/canonicalizes all arguments and returns them along with a
    vectorized wrapper of `func`, the RNG used for spawning engines, and
    the lazily imported `scipy.stats` module.
    """
    # Lazy import to avoid issues with a partially-initialized submodule.
    # Cache the module on the `qmc_quad` function object. Check for the
    # same attribute that is actually set (`stats`), otherwise the cache
    # never hits and `scipy.stats` is re-imported on every call.
    if not hasattr(qmc_quad, 'stats'):
        from scipy import stats
        qmc_quad.stats = stats
    else:
        stats = qmc_quad.stats

    if not callable(func):
        message = "`func` must be callable."
        raise TypeError(message)

    # a, b will be modified, so copy. Oh well if it's copied twice.
    a = np.atleast_1d(a).copy()
    b = np.atleast_1d(b).copy()
    a, b = np.broadcast_arrays(a, b)
    dim = a.shape[0]

    # Sanity check: `func` must at least work on a single point.
    try:
        func((a + b) / 2)
    except Exception as e:
        message = ("`func` must evaluate the integrand at points within "
                   "the integration range; e.g. `func( (a + b) / 2)` "
                   "must return the integrand at the centroid of the "
                   "integration volume.")
        raise ValueError(message) from e

    # Prefer a vectorized `func`; fall back to per-point evaluation with a
    # warning if calling it with a 2-D batch of points fails.
    try:
        func(np.array([a, b]).T)
        vfunc = func
    except Exception as e:
        message = ("Exception encountered when attempting vectorized call to "
                   f"`func`: {e}. For better performance, `func` should "
                   "accept two-dimensional array `x` with shape `(len(a), "
                   "n_points)` and return an array of the integrand value at "
                   "each of the `n_points.")
        warnings.warn(message, stacklevel=3)

        def vfunc(x):
            # Evaluate `func` one point at a time along the last axis.
            return np.apply_along_axis(func, axis=-1, arr=x)

    n_points_int = np.int64(n_points)
    if n_points != n_points_int:
        message = "`n_points` must be an integer."
        raise TypeError(message)

    n_estimates_int = np.int64(n_estimates)
    if n_estimates != n_estimates_int:
        message = "`n_estimates` must be an integer."
        raise TypeError(message)

    if qrng is None:
        qrng = stats.qmc.Halton(dim)
    elif not isinstance(qrng, stats.qmc.QMCEngine):
        message = "`qrng` must be an instance of scipy.stats.qmc.QMCEngine."
        raise TypeError(message)

    if qrng.d != a.shape[0]:
        message = ("`qrng` must be initialized with dimensionality equal to "
                   "the number of variables in `a`, i.e., "
                   "`qrng.random().shape[-1]` must equal `a.shape[0]`.")
        raise ValueError(message)

    rng_seed = getattr(qrng, 'rng_seed', None)
    rng = stats._qmc.check_random_state(rng_seed)

    if log not in {True, False}:
        message = "`log` must be boolean (`True` or `False`)."
        raise TypeError(message)

    return (vfunc, a, b, n_points_int, n_estimates_int, qrng, rng, log, stats)
1501
+
1502
+
1503
# Result container returned by `qmc_quad`: the integral estimate and the
# standard error of that estimate.
QMCQuadResult = namedtuple('QMCQuadResult', ['integral', 'standard_error'])
1504
+
1505
+
1506
def qmc_quad(func, a, b, *, n_estimates=8, n_points=1024, qrng=None,
             log=False):
    """
    Compute an integral in N-dimensions using Quasi-Monte Carlo quadrature.

    Parameters
    ----------
    func : callable
        The integrand. Must accept a single argument ``x``, an array which
        specifies the point(s) at which to evaluate the scalar-valued
        integrand, and return the value(s) of the integrand.
        For efficiency, the function should be vectorized to accept an array of
        shape ``(d, n_points)``, where ``d`` is the number of variables (i.e.
        the dimensionality of the function domain) and `n_points` is the number
        of quadrature points, and return an array of shape ``(n_points,)``,
        the integrand at each quadrature point.
    a, b : array-like
        One-dimensional arrays specifying the lower and upper integration
        limits, respectively, of each of the ``d`` variables.
    n_estimates, n_points : int, optional
        `n_estimates` (default: 8) statistically independent QMC samples, each
        of `n_points` (default: 1024) points, will be generated by `qrng`.
        The total number of points at which the integrand `func` will be
        evaluated is ``n_points * n_estimates``. See Notes for details.
    qrng : `~scipy.stats.qmc.QMCEngine`, optional
        An instance of the QMCEngine from which to sample QMC points.
        The QMCEngine must be initialized to a number of dimensions ``d``
        corresponding with the number of variables ``x1, ..., xd`` passed to
        `func`.
        The provided QMCEngine is used to produce the first integral estimate.
        If `n_estimates` is greater than one, additional QMCEngines are
        spawned from the first (with scrambling enabled, if it is an option.)
        If a QMCEngine is not provided, the default `scipy.stats.qmc.Halton`
        will be initialized with the number of dimensions determine from
        the length of `a`.
    log : boolean, default: False
        When set to True, `func` returns the log of the integrand, and
        the result object contains the log of the integral.

    Returns
    -------
    result : object
        A result object with attributes:

        integral : float
            The estimate of the integral.
        standard_error :
            The error estimate. See Notes for interpretation.

    Notes
    -----
    Values of the integrand at each of the `n_points` points of a QMC sample
    are used to produce an estimate of the integral. This estimate is drawn
    from a population of possible estimates of the integral, the value of
    which we obtain depends on the particular points at which the integral
    was evaluated. We perform this process `n_estimates` times, each time
    evaluating the integrand at different scrambled QMC points, effectively
    drawing i.i.d. random samples from the population of integral estimates.
    The sample mean :math:`m` of these integral estimates is an
    unbiased estimator of the true value of the integral, and the standard
    error of the mean :math:`s` of these estimates may be used to generate
    confidence intervals using the t distribution with ``n_estimates - 1``
    degrees of freedom. Perhaps counter-intuitively, increasing `n_points`
    while keeping the total number of function evaluation points
    ``n_points * n_estimates`` fixed tends to reduce the actual error, whereas
    increasing `n_estimates` tends to decrease the error estimate.

    Examples
    --------
    QMC quadrature is particularly useful for computing integrals in higher
    dimensions. An example integrand is the probability density function
    of a multivariate normal distribution.

    >>> import numpy as np
    >>> from scipy import stats
    >>> dim = 8
    >>> mean = np.zeros(dim)
    >>> cov = np.eye(dim)
    >>> def func(x):
    ...     # `multivariate_normal` expects the _last_ axis to correspond with
    ...     # the dimensionality of the space, so `x` must be transposed
    ...     return stats.multivariate_normal.pdf(x.T, mean, cov)

    To compute the integral over the unit hypercube:

    >>> from scipy.integrate import qmc_quad
    >>> a = np.zeros(dim)
    >>> b = np.ones(dim)
    >>> rng = np.random.default_rng()
    >>> qrng = stats.qmc.Halton(d=dim, seed=rng)
    >>> n_estimates = 8
    >>> res = qmc_quad(func, a, b, n_estimates=n_estimates, qrng=qrng)
    >>> res.integral, res.standard_error
    (0.00018429555666024108, 1.0389431116001344e-07)

    A two-sided, 99% confidence interval for the integral may be estimated
    as:

    >>> t = stats.t(df=n_estimates-1, loc=res.integral,
    ...             scale=res.standard_error)
    >>> t.interval(0.99)
    (0.0001839319802536469, 0.00018465913306683527)

    Indeed, the value reported by `scipy.stats.multivariate_normal` is
    within this range.

    >>> stats.multivariate_normal.cdf(b, mean, cov, lower_limit=a)
    0.00018430867675187443

    """
    args = _qmc_quad_iv(func, a, b, n_points, n_estimates, qrng, log)
    func, a, b, n_points, n_estimates, qrng, rng, log, stats = args

    def sum_product(integrands, dA, log=False):
        # One integral estimate: sum of integrand values times the volume
        # element (performed in log space when `log` is True).
        if log:
            return logsumexp(integrands) + np.log(dA)
        else:
            return np.sum(integrands * dA)

    def mean(estimates, log=False):
        if log:
            return logsumexp(estimates) - np.log(n_estimates)
        else:
            return np.mean(estimates)

    def std(estimates, m=None, ddof=0, log=False):
        # Use an explicit `None` sentinel: `m or mean(...)` would treat a
        # legitimate value of 0.0 as "not provided" and recompute it.
        m = mean(estimates, log) if m is None else m
        if log:
            # log-space standard deviation: represent the differences
            # e^est - e^m as logsumexp([est, m + i*pi]) elementwise.
            estimates, m = np.broadcast_arrays(estimates, m)
            temp = np.vstack((estimates, m + np.pi * 1j))
            diff = logsumexp(temp, axis=0)
            return np.real(0.5 * (logsumexp(2 * diff)
                                  - np.log(n_estimates - ddof)))
        else:
            return np.std(estimates, ddof=ddof)

    def sem(estimates, m=None, s=None, log=False):
        # Same `None`-sentinel rationale as in `std` above.
        m = mean(estimates, log) if m is None else m
        s = std(estimates, m, ddof=1, log=log) if s is None else s
        if log:
            return s - 0.5*np.log(n_estimates)
        else:
            return s / np.sqrt(n_estimates)

    # The sign of the integral depends on the order of the limits. Fix this by
    # ensuring that lower bounds are indeed lower and setting sign of resulting
    # integral manually
    if np.any(a == b):
        message = ("A lower limit was equal to an upper limit, so the value "
                   "of the integral is zero by definition.")
        warnings.warn(message, stacklevel=2)
        return QMCQuadResult(-np.inf if log else 0, 0)

    i_swap = b < a
    sign = (-1)**(i_swap.sum(axis=-1))  # odd # of swaps -> negative
    a[i_swap], b[i_swap] = b[i_swap], a[i_swap]

    A = np.prod(b - a)
    dA = A / n_points

    estimates = np.zeros(n_estimates)
    rngs = _rng_spawn(qrng.rng, n_estimates)
    for i in range(n_estimates):
        # Generate integral estimate
        sample = qrng.random(n_points)
        # The rationale for transposing is that this allows users to easily
        # unpack `x` into separate variables, if desired. This is consistent
        # with the `xx` array passed into the `scipy.integrate.nquad` `func`.
        x = stats.qmc.scale(sample, a, b).T  # (n_dim, n_points)
        integrands = func(x)
        estimates[i] = sum_product(integrands, dA, log)

        # Get a new, independently-scrambled QRNG for next time
        qrng = type(qrng)(seed=rngs[i], **qrng._init_quad)

    integral = mean(estimates, log)
    standard_error = sem(estimates, m=integral, log=log)
    # A negative integral in log space is represented by adding i*pi.
    integral = integral + np.pi*1j if (log and sign < 0) else integral*sign
    return QMCQuadResult(integral, standard_error)
parrot/lib/python3.10/site-packages/scipy/integrate/_tanhsinh.py ADDED
@@ -0,0 +1,1231 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: disable-error-code="attr-defined"
2
+ import numpy as np
3
+ from scipy import special
4
+ import scipy._lib._elementwise_iterative_method as eim
5
+ from scipy._lib._util import _RichResult
6
+
7
+ # todo:
8
+ # figure out warning situation
9
+ # address https://github.com/scipy/scipy/pull/18650#discussion_r1233032521
10
+ # without `minweight`, we are also suppressing infinities within the interval.
11
+ # Is that OK? If so, we can probably get rid of `status=3`.
12
+ # Add heuristic to stop when improvement is too slow / antithrashing
13
+ # support singularities? interval subdivision? this feature will be added
14
+ # eventually, but do we adjust the interface now?
15
+ # When doing log-integration, should the tolerances control the error of the
16
+ # log-integral or the error of the integral? The trouble is that `log`
17
+ # inherently looses some precision so it may not be possible to refine
18
+ # the integral further. Example: 7th moment of stats.f(15, 20)
19
+ # respect function evaluation limit?
20
+ # make public?
21
+
22
+
23
+ def _tanhsinh(f, a, b, *, args=(), log=False, maxfun=None, maxlevel=None,
24
+ minlevel=2, atol=None, rtol=None, preserve_shape=False,
25
+ callback=None):
26
+ """Evaluate a convergent integral numerically using tanh-sinh quadrature.
27
+
28
+ In practice, tanh-sinh quadrature achieves quadratic convergence for
29
+ many integrands: the number of accurate *digits* scales roughly linearly
30
+ with the number of function evaluations [1]_.
31
+
32
+ Either or both of the limits of integration may be infinite, and
33
+ singularities at the endpoints are acceptable. Divergent integrals and
34
+ integrands with non-finite derivatives or singularities within an interval
35
+ are out of scope, but the latter may be evaluated be calling `_tanhsinh` on
36
+ each sub-interval separately.
37
+
38
+ Parameters
39
+ ----------
40
+ f : callable
41
+ The function to be integrated. The signature must be::
42
+ func(x: ndarray, *fargs) -> ndarray
43
+ where each element of ``x`` is a finite real and ``fargs`` is a tuple,
44
+ which may contain an arbitrary number of arrays that are broadcastable
45
+ with `x`. ``func`` must be an elementwise-scalar function; see
46
+ documentation of parameter `preserve_shape` for details.
47
+ If ``func`` returns a value with complex dtype when evaluated at
48
+ either endpoint, subsequent arguments ``x`` will have complex dtype
49
+ (but zero imaginary part).
50
+ a, b : array_like
51
+ Real lower and upper limits of integration. Must be broadcastable.
52
+ Elements may be infinite.
53
+ args : tuple, optional
54
+ Additional positional arguments to be passed to `func`. Must be arrays
55
+ broadcastable with `a` and `b`. If the callable to be integrated
56
+ requires arguments that are not broadcastable with `a` and `b`, wrap
57
+ that callable with `f`. See Examples.
58
+ log : bool, default: False
59
+ Setting to True indicates that `f` returns the log of the integrand
60
+ and that `atol` and `rtol` are expressed as the logs of the absolute
61
+ and relative errors. In this case, the result object will contain the
62
+ log of the integral and error. This is useful for integrands for which
63
+ numerical underflow or overflow would lead to inaccuracies.
64
+ When ``log=True``, the integrand (the exponential of `f`) must be real,
65
+ but it may be negative, in which case the log of the integrand is a
66
+ complex number with an imaginary part that is an odd multiple of π.
67
+ maxlevel : int, default: 10
68
+ The maximum refinement level of the algorithm.
69
+
70
+ At the zeroth level, `f` is called once, performing 16 function
71
+ evaluations. At each subsequent level, `f` is called once more,
72
+ approximately doubling the number of function evaluations that have
73
+ been performed. Accordingly, for many integrands, each successive level
74
+ will double the number of accurate digits in the result (up to the
75
+ limits of floating point precision).
76
+
77
+ The algorithm will terminate after completing level `maxlevel` or after
78
+ another termination condition is satisfied, whichever comes first.
79
+ minlevel : int, default: 2
80
+ The level at which to begin iteration (default: 2). This does not
81
+ change the total number of function evaluations or the abscissae at
82
+ which the function is evaluated; it changes only the *number of times*
83
+ `f` is called. If ``minlevel=k``, then the integrand is evaluated at
84
+ all abscissae from levels ``0`` through ``k`` in a single call.
85
+ Note that if `minlevel` exceeds `maxlevel`, the provided `minlevel` is
86
+ ignored, and `minlevel` is set equal to `maxlevel`.
87
+ atol, rtol : float, optional
88
+ Absolute termination tolerance (default: 0) and relative termination
89
+ tolerance (default: ``eps**0.75``, where ``eps`` is the precision of
90
+ the result dtype), respectively. The error estimate is as
91
+ described in [1]_ Section 5. While not theoretically rigorous or
92
+ conservative, it is said to work well in practice. Must be non-negative
93
+ and finite if `log` is False, and must be expressed as the log of a
94
+ non-negative and finite number if `log` is True.
95
+ preserve_shape : bool, default: False
96
+ In the following, "arguments of `f`" refers to the array ``x`` and
97
+ any arrays within ``fargs``. Let ``shape`` be the broadcasted shape
98
+ of `a`, `b`, and all elements of `args` (which is conceptually
99
+ distinct from ``fargs`` passed into `f`).
100
+
101
+ - When ``preserve_shape=False`` (default), `f` must accept arguments
102
+ of *any* broadcastable shapes.
103
+
104
+ - When ``preserve_shape=True``, `f` must accept arguments of shape
105
+ ``shape`` *or* ``shape + (n,)``, where ``(n,)`` is the number of
106
+ abscissae at which the function is being evaluated.
107
+
108
+ In either case, for each scalar element ``xi`` within `x`, the array
109
+ returned by `f` must include the scalar ``f(xi)`` at the same index.
110
+ Consequently, the shape of the output is always the shape of the input
111
+ ``x``.
112
+
113
+ See Examples.
114
+
115
+ callback : callable, optional
116
+ An optional user-supplied function to be called before the first
117
+ iteration and after each iteration.
118
+ Called as ``callback(res)``, where ``res`` is a ``_RichResult``
119
+ similar to that returned by `_differentiate` (but containing the
120
+ current iterate's values of all variables). If `callback` raises a
121
+ ``StopIteration``, the algorithm will terminate immediately and
122
+ `_tanhsinh` will return a result object.
123
+
124
+ Returns
125
+ -------
126
+ res : _RichResult
127
+ An instance of `scipy._lib._util._RichResult` with the following
128
+ attributes. (The descriptions are written as though the values will be
129
+ scalars; however, if `func` returns an array, the outputs will be
130
+ arrays of the same shape.)
131
+ success : bool
132
+ ``True`` when the algorithm terminated successfully (status ``0``).
133
+ status : int
134
+ An integer representing the exit status of the algorithm.
135
+ ``0`` : The algorithm converged to the specified tolerances.
136
+ ``-1`` : (unused)
137
+ ``-2`` : The maximum number of iterations was reached.
138
+ ``-3`` : A non-finite value was encountered.
139
+ ``-4`` : Iteration was terminated by `callback`.
140
+ ``1`` : The algorithm is proceeding normally (in `callback` only).
141
+ integral : float
142
+ An estimate of the integral
143
+ error : float
144
+ An estimate of the error. Only available if level two or higher
145
+ has been completed; otherwise NaN.
146
+ maxlevel : int
147
+ The maximum refinement level used.
148
+ nfev : int
149
+ The number of points at which `func` was evaluated.
150
+
151
+ See Also
152
+ --------
153
+ quad, quadrature
154
+
155
+ Notes
156
+ -----
157
+ Implements the algorithm as described in [1]_ with minor adaptations for
158
+ finite-precision arithmetic, including some described by [2]_ and [3]_. The
159
+ tanh-sinh scheme was originally introduced in [4]_.
160
+
161
+ Due to floating-point error in the abscissae, the function may be evaluated
162
+ at the endpoints of the interval during iterations. The values returned by
163
+ the function at the endpoints will be ignored.
164
+
165
+ References
166
+ ----------
167
+ [1] Bailey, David H., Karthik Jeyabalan, and Xiaoye S. Li. "A comparison of
168
+ three high-precision quadrature schemes." Experimental Mathematics 14.3
169
+ (2005): 317-329.
170
+ [2] Vanherck, Joren, Bart Sorée, and Wim Magnus. "Tanh-sinh quadrature for
171
+ single and multiple integration using floating-point arithmetic."
172
+ arXiv preprint arXiv:2007.15057 (2020).
173
+ [3] van Engelen, Robert A. "Improving the Double Exponential Quadrature
174
+ Tanh-Sinh, Sinh-Sinh and Exp-Sinh Formulas."
175
+ https://www.genivia.com/files/qthsh.pdf
176
+ [4] Takahasi, Hidetosi, and Masatake Mori. "Double exponential formulas for
177
+ numerical integration." Publications of the Research Institute for
178
+ Mathematical Sciences 9.3 (1974): 721-741.
179
+
180
+ Example
181
+ -------
182
+ Evaluate the Gaussian integral:
183
+
184
+ >>> import numpy as np
185
+ >>> from scipy.integrate._tanhsinh import _tanhsinh
186
+ >>> def f(x):
187
+ ... return np.exp(-x**2)
188
+ >>> res = _tanhsinh(f, -np.inf, np.inf)
189
+ >>> res.integral # true value is np.sqrt(np.pi), 1.7724538509055159
190
+ 1.7724538509055159
191
+ >>> res.error # actual error is 0
192
+ 4.0007963937534104e-16
193
+
194
+ The value of the Gaussian function (bell curve) is nearly zero for
195
+ arguments sufficiently far from zero, so the value of the integral
196
+ over a finite interval is nearly the same.
197
+
198
+ >>> _tanhsinh(f, -20, 20).integral
199
+ 1.772453850905518
200
+
201
+ However, with unfavorable integration limits, the integration scheme
202
+ may not be able to find the important region.
203
+
204
+ >>> _tanhsinh(f, -np.inf, 1000).integral
205
+ 4.500490856620352
206
+
207
+ In such cases, or when there are singularities within the interval,
208
+ break the integral into parts with endpoints at the important points.
209
+
210
+ >>> _tanhsinh(f, -np.inf, 0).integral + _tanhsinh(f, 0, 1000).integral
211
+ 1.772453850905404
212
+
213
+ For integration involving very large or very small magnitudes, use
214
+ log-integration. (For illustrative purposes, the following example shows a
215
+ case in which both regular and log-integration work, but for more extreme
216
+ limits of integration, log-integration would avoid the underflow
217
+ experienced when evaluating the integral normally.)
218
+
219
+ >>> res = _tanhsinh(f, 20, 30, rtol=1e-10)
220
+ >>> res.integral, res.error
221
+ 4.7819613911309014e-176, 4.670364401645202e-187
222
+ >>> def log_f(x):
223
+ ...     return -x**2
224
+ >>> res = _tanhsinh(log_f, 20, 30, log=True, rtol=np.log(1e-10))
+ >>> np.exp(res.integral), np.exp(res.error)
225
+ 4.7819613911306924e-176, 4.670364401645093e-187
226
+
227
+ The limits of integration and elements of `args` may be broadcastable
228
+ arrays, and integration is performed elementwise.
229
+
230
+ >>> from scipy import stats
231
+ >>> dist = stats.gausshyper(13.8, 3.12, 2.51, 5.18)
232
+ >>> a, b = dist.support()
233
+ >>> x = np.linspace(a, b, 100)
234
+ >>> res = _tanhsinh(dist.pdf, a, x)
235
+ >>> ref = dist.cdf(x)
236
+ >>> np.allclose(res.integral, ref)
237
+ True
+
238
+ By default, `preserve_shape` is False, and therefore the callable
239
+ `f` may be called with arrays of any broadcastable shapes.
240
+ For example:
241
+
242
+ >>> shapes = []
243
+ >>> def f(x, c):
244
+ ... shape = np.broadcast_shapes(x.shape, c.shape)
245
+ ... shapes.append(shape)
246
+ ... return np.sin(c*x)
247
+ >>>
248
+ >>> c = [1, 10, 30, 100]
249
+ >>> res = _tanhsinh(f, 0, 1, args=(c,), minlevel=1)
250
+ >>> shapes
251
+ [(4,), (4, 66), (3, 64), (2, 128), (1, 256)]
252
+
253
+ To understand where these shapes are coming from - and to better
254
+ understand how `_tanhsinh` computes accurate results - note that
255
+ higher values of ``c`` correspond with higher frequency sinusoids.
256
+ The higher frequency sinusoids make the integrand more complicated,
257
+ so more function evaluations are required to achieve the target
258
+ accuracy:
259
+
260
+ >>> res.nfev
261
+ array([ 67, 131, 259, 515])
262
+
263
+ The initial ``shape``, ``(4,)``, corresponds with evaluating the
264
+ integrand at a single abscissa and all four frequencies; this is used
265
+ for input validation and to determine the size and dtype of the arrays
266
+ that store results. The next shape corresponds with evaluating the
267
+ integrand at an initial grid of abscissae and all four frequencies.
268
+ Successive calls to the function double the total number of abscissae at
269
+ which the function has been evaluated. However, in later function
270
+ evaluations, the integrand is evaluated at fewer frequencies because
271
+ the corresponding integral has already converged to the required
272
+ tolerance. This saves function evaluations to improve performance, but
273
+ it requires the function to accept arguments of any shape.
274
+
275
+ "Vector-valued" integrands, such as those written for use with
276
+ `scipy.integrate.quad_vec`, are unlikely to satisfy this requirement.
277
+ For example, consider
278
+
279
+ >>> def f(x):
280
+ ... return [x, np.sin(10*x), np.cos(30*x), x*np.sin(100*x)**2]
281
+
282
+ This integrand is not compatible with `_tanhsinh` as written; for instance,
283
+ the shape of the output will not be the same as the shape of ``x``. Such a
284
+ function *could* be converted to a compatible form with the introduction of
285
+ additional parameters, but this would be inconvenient. In such cases,
286
+ a simpler solution would be to use `preserve_shape`.
287
+
288
+ >>> shapes = []
289
+ >>> def f(x):
290
+ ... shapes.append(x.shape)
291
+ ... x0, x1, x2, x3 = x
292
+ ... return [x0, np.sin(10*x1), np.cos(30*x2), x3*np.sin(100*x3)]
293
+ >>>
294
+ >>> a = np.zeros(4)
295
+ >>> res = _tanhsinh(f, a, 1, preserve_shape=True)
296
+ >>> shapes
297
+ [(4,), (4, 66), (4, 64), (4, 128), (4, 256)]
298
+
299
+ Here, the broadcasted shape of `a` and `b` is ``(4,)``. With
300
+ ``preserve_shape=True``, the function may be called with argument
301
+ ``x`` of shape ``(4,)`` or ``(4, n)``, and this is what we observe.
302
+
303
+ """
304
+ (f, a, b, log, maxfun, maxlevel, minlevel,
305
+ atol, rtol, args, preserve_shape, callback) = _tanhsinh_iv(
306
+ f, a, b, log, maxfun, maxlevel, minlevel, atol,
307
+ rtol, args, preserve_shape, callback)
308
+
309
+ # Initialization
310
+ # `eim._initialize` does several important jobs, including
311
+ # ensuring that limits, each of the `args`, and the output of `f`
312
+ # broadcast correctly and are of consistent types. To save a function
313
+ # evaluation, I pass the midpoint of the integration interval. This comes
314
+ # at a cost of some gymnastics to ensure that the midpoint has the right
315
+ # shape and dtype. Did you know that 0d and >0d arrays follow different
316
+ # type promotion rules?
317
+ with np.errstate(over='ignore', invalid='ignore', divide='ignore'):
318
+ c = ((a.ravel() + b.ravel())/2).reshape(a.shape)
319
+ inf_a, inf_b = np.isinf(a), np.isinf(b)
320
+ c[inf_a] = b[inf_a] - 1 # takes care of infinite a
321
+ c[inf_b] = a[inf_b] + 1 # takes care of infinite b
322
+ c[inf_a & inf_b] = 0 # takes care of infinite a and b
323
+ temp = eim._initialize(f, (c,), args, complex_ok=True,
324
+ preserve_shape=preserve_shape)
325
+ f, xs, fs, args, shape, dtype, xp = temp
326
+ a = np.broadcast_to(a, shape).astype(dtype).ravel()
327
+ b = np.broadcast_to(b, shape).astype(dtype).ravel()
328
+
329
+ # Transform improper integrals
330
+ a, b, a0, negative, abinf, ainf, binf = _transform_integrals(a, b)
331
+
332
+ # Define variables we'll need
333
+ nit, nfev = 0, 1 # one function evaluation performed above
334
+ zero = -np.inf if log else 0
335
+ pi = dtype.type(np.pi)
336
+ maxiter = maxlevel - minlevel + 1
337
+ eps = np.finfo(dtype).eps
338
+ if rtol is None:
339
+ rtol = 0.75*np.log(eps) if log else eps**0.75
340
+
341
+ Sn = np.full(shape, zero, dtype=dtype).ravel() # latest integral estimate
342
+ Sn[np.isnan(a) | np.isnan(b) | np.isnan(fs[0])] = np.nan
343
+ Sk = np.empty_like(Sn).reshape(-1, 1)[:, 0:0] # all integral estimates
344
+ aerr = np.full(shape, np.nan, dtype=dtype).ravel() # absolute error
345
+ status = np.full(shape, eim._EINPROGRESS, dtype=int).ravel()
346
+ h0 = np.real(_get_base_step(dtype=dtype)) # base step
347
+
348
+ # For term `d4` of error estimate ([1] Section 5), we need to keep the
349
+ # most extreme abscissae and corresponding `fj`s, `wj`s in Euler-Maclaurin
350
+ # sum. Here, we initialize these variables.
351
+ xr0 = np.full(shape, -np.inf, dtype=dtype).ravel()
352
+ fr0 = np.full(shape, np.nan, dtype=dtype).ravel()
353
+ wr0 = np.zeros(shape, dtype=dtype).ravel()
354
+ xl0 = np.full(shape, np.inf, dtype=dtype).ravel()
355
+ fl0 = np.full(shape, np.nan, dtype=dtype).ravel()
356
+ wl0 = np.zeros(shape, dtype=dtype).ravel()
357
+ d4 = np.zeros(shape, dtype=dtype).ravel()
358
+
359
+ work = _RichResult(
360
+ Sn=Sn, Sk=Sk, aerr=aerr, h=h0, log=log, dtype=dtype, pi=pi, eps=eps,
361
+ a=a.reshape(-1, 1), b=b.reshape(-1, 1), # integration limits
362
+ n=minlevel, nit=nit, nfev=nfev, status=status, # iter/eval counts
363
+ xr0=xr0, fr0=fr0, wr0=wr0, xl0=xl0, fl0=fl0, wl0=wl0, d4=d4, # err est
364
+ ainf=ainf, binf=binf, abinf=abinf, a0=a0.reshape(-1, 1)) # transforms
365
+ # Constant scalars don't need to be put in `work` unless they need to be
366
+ # passed outside `tanhsinh`. Examples: atol, rtol, h0, minlevel.
367
+
368
+ # Correspondence between terms in the `work` object and the result
369
+ res_work_pairs = [('status', 'status'), ('integral', 'Sn'),
370
+ ('error', 'aerr'), ('nit', 'nit'), ('nfev', 'nfev')]
371
+
372
+ def pre_func_eval(work):
373
+ # Determine abscissae at which to evaluate `f`
374
+ work.h = h0 / 2**work.n
375
+ xjc, wj = _get_pairs(work.n, h0, dtype=work.dtype,
376
+ inclusive=(work.n == minlevel))
377
+ work.xj, work.wj = _transform_to_limits(xjc, wj, work.a, work.b)
378
+
379
+ # Perform abscissae substitutions for infinite limits of integration
380
+ xj = work.xj.copy()
381
+ xj[work.abinf] = xj[work.abinf] / (1 - xj[work.abinf]**2)
382
+ xj[work.binf] = 1/xj[work.binf] - 1 + work.a0[work.binf]
383
+ xj[work.ainf] *= -1
384
+ return xj
385
+
386
+ def post_func_eval(x, fj, work):
387
+ # Weight integrand as required by substitutions for infinite limits
388
+ if work.log:
389
+ fj[work.abinf] += (np.log(1 + work.xj[work.abinf] ** 2)
390
+ - 2*np.log(1 - work.xj[work.abinf] ** 2))
391
+ fj[work.binf] -= 2 * np.log(work.xj[work.binf])
392
+ else:
393
+ fj[work.abinf] *= ((1 + work.xj[work.abinf]**2) /
394
+ (1 - work.xj[work.abinf]**2)**2)
395
+ fj[work.binf] *= work.xj[work.binf]**-2.
396
+
397
+ # Estimate integral with Euler-Maclaurin Sum
398
+ fjwj, Sn = _euler_maclaurin_sum(fj, work)
399
+ if work.Sk.shape[-1]:
400
+ Snm1 = work.Sk[:, -1]
401
+ Sn = (special.logsumexp([Snm1 - np.log(2), Sn], axis=0) if log
402
+ else Snm1 / 2 + Sn)
403
+
404
+ work.fjwj = fjwj
405
+ work.Sn = Sn
406
+
407
+ def check_termination(work):
408
+ """Terminate due to convergence or encountering non-finite values"""
409
+ stop = np.zeros(work.Sn.shape, dtype=bool)
410
+
411
+ # Terminate before first iteration if integration limits are equal
412
+ if work.nit == 0:
413
+ i = (work.a == work.b).ravel() # ravel singleton dimension
414
+ zero = -np.inf if log else 0
415
+ work.Sn[i] = zero
416
+ work.aerr[i] = zero
417
+ work.status[i] = eim._ECONVERGED
418
+ stop[i] = True
419
+ else:
420
+ # Terminate if convergence criterion is met
421
+ work.rerr, work.aerr = _estimate_error(work)
422
+ i = ((work.rerr < rtol) | (work.rerr + np.real(work.Sn) < atol) if log
423
+ else (work.rerr < rtol) | (work.rerr * abs(work.Sn) < atol))
424
+ work.status[i] = eim._ECONVERGED
425
+ stop[i] = True
426
+
427
+ # Terminate if integral estimate becomes invalid
428
+ if log:
429
+ i = (np.isposinf(np.real(work.Sn)) | np.isnan(work.Sn)) & ~stop
430
+ else:
431
+ i = ~np.isfinite(work.Sn) & ~stop
432
+ work.status[i] = eim._EVALUEERR
433
+ stop[i] = True
434
+
435
+ return stop
436
+
437
+ def post_termination_check(work):
438
+ work.n += 1
439
+ work.Sk = np.concatenate((work.Sk, work.Sn[:, np.newaxis]), axis=-1)
440
+ return
441
+
442
+ def customize_result(res, shape):
443
+ # If the integration limits were such that b < a, we reversed them
444
+ # to perform the calculation, and the final result needs to be negated.
445
+ if log and np.any(negative):
446
+ pi = res['integral'].dtype.type(np.pi)
447
+ j = np.complex64(1j) # minimum complex type
448
+ res['integral'] = res['integral'] + negative*pi*j
449
+ else:
450
+ res['integral'][negative] *= -1
451
+
452
+ # For this algorithm, it seems more appropriate to report the maximum
453
+ # level rather than the number of iterations in which it was performed.
454
+ res['maxlevel'] = minlevel + res['nit'] - 1
455
+ res['maxlevel'][res['nit'] == 0] = -1
456
+ del res['nit']
457
+ return shape
458
+
459
+ # Suppress all warnings initially, since there are many places in the code
460
+ # for which this is expected behavior.
461
+ with np.errstate(over='ignore', invalid='ignore', divide='ignore'):
462
+ res = eim._loop(work, callback, shape, maxiter, f, args, dtype, pre_func_eval,
463
+ post_func_eval, check_termination, post_termination_check,
464
+ customize_result, res_work_pairs, xp, preserve_shape)
465
+ return res
466
+
467
+
468
+ def _get_base_step(dtype=np.float64):
469
+ # Compute the base step length for the provided dtype. Theoretically, the
470
+ # Euler-Maclaurin sum is infinite, but it gets cut off when either the
471
+ # weights underflow or the abscissae cannot be distinguished from the
472
+ # limits of integration. The latter happens to occur first for float32 and
473
+ # float64, and it occurs when `xjc` (the abscissa complement)
474
+ # in `_compute_pair` underflows. We can solve for the argument `tmax` at
475
+ # which it will underflow using [2] Eq. 13.
476
+ fmin = 4*np.finfo(dtype).tiny # stay a little away from the limit
477
+ tmax = np.arcsinh(np.log(2/fmin - 1) / np.pi)
478
+
479
+ # Based on this, we can choose a base step size `h` for level 0.
480
+ # The number of function evaluations will be `2 + m*2^(k+1)`, where `k` is
481
+ # the level and `m` is an integer we get to choose. I choose
482
+ # m = _N_BASE_STEPS = `8` somewhat arbitrarily, but a rationale is that a
483
+ # power of 2 makes floating point arithmetic more predictable. It also
484
+ # results in a base step size close to `1`, which is what [1] uses (and I
485
+ # used here until I found [2] and these ideas settled).
486
+ h0 = tmax / _N_BASE_STEPS
487
+ return h0.astype(dtype)
488
+
489
+
490
+ _N_BASE_STEPS = 8
491
+
492
+
493
def _compute_pair(k, h0):
    """Compute the abscissa-weight pairs for level `k`. See [1] page 9.

    Parameters
    ----------
    k : int
        Quadrature level; level `k` uses step size ``h0 / 2**k``.
    h0 : float
        Base step size (see `_get_base_step`).

    Returns
    -------
    xjc : ndarray
        Complements of the abscissae, ``1 - xj``, stored (instead of the
        abscissae themselves) to preserve precision near the endpoints.
    wj : ndarray
        Corresponding quadrature weights.
    """
    # For now, we compute and store in 64-bit precision. If higher-precision
    # data types become better supported, it would be good to compute these
    # using the highest precision available. Or, once there is an Array API-
    # compatible arbitrary precision array, we can compute at the required
    # precision.

    # "....each level k of abscissa-weight pairs uses h = 2 **-k"
    # We adapt to floating point arithmetic using ideas of [2].
    h = h0 / 2**k
    n_steps = _N_BASE_STEPS * 2**k  # renamed from `max`: don't shadow builtin

    # For iterations after the first, "....the integrand function needs to be
    # evaluated only at the odd-indexed abscissas at each level."
    j = np.arange(n_steps+1) if k == 0 else np.arange(1, n_steps+1, 2)
    jh = j * h

    # "In this case... the weights wj = u1/cosh(u2)^2, where..."
    pi_2 = np.pi / 2
    u1 = pi_2*np.cosh(jh)
    u2 = pi_2*np.sinh(jh)
    # Denominators get big here. Overflow then underflow doesn't need warning.
    # with np.errstate(under='ignore', over='ignore'):
    wj = u1 / np.cosh(u2)**2
    # "We actually store 1-xj = 1/(...)."
    xjc = 1 / (np.exp(u2) * np.cosh(u2))  # complement of xj = np.tanh(u2)

    # When level k == 0, the zeroth xj corresponds with xj = 0. To simplify
    # code, the function will be evaluated there twice; each gets half weight.
    wj[0] = wj[0] / 2 if k == 0 else wj[0]

    return xjc, wj  # store at full precision
527
+
528
+
529
def _pair_cache(k, h0):
    """Extend the cache of abscissa-weight pairs through level `k`.

    Pairs of consecutive levels are stored concatenated in the function
    attributes ``xjc`` and ``wj``; ``indices`` records the offset of each
    level, so ``xjc[indices[k]:indices[k+1]]`` extracts the level-`k`
    abscissa complements. A change of base step `h0` invalidates the cache.
    """
    # A different base step means the previously cached pairs are unusable.
    if h0 != _pair_cache.h0:
        _pair_cache.xjc = np.empty(0)
        _pair_cache.wj = np.empty(0)
        _pair_cache.indices = [0]

    # Gather the already-cached arrays plus any newly computed levels.
    new_xjcs = [_pair_cache.xjc]
    new_wjs = [_pair_cache.wj]
    highest_cached = len(_pair_cache.indices) - 1
    for level in range(highest_cached, k + 1):
        xjc, wj = _compute_pair(level, h0)
        new_xjcs.append(xjc)
        new_wjs.append(wj)
        _pair_cache.indices.append(_pair_cache.indices[-1] + len(xjc))

    _pair_cache.xjc = np.concatenate(new_xjcs)
    _pair_cache.wj = np.concatenate(new_wjs)
    _pair_cache.h0 = h0


# Initialize the (function-attribute) cache as empty.
_pair_cache.xjc = np.empty(0)
_pair_cache.wj = np.empty(0)
_pair_cache.indices = [0]
_pair_cache.h0 = None
556
+
557
+
558
def _get_pairs(k, h0, inclusive=False, dtype=np.float64):
    """Fetch the level-`k` abscissa-weight pairs from the cache.

    If `inclusive`, return the pairs of all levels up to and including `k`.
    The cache is (re)built on demand when it is too short or was computed
    for a different base step `h0`.
    """
    cache_too_short = len(_pair_cache.indices) <= k + 2
    if cache_too_short or h0 != _pair_cache.h0:
        _pair_cache(k, h0)

    indices = _pair_cache.indices
    begin = 0 if inclusive else indices[k]
    stop = indices[k + 1]

    xjc = _pair_cache.xjc[begin:stop].astype(dtype)
    wj = _pair_cache.wj[begin:stop].astype(dtype)
    return xjc, wj
572
+
573
+
574
+ def _transform_to_limits(xjc, wj, a, b):
575
+ # Transform integral according to user-specified limits. This is just
576
+ # math that follows from the fact that the standard limits are (-1, 1).
577
+ # Note: If we had stored xj instead of xjc, we would have
578
+ # xj = alpha * xj + beta, where beta = (a + b)/2
579
+ alpha = (b - a) / 2
580
+ xj = np.concatenate((-alpha * xjc + b, alpha * xjc + a), axis=-1)
581
+ wj = wj*alpha # arguments get broadcasted, so we can't use *=
582
+ wj = np.concatenate((wj, wj), axis=-1)
583
+
584
+ # Points at the boundaries can be generated due to finite precision
585
+ # arithmetic, but these function values aren't supposed to be included in
586
+ # the Euler-Maclaurin sum. Ideally we wouldn't evaluate the function at
587
+ # these points; however, we can't easily filter out points since this
588
+ # function is vectorized. Instead, zero the weights.
589
+ invalid = (xj <= a) | (xj >= b)
590
+ wj[invalid] = 0
591
+ return xj, wj
592
+
593
+
594
+ def _euler_maclaurin_sum(fj, work):
595
+ # Perform the Euler-Maclaurin Sum, [1] Section 4
596
+
597
+ # The error estimate needs to know the magnitude of the last term
598
+ # omitted from the Euler-Maclaurin sum. This is a bit involved because
599
+ # it may have been computed at a previous level. I sure hope it's worth
600
+ # all the trouble.
601
+ xr0, fr0, wr0 = work.xr0, work.fr0, work.wr0
602
+ xl0, fl0, wl0 = work.xl0, work.fl0, work.wl0
603
+
604
+ # It is much more convenient to work with the transposes of our work
605
+ # variables here.
606
+ xj, fj, wj = work.xj.T, fj.T, work.wj.T
607
+ n_x, n_active = xj.shape # number of abscissae, number of active elements
608
+
609
+ # We'll work with the left and right sides separately
610
+ xr, xl = xj.reshape(2, n_x // 2, n_active).copy() # this gets modified
611
+ fr, fl = fj.reshape(2, n_x // 2, n_active)
612
+ wr, wl = wj.reshape(2, n_x // 2, n_active)
613
+
614
+ invalid_r = ~np.isfinite(fr) | (wr == 0)
615
+ invalid_l = ~np.isfinite(fl) | (wl == 0)
616
+
617
+ # integer index of the maximum abscissa at this level
618
+ xr[invalid_r] = -np.inf
619
+ ir = np.argmax(xr, axis=0, keepdims=True)
620
+ # abscissa, function value, and weight at this index
621
+ xr_max = np.take_along_axis(xr, ir, axis=0)[0]
622
+ fr_max = np.take_along_axis(fr, ir, axis=0)[0]
623
+ wr_max = np.take_along_axis(wr, ir, axis=0)[0]
624
+ # boolean indices at which maximum abscissa at this level exceeds
625
+ # the incumbent maximum abscissa (from all previous levels)
626
+ j = xr_max > xr0
627
+ # Update record of the incumbent abscissa, function value, and weight
628
+ xr0[j] = xr_max[j]
629
+ fr0[j] = fr_max[j]
630
+ wr0[j] = wr_max[j]
631
+
632
+ # integer index of the minimum abscissa at this level
633
+ xl[invalid_l] = np.inf
634
+ il = np.argmin(xl, axis=0, keepdims=True)
635
+ # abscissa, function value, and weight at this index
636
+ xl_min = np.take_along_axis(xl, il, axis=0)[0]
637
+ fl_min = np.take_along_axis(fl, il, axis=0)[0]
638
+ wl_min = np.take_along_axis(wl, il, axis=0)[0]
639
+ # boolean indices at which minimum abscissa at this level is less than
640
+ # the incumbent minimum abscissa (from all previous levels)
641
+ j = xl_min < xl0
642
+ # Update record of the incumbent abscissa, function value, and weight
643
+ xl0[j] = xl_min[j]
644
+ fl0[j] = fl_min[j]
645
+ wl0[j] = wl_min[j]
646
+ fj = fj.T
647
+
648
+ # Compute the error estimate `d4` - the magnitude of the leftmost or
649
+ # rightmost term, whichever is greater.
650
+ flwl0 = fl0 + np.log(wl0) if work.log else fl0 * wl0 # leftmost term
651
+ frwr0 = fr0 + np.log(wr0) if work.log else fr0 * wr0 # rightmost term
652
+ magnitude = np.real if work.log else np.abs
653
+ work.d4 = np.maximum(magnitude(flwl0), magnitude(frwr0))
654
+
655
+ # There are two approaches to dealing with function values that are
656
+ # numerically infinite due to approaching a singularity - zero them, or
657
+ # replace them with the function value at the nearest non-infinite point.
658
+ # [3] pg. 22 suggests the latter, so let's do that given that we have the
659
+ # information.
660
+ fr0b = np.broadcast_to(fr0[np.newaxis, :], fr.shape)
661
+ fl0b = np.broadcast_to(fl0[np.newaxis, :], fl.shape)
662
+ fr[invalid_r] = fr0b[invalid_r]
663
+ fl[invalid_l] = fl0b[invalid_l]
664
+
665
+ # When wj is zero, log emits a warning
666
+ # with np.errstate(divide='ignore'):
667
+ fjwj = fj + np.log(work.wj) if work.log else fj * work.wj
668
+
669
+ # update integral estimate
670
+ Sn = (special.logsumexp(fjwj + np.log(work.h), axis=-1) if work.log
671
+ else np.sum(fjwj, axis=-1) * work.h)
672
+
673
+ work.xr0, work.fr0, work.wr0 = xr0, fr0, wr0
674
+ work.xl0, work.fl0, work.wl0 = xl0, fl0, wl0
675
+
676
+ return fjwj, Sn
677
+
678
+
679
def _estimate_error(work):
    """Estimate the relative and absolute error per [1] Section 5.

    Returns ``(rerr, aerr)``. Requires at least two lower-level integral
    estimates in ``work.Sk``; when the iteration was jump-started at a level
    above 0, the missing lower-level estimates are reconstructed here from
    the stored function-value/weight products ``work.fjwj``.
    """
    if work.n == 0 or work.nit == 0:
        # The paper says to use "one" as the error before it can be calculated.
        # NaN seems to be more appropriate.
        nan = np.full_like(work.Sn, np.nan)
        return nan, nan

    # Offsets of each level's pairs within the cached arrays; used below to
    # slice out the terms belonging to lower levels.
    indices = _pair_cache.indices

    n_active = len(work.Sn)  # number of active elements
    axis_kwargs = dict(axis=-1, keepdims=True)

    # With a jump start (starting at level higher than 0), we haven't
    # explicitly calculated the integral estimate at lower levels. But we have
    # all the function value-weight products, so we can compute the
    # lower-level estimates.
    if work.Sk.shape[-1] == 0:
        h = 2 * work.h  # step size one level down (presumably level n-1)
        n_x = indices[work.n]  # number of abscissae up to that level
        # The right and left fjwj terms from all levels are concatenated along
        # the last axis. Get out only the terms up to this level.
        fjwj_rl = work.fjwj.reshape(n_active, 2, -1)
        fjwj = fjwj_rl[:, :, :n_x].reshape(n_active, 2*n_x)
        # Compute the Euler-Maclaurin sum at this level
        Snm1 = (special.logsumexp(fjwj, **axis_kwargs) + np.log(h) if work.log
                else np.sum(fjwj, **axis_kwargs) * h)
        work.Sk = np.concatenate((Snm1, work.Sk), axis=-1)

    if work.n == 1:
        # Only one estimate available; the error cannot be estimated yet.
        nan = np.full_like(work.Sn, np.nan)
        return nan, nan

    # The paper says not to calculate the error for n<=2, but it's not clear
    # about whether it starts at level 0 or level 1. We start at level 0, so
    # why not compute the error beginning in level 2?
    if work.Sk.shape[-1] < 2:
        h = 4 * work.h  # step size two levels down
        n_x = indices[work.n-1]  # number of abscissae up to that level
        # The right and left fjwj terms from all levels are concatenated along
        # the last axis. Get out only the terms up to this level.
        fjwj_rl = work.fjwj.reshape(len(work.Sn), 2, -1)
        fjwj = fjwj_rl[..., :n_x].reshape(n_active, 2*n_x)
        # Compute the Euler-Maclaurin sum at this level
        Snm2 = (special.logsumexp(fjwj, **axis_kwargs) + np.log(h) if work.log
                else np.sum(fjwj, **axis_kwargs) * h)
        work.Sk = np.concatenate((Snm2, work.Sk), axis=-1)

    # Estimates at the two preceding levels.
    Snm2 = work.Sk[..., -2]
    Snm1 = work.Sk[..., -1]

    e1 = work.eps

    if work.log:
        log_e1 = np.log(e1)
        # Currently, only real integrals are supported in log-scale. All
        # complex values have imaginary part in increments of pi*j, which just
        # carries sign information of the original integral, so use of
        # `np.real` here is equivalent to absolute value in real scale.
        d1 = np.real(special.logsumexp([work.Sn, Snm1 + work.pi*1j], axis=0))
        d2 = np.real(special.logsumexp([work.Sn, Snm2 + work.pi*1j], axis=0))
        d3 = log_e1 + np.max(np.real(work.fjwj), axis=-1)
        d4 = work.d4
        # These are the log-scale analogs of the linear-scale terms below:
        # e.g. `d1**2/d2` is the log of `d1**(log(d1)/log(d2))`.
        aerr = np.max([d1 ** 2 / d2, 2 * d1, d3, d4], axis=0)
        rerr = np.maximum(log_e1, aerr - np.real(work.Sn))
    else:
        # Note: explicit computation of log10 of each of these is unnecessary.
        d1 = np.abs(work.Sn - Snm1)
        d2 = np.abs(work.Sn - Snm2)
        d3 = e1 * np.max(np.abs(work.fjwj), axis=-1)
        d4 = work.d4
        # If `d1` is 0, no need to warn. This does the right thing.
        # with np.errstate(divide='ignore'):
        aerr = np.max([d1**(np.log(d1)/np.log(d2)), d1**2, d3, d4], axis=0)
        rerr = np.maximum(e1, aerr/np.abs(work.Sn))
    return rerr, aerr.reshape(work.Sn.shape)
756
+
757
+
758
+ def _transform_integrals(a, b):
759
+ # Transform integrals to a form with finite a < b
760
+ # For b < a, we reverse the limits and will multiply the final result by -1
761
+ # For infinite limit on the right, we use the substitution x = 1/t - 1 + a
762
+ # For infinite limit on the left, we substitute x = -x and treat as above
763
+ # For infinite limits, we substitute x = t / (1-t**2)
764
+
765
+ negative = b < a
766
+ a[negative], b[negative] = b[negative], a[negative]
767
+
768
+ abinf = np.isinf(a) & np.isinf(b)
769
+ a[abinf], b[abinf] = -1, 1
770
+
771
+ ainf = np.isinf(a)
772
+ a[ainf], b[ainf] = -b[ainf], -a[ainf]
773
+
774
+ binf = np.isinf(b)
775
+ a0 = a.copy()
776
+ a[binf], b[binf] = 0, 1
777
+
778
+ return a, b, a0, negative, abinf, ainf, binf
779
+
780
+
781
+ def _tanhsinh_iv(f, a, b, log, maxfun, maxlevel, minlevel,
782
+ atol, rtol, args, preserve_shape, callback):
783
+ # Input validation and standardization
784
+
785
+ message = '`f` must be callable.'
786
+ if not callable(f):
787
+ raise ValueError(message)
788
+
789
+ message = 'All elements of `a` and `b` must be real numbers.'
790
+ a, b = np.broadcast_arrays(a, b)
791
+ if np.any(np.iscomplex(a)) or np.any(np.iscomplex(b)):
792
+ raise ValueError(message)
793
+
794
+ message = '`log` must be True or False.'
795
+ if log not in {True, False}:
796
+ raise ValueError(message)
797
+ log = bool(log)
798
+
799
+ if atol is None:
800
+ atol = -np.inf if log else 0
801
+
802
+ rtol_temp = rtol if rtol is not None else 0.
803
+
804
+ params = np.asarray([atol, rtol_temp, 0.])
805
+ message = "`atol` and `rtol` must be real numbers."
806
+ if not np.issubdtype(params.dtype, np.floating):
807
+ raise ValueError(message)
808
+
809
+ if log:
810
+ message = '`atol` and `rtol` may not be positive infinity.'
811
+ if np.any(np.isposinf(params)):
812
+ raise ValueError(message)
813
+ else:
814
+ message = '`atol` and `rtol` must be non-negative and finite.'
815
+ if np.any(params < 0) or np.any(np.isinf(params)):
816
+ raise ValueError(message)
817
+ atol = params[0]
818
+ rtol = rtol if rtol is None else params[1]
819
+
820
+ BIGINT = float(2**62)
821
+ if maxfun is None and maxlevel is None:
822
+ maxlevel = 10
823
+
824
+ maxfun = BIGINT if maxfun is None else maxfun
825
+ maxlevel = BIGINT if maxlevel is None else maxlevel
826
+
827
+ message = '`maxfun`, `maxlevel`, and `minlevel` must be integers.'
828
+ params = np.asarray([maxfun, maxlevel, minlevel])
829
+ if not (np.issubdtype(params.dtype, np.number)
830
+ and np.all(np.isreal(params))
831
+ and np.all(params.astype(np.int64) == params)):
832
+ raise ValueError(message)
833
+ message = '`maxfun`, `maxlevel`, and `minlevel` must be non-negative.'
834
+ if np.any(params < 0):
835
+ raise ValueError(message)
836
+ maxfun, maxlevel, minlevel = params.astype(np.int64)
837
+ minlevel = min(minlevel, maxlevel)
838
+
839
+ if not np.iterable(args):
840
+ args = (args,)
841
+
842
+ message = '`preserve_shape` must be True or False.'
843
+ if preserve_shape not in {True, False}:
844
+ raise ValueError(message)
845
+
846
+ if callback is not None and not callable(callback):
847
+ raise ValueError('`callback` must be callable.')
848
+
849
+ return (f, a, b, log, maxfun, maxlevel, minlevel,
850
+ atol, rtol, args, preserve_shape, callback)
851
+
852
+
853
+ def _logsumexp(x, axis=0):
854
+ # logsumexp raises with empty array
855
+ x = np.asarray(x)
856
+ shape = list(x.shape)
857
+ if shape[axis] == 0:
858
+ shape.pop(axis)
859
+ return np.full(shape, fill_value=-np.inf, dtype=x.dtype)
860
+ else:
861
+ return special.logsumexp(x, axis=axis)
862
+
863
+
864
+ def _nsum_iv(f, a, b, step, args, log, maxterms, atol, rtol):
865
+ # Input validation and standardization
866
+
867
+ message = '`f` must be callable.'
868
+ if not callable(f):
869
+ raise ValueError(message)
870
+
871
+ message = 'All elements of `a`, `b`, and `step` must be real numbers.'
872
+ a, b, step = np.broadcast_arrays(a, b, step)
873
+ dtype = np.result_type(a.dtype, b.dtype, step.dtype)
874
+ if not np.issubdtype(dtype, np.number) or np.issubdtype(dtype, np.complexfloating):
875
+ raise ValueError(message)
876
+
877
+ valid_a = np.isfinite(a)
878
+ valid_b = b >= a # NaNs will be False
879
+ valid_step = np.isfinite(step) & (step > 0)
880
+ valid_abstep = valid_a & valid_b & valid_step
881
+
882
+ message = '`log` must be True or False.'
883
+ if log not in {True, False}:
884
+ raise ValueError(message)
885
+
886
+ if atol is None:
887
+ atol = -np.inf if log else 0
888
+
889
+ rtol_temp = rtol if rtol is not None else 0.
890
+
891
+ params = np.asarray([atol, rtol_temp, 0.])
892
+ message = "`atol` and `rtol` must be real numbers."
893
+ if not np.issubdtype(params.dtype, np.floating):
894
+ raise ValueError(message)
895
+
896
+ if log:
897
+ message = '`atol`, `rtol` may not be positive infinity or NaN.'
898
+ if np.any(np.isposinf(params) | np.isnan(params)):
899
+ raise ValueError(message)
900
+ else:
901
+ message = '`atol`, and `rtol` must be non-negative and finite.'
902
+ if np.any((params < 0) | (~np.isfinite(params))):
903
+ raise ValueError(message)
904
+ atol = params[0]
905
+ rtol = rtol if rtol is None else params[1]
906
+
907
+ maxterms_int = int(maxterms)
908
+ if maxterms_int != maxterms or maxterms < 0:
909
+ message = "`maxterms` must be a non-negative integer."
910
+ raise ValueError(message)
911
+
912
+ if not np.iterable(args):
913
+ args = (args,)
914
+
915
+ return f, a, b, step, valid_abstep, args, log, maxterms_int, atol, rtol
916
+
917
+
918
def _nsum(f, a, b, step=1, args=(), log=False, maxterms=int(2**20), atol=None,
          rtol=None):
    r"""Evaluate a convergent sum.

    For finite `b`, this evaluates::

        f(a + np.arange(n)*step).sum()

    where ``n = int((b - a) / step) + 1``. If `f` is smooth, positive, and
    monotone decreasing, `b` may be infinite, in which case the infinite sum
    is approximated using integration.

    Parameters
    ----------
    f : callable
        The function that evaluates terms to be summed. The signature must be::

            f(x: ndarray, *args) -> ndarray

        where each element of ``x`` is a finite real and ``args`` is a tuple,
        which may contain an arbitrary number of arrays that are broadcastable
        with `x`. `f` must represent a smooth, positive, and monotone decreasing
        function of `x`; `_nsum` performs no checks to verify that these conditions
        are met and may return erroneous results if they are violated.
    a, b : array_like
        Real lower and upper limits of summed terms. Must be broadcastable.
        Each element of `a` must be finite and less than the corresponding
        element in `b`, but elements of `b` may be infinite.
    step : array_like
        Finite, positive, real step between summed terms. Must be broadcastable
        with `a` and `b`.
    args : tuple, optional
        Additional positional arguments to be passed to `f`. Must be arrays
        broadcastable with `a`, `b`, and `step`. If the callable to be summed
        requires arguments that are not broadcastable with `a`, `b`, and `step`,
        wrap that callable with `f`. See Examples.
    log : bool, default: False
        Setting to True indicates that `f` returns the log of the terms
        and that `atol` and `rtol` are expressed as the logs of the absolute
        and relative errors. In this case, the result object will contain the
        log of the sum and error. This is useful for summands for which
        numerical underflow or overflow would lead to inaccuracies.
    maxterms : int, default: 2**20
        The maximum number of terms to evaluate when summing directly.
        Additional function evaluations may be performed for input
        validation and integral evaluation.
    atol, rtol : float, optional
        Absolute termination tolerance (default: 0) and relative termination
        tolerance (default: ``eps**0.5``, where ``eps`` is the precision of
        the result dtype), respectively. Must be non-negative
        and finite if `log` is False, and must be expressed as the log of a
        non-negative and finite number if `log` is True.

    Returns
    -------
    res : _RichResult
        An instance of `scipy._lib._util._RichResult` with the following
        attributes. (The descriptions are written as though the values will be
        scalars; however, if `func` returns an array, the outputs will be
        arrays of the same shape.)
        success : bool
            ``True`` when the algorithm terminated successfully (status ``0``).
        status : int
            An integer representing the exit status of the algorithm.
            ``0`` : The algorithm converged to the specified tolerances.
            ``-1`` : Element(s) of `a`, `b`, or `step` are invalid
            ``-2`` : Numerical integration reached its iteration limit; the sum may be divergent.
            ``-3`` : A non-finite value was encountered.
        sum : float
            An estimate of the sum.
        error : float
            An estimate of the absolute error, assuming all terms are non-negative.
        nfev : int
            The number of points at which `func` was evaluated.

    See Also
    --------
    tanhsinh

    Notes
    -----
    The method implemented for infinite summation is related to the integral
    test for convergence of an infinite series: assuming `step` size 1 for
    simplicity of exposition, the sum of a monotone decreasing function is bounded by

    .. math::

        \int_u^\infty f(x) dx \leq \sum_{k=u}^\infty f(k) \leq \int_u^\infty f(x) dx + f(u)

    Let :math:`a` represent  `a`, :math:`n` represent `maxterms`, :math:`\epsilon_a`
    represent `atol`, and :math:`\epsilon_r` represent `rtol`.
    The implementation first evaluates the integral :math:`S_l=\int_a^\infty f(x) dx`
    as a lower bound of the infinite sum. Then, it seeks a value :math:`c > a` such
    that :math:`f(c) < \epsilon_a + S_l \epsilon_r`, if it exists; otherwise,
    let :math:`c = a + n`. Then the infinite sum is approximated as

    .. math::

        \sum_{k=a}^{c-1} f(k) + \int_c^\infty f(x) dx + f(c)/2

    and the reported error is :math:`f(c)/2` plus the error estimate of
    numerical integration. The approach described above is generalized for non-unit
    `step` and finite `b` that is too large for direct evaluation of the sum,
    i.e. ``b - a + 1 > maxterms``.

    References
    ----------
    [1] Wikipedia. "Integral test for convergence."
        https://en.wikipedia.org/wiki/Integral_test_for_convergence

    Examples
    --------
    Compute the infinite sum of the reciprocals of squared integers.

    >>> import numpy as np
    >>> from scipy.integrate._tanhsinh import _nsum
    >>> res = _nsum(lambda k: 1/k**2, 1, np.inf, maxterms=1e3)
    >>> ref = np.pi**2/6  # true value
    >>> res.error  # estimated error
    4.990014980029223e-07
    >>> (res.sum - ref)/ref  # true error
    -1.0101760641302586e-10
    >>> res.nfev  # number of points at which callable was evaluated
    1142

    Compute the infinite sums of the reciprocals of integers raised to powers ``p``.

    >>> from scipy import special
    >>> p = np.arange(2, 10)
    >>> res = _nsum(lambda k, p: 1/k**p, 1, np.inf, maxterms=1e3, args=(p,))
    >>> ref = special.zeta(p, 1)
    >>> np.allclose(res.sum, ref)
    True

    """  # noqa: E501
    # Potential future work:
    # - more careful testing of when `b` is slightly less than `a` plus an
    #   integer multiple of step (needed before this is public)
    # - improve error estimate of `_direct` sum
    # - add other methods for convergence acceleration (Richardson, epsilon)
    # - support infinite lower limit?
    # - support negative monotone increasing functions?
    # - b < a / negative step?
    # - complex-valued function?
    # - check for violations of monotonicity?

    # Function-specific input validation / standardization
    tmp = _nsum_iv(f, a, b, step, args, log, maxterms, atol, rtol)
    f, a, b, step, valid_abstep, args, log, maxterms, atol, rtol = tmp

    # Additional elementwise algorithm input validation / standardization
    tmp = eim._initialize(f, (a,), args, complex_ok=False)
    f, xs, fs, args, shape, dtype, xp = tmp

    # Finish preparing `a`, `b`, and `step` arrays.
    # All slices are raveled so the algorithm works on flat 1-D views;
    # results are reshaped to `shape` before returning.
    a = xs[0]
    b = np.broadcast_to(b, shape).ravel().astype(dtype)
    step = np.broadcast_to(step, shape).ravel().astype(dtype)
    valid_abstep = np.broadcast_to(valid_abstep, shape).ravel()
    # Snap `b` down to the last term that is actually included in the sum.
    nterms = np.floor((b - a) / step)
    b = a + nterms*step

    # Define constants
    eps = np.finfo(dtype).eps
    zero = np.asarray(-np.inf if log else 0, dtype=dtype)[()]
    if rtol is None:
        # 0.5*log(eps) is the log-space analogue of the default eps**0.5.
        rtol = 0.5*np.log(eps) if log else eps**0.5
    constants = (dtype, log, eps, zero, rtol, atol, maxterms)

    # Prepare result arrays
    S = np.empty_like(a)
    E = np.empty_like(a)
    status = np.zeros(len(a), dtype=int)
    nfev = np.ones(len(a), dtype=int)  # one function evaluation above

    # Branch for direct sum evaluation / integral approximation / invalid input
    i1 = (nterms + 1 <= maxterms) & valid_abstep
    i2 = (nterms + 1 > maxterms) & valid_abstep
    i3 = ~valid_abstep

    if np.any(i1):
        # Few enough terms: evaluate the sum directly.
        args_direct = [arg[i1] for arg in args]
        tmp = _direct(f, a[i1], b[i1], step[i1], args_direct, constants)
        S[i1], E[i1] = tmp[:-1]
        nfev[i1] += tmp[-1]
        status[i1] = -3 * (~np.isfinite(S[i1]))

    if np.any(i2):
        # Too many terms (possibly infinite `b`): use integral approximation.
        args_indirect = [arg[i2] for arg in args]
        tmp = _integral_bound(f, a[i2], b[i2], step[i2], args_indirect, constants)
        S[i2], E[i2], status[i2] = tmp[:-1]
        nfev[i2] += tmp[-1]

    if np.any(i3):
        # Invalid a/b/step combinations are reported, not raised.
        S[i3], E[i3] = np.nan, np.nan
        status[i3] = -1

    # Return results
    S, E = S.reshape(shape)[()], E.reshape(shape)[()]
    status, nfev = status.reshape(shape)[()], nfev.reshape(shape)[()]
    return _RichResult(sum=S, error=E, status=status, success=status == 0,
                       nfev=nfev)
1121
+
1122
+
1123
+ def _direct(f, a, b, step, args, constants, inclusive=True):
1124
+ # Directly evaluate the sum.
1125
+
1126
+ # When used in the context of distributions, `args` would contain the
1127
+ # distribution parameters. We have broadcasted for simplicity, but we could
1128
+ # reduce function evaluations when distribution parameters are the same but
1129
+ # sum limits differ. Roughly:
1130
+ # - compute the function at all points between min(a) and max(b),
1131
+ # - compute the cumulative sum,
1132
+ # - take the difference between elements of the cumulative sum
1133
+ # corresponding with b and a.
1134
+ # This is left to future enhancement
1135
+
1136
+ dtype, log, eps, zero, _, _, _ = constants
1137
+
1138
+ # To allow computation in a single vectorized call, find the maximum number
1139
+ # of points (over all slices) at which the function needs to be evaluated.
1140
+ # Note: if `inclusive` is `True`, then we want `1` more term in the sum.
1141
+ # I didn't think it was great style to use `True` as `1` in Python, so I
1142
+ # explicitly converted it to an `int` before using it.
1143
+ inclusive_adjustment = int(inclusive)
1144
+ steps = np.round((b - a) / step) + inclusive_adjustment
1145
+ # Equivalently, steps = np.round((b - a) / step) + inclusive
1146
+ max_steps = int(np.max(steps))
1147
+
1148
+ # In each slice, the function will be evaluated at the same number of points,
1149
+ # but excessive points (those beyond the right sum limit `b`) are replaced
1150
+ # with NaN to (potentially) reduce the time of these unnecessary calculations.
1151
+ # Use a new last axis for these calculations for consistency with other
1152
+ # elementwise algorithms.
1153
+ a2, b2, step2 = a[:, np.newaxis], b[:, np.newaxis], step[:, np.newaxis]
1154
+ args2 = [arg[:, np.newaxis] for arg in args]
1155
+ ks = a2 + np.arange(max_steps, dtype=dtype) * step2
1156
+ i_nan = ks >= (b2 + inclusive_adjustment*step2/2)
1157
+ ks[i_nan] = np.nan
1158
+ fs = f(ks, *args2)
1159
+
1160
+ # The function evaluated at NaN is NaN, and NaNs are zeroed in the sum.
1161
+ # In some cases it may be faster to loop over slices than to vectorize
1162
+ # like this. This is an optimization that can be added later.
1163
+ fs[i_nan] = zero
1164
+ nfev = max_steps - i_nan.sum(axis=-1)
1165
+ S = _logsumexp(fs, axis=-1) if log else np.sum(fs, axis=-1)
1166
+ # Rough, non-conservative error estimate. See gh-19667 for improvement ideas.
1167
+ E = np.real(S) + np.log(eps) if log else eps * abs(S)
1168
+ return S, E, nfev
1169
+
1170
+
1171
def _integral_bound(f, a, b, step, args, constants):
    # Estimate the sum with integral approximation.
    # Strategy (see `_nsum` Notes): integrate from `a` for a lower bound,
    # find a point `k` past which terms are below tolerance, sum directly
    # up to `k`, and approximate the remainder with another integral.
    dtype, log, _, _, rtol, atol, maxterms = constants
    log2 = np.log(2, dtype=dtype)  # log-space analogue of dividing by 2

    # Get a lower bound on the sum and compute effective absolute tolerance
    lb = _tanhsinh(f, a, b, args=args, atol=atol, rtol=rtol, log=log)
    tol = np.broadcast_to(atol, lb.integral.shape)
    tol = _logsumexp((tol, rtol + lb.integral)) if log else tol + rtol*lb.integral
    i_skip = lb.status < 0  # avoid unnecessary f_evals if integral is divergent
    tol[i_skip] = np.nan
    status = lb.status

    # As in `_direct`, we'll need a temporary new axis for points
    # at which to evaluate the function. Append axis at the end for
    # consistency with other elementwise algorithms.
    a2 = a[..., np.newaxis]
    step2 = step[..., np.newaxis]
    args2 = [arg[..., np.newaxis] for arg in args]

    # Find the location of a term that is less than the tolerance (if possible).
    # Candidate offsets are powers of two up to `maxterms` (geometric probing).
    log2maxterms = np.floor(np.log2(maxterms)) if maxterms else 0
    n_steps = np.concatenate([2**np.arange(0, log2maxterms), [maxterms]], dtype=dtype)
    nfev = len(n_steps)
    ks = a2 + n_steps * step2
    fks = f(ks, *args2)
    # `nt` indexes the first candidate whose term dropped below tolerance
    # (capped at the last candidate if none did).
    nt = np.minimum(np.sum(fks > tol[:, np.newaxis], axis=-1), n_steps.shape[-1]-1)
    n_steps = n_steps[nt]

    # Directly evaluate the sum up to this term
    k = a + n_steps * step
    left, left_error, left_nfev = _direct(f, a, k, step, args,
                                          constants, inclusive=False)
    i_skip |= np.isposinf(left)  # if sum is not finite, no sense in continuing
    status[np.isposinf(left)] = -3
    k[i_skip] = np.nan

    # Use integration to estimate the remaining sum
    # Possible optimization for future work: if there were no terms less than
    # the tolerance, there is no need to compute the integral to better accuracy.
    # Something like:
    # atol = np.maximum(atol, np.minimum(fk/2 - fb/2))
    # rtol = np.maximum(rtol, np.minimum((fk/2 - fb/2)/left))
    # where `fk`/`fb` are currently calculated below.
    right = _tanhsinh(f, k, b, args=args, atol=atol, rtol=rtol, log=log)

    # Calculate the full estimate and error from the pieces:
    # direct sum + remainder integral (rescaled by `step`) + half the
    # bracketing terms f(k) and f(b).
    fk = fks[np.arange(len(fks)), nt]
    fb = f(b, *args)
    nfev += 1
    if log:
        # In log space, subtracting log(step)/log(2) performs the division.
        log_step = np.log(step)
        S_terms = (left, right.integral - log_step, fk - log2, fb - log2)
        S = _logsumexp(S_terms, axis=0)
        # The pi*1j term encodes a negative contribution (log of -fb/2).
        E_terms = (left_error, right.error - log_step, fk-log2, fb-log2+np.pi*1j)
        E = _logsumexp(E_terms, axis=0).real
    else:
        S = left + right.integral/step + fk/2 + fb/2
        E = left_error + right.error/step + fk/2 - fb/2
    status[~i_skip] = right.status[~i_skip]
    return S, E, status, left_nfev + right.nfev + nfev + lb.nfev
parrot/lib/python3.10/site-packages/scipy/integrate/dop.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# This file is not meant for public use and will be removed in SciPy v2.0.0.

from scipy._lib.deprecation import _sub_module_deprecation

# No public names: this module only exists as a deprecation shim.
__all__: list[str] = []


def __dir__():
    # Expose only the (empty) public API to dir()/tab completion.
    return __all__


def __getattr__(name):
    # Any attribute access warns about the deprecation and is forwarded
    # to the private `scipy.integrate._dop` module.
    return _sub_module_deprecation(sub_package="integrate", module="dop",
                                   private_modules=["_dop"], all=__all__,
                                   attribute=name)
parrot/lib/python3.10/site-packages/scipy/integrate/lsoda.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# This file is not meant for public use and will be removed in SciPy v2.0.0.

from scipy._lib.deprecation import _sub_module_deprecation

# `lsoda` is still resolvable here for backward compatibility, but only
# through the deprecation machinery below.
__all__ = ['lsoda']  # noqa: F822


def __dir__():
    # Expose only the declared public API to dir()/tab completion.
    return __all__


def __getattr__(name):
    # Any attribute access warns about the deprecation and is forwarded
    # to the private `scipy.integrate._lsoda` module.
    return _sub_module_deprecation(sub_package="integrate", module="lsoda",
                                   private_modules=["_lsoda"], all=__all__,
                                   attribute=name)
parrot/lib/python3.10/site-packages/scipy/integrate/quadpack.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.integrate` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation

# These names remain resolvable here for backward compatibility, but only
# through the deprecation machinery below.
__all__ = [  # noqa: F822
    "quad",
    "dblquad",
    "tplquad",
    "nquad",
    "IntegrationWarning",
]


def __dir__():
    # Expose only the declared public API to dir()/tab completion.
    return __all__


def __getattr__(name):
    # Any attribute access warns about the deprecation and is forwarded
    # to the private `scipy.integrate._quadpack_py` module.
    return _sub_module_deprecation(sub_package="integrate", module="quadpack",
                                   private_modules=["_quadpack_py"], all=__all__,
                                   attribute=name)
parrot/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (173 Bytes). View file
 
parrot/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test__quad_vec.cpython-310.pyc ADDED
Binary file (6.98 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_odeint_jac.cpython-310.pyc ADDED
Binary file (2.09 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadrature.cpython-310.pyc ADDED
Binary file (27.3 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/integrate/tests/test__quad_vec.py ADDED
@@ -0,0 +1,215 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pytest
2
+
3
+ import numpy as np
4
+ from numpy.testing import assert_allclose
5
+
6
+ from scipy.integrate import quad_vec
7
+
8
+ from multiprocessing.dummy import Pool
9
+
10
+
11
+ quadrature_params = pytest.mark.parametrize(
12
+ 'quadrature', [None, "gk15", "gk21", "trapezoid"])
13
+
14
+
15
@quadrature_params
def test_quad_vec_simple(quadrature):
    """Integrate the vector integrand x**n over [0, 2] and compare against
    the exact antiderivative for several tolerances, norms, and options."""
    n = np.arange(10)
    def f(x):
        return x ** n
    for epsabs in [0.1, 1e-3, 1e-6]:
        if quadrature == 'trapezoid' and epsabs < 1e-4:
            # slow: skip
            continue

        kwargs = dict(epsabs=epsabs, quadrature=quadrature)

        # Exact value of integral(x**n, 0, 2).
        exact = 2**(n+1)/(n + 1)

        res, err = quad_vec(f, 0, 2, norm='max', **kwargs)
        assert_allclose(res, exact, rtol=0, atol=epsabs)

        res, err = quad_vec(f, 0, 2, norm='2', **kwargs)
        assert np.linalg.norm(res - exact) < epsabs

        # `points` requests initial interval splits at the given abscissae.
        res, err = quad_vec(f, 0, 2, norm='max', points=(0.5, 1.0), **kwargs)
        assert_allclose(res, exact, rtol=0, atol=epsabs)

        res, err, *rest = quad_vec(f, 0, 2, norm='max',
                                   epsrel=1e-8,
                                   full_output=True,
                                   limit=10000,
                                   **kwargs)
        assert_allclose(res, exact, rtol=0, atol=epsabs)
44
+
45
+
46
@quadrature_params
def test_quad_vec_simple_inf(quadrature):
    """Check (semi-)infinite limits in every orientation against the known
    integrals of the Lorentzian 1/(1+x**2)."""
    def f(x):
        return 1 / (1 + np.float64(x) ** 2)

    for epsabs in [0.1, 1e-3, 1e-6]:
        if quadrature == 'trapezoid' and epsabs < 1e-4:
            # slow: skip
            continue

        kwargs = dict(norm='max', epsabs=epsabs, quadrature=quadrature)

        res, err = quad_vec(f, 0, np.inf, **kwargs)
        assert_allclose(res, np.pi/2, rtol=0, atol=max(epsabs, err))

        # Reversed limits flip the sign.
        res, err = quad_vec(f, 0, -np.inf, **kwargs)
        assert_allclose(res, -np.pi/2, rtol=0, atol=max(epsabs, err))

        res, err = quad_vec(f, -np.inf, 0, **kwargs)
        assert_allclose(res, np.pi/2, rtol=0, atol=max(epsabs, err))

        res, err = quad_vec(f, np.inf, 0, **kwargs)
        assert_allclose(res, -np.pi/2, rtol=0, atol=max(epsabs, err))

        res, err = quad_vec(f, -np.inf, np.inf, **kwargs)
        assert_allclose(res, np.pi, rtol=0, atol=max(epsabs, err))

        res, err = quad_vec(f, np.inf, -np.inf, **kwargs)
        assert_allclose(res, -np.pi, rtol=0, atol=max(epsabs, err))

        # Degenerate (equal) limits integrate to zero.
        res, err = quad_vec(f, np.inf, np.inf, **kwargs)
        assert_allclose(res, 0, rtol=0, atol=max(epsabs, err))

        res, err = quad_vec(f, -np.inf, -np.inf, **kwargs)
        assert_allclose(res, 0, rtol=0, atol=max(epsabs, err))

        res, err = quad_vec(f, 0, np.inf, points=(1.0, 2.0), **kwargs)
        assert_allclose(res, np.pi/2, rtol=0, atol=max(epsabs, err))

    # An oscillatory integrand with a known closed form.
    def f(x):
        return np.sin(x + 2) / (1 + x ** 2)
    exact = np.pi / np.e * np.sin(2)
    epsabs = 1e-5

    res, err, info = quad_vec(f, -np.inf, np.inf, limit=1000, norm='max', epsabs=epsabs,
                              quadrature=quadrature, full_output=True)
    # status 1 means the iteration limit was reached before convergence,
    # so the comparison below allows for the reported error.
    assert info.status == 1
    assert_allclose(res, exact, rtol=0, atol=max(epsabs, 1.5 * err))
94
+
95
+
96
def test_quad_vec_args():
    """Extra positional arguments given via `args` reach the integrand."""
    def integrand(x, shift):
        return x * (x + shift) * np.arange(3)

    # integral over [0, 1] of x*(x + 2)*k for k = 0, 1, 2
    expected = np.array([0, 4/3, 8/3])

    result, _ = quad_vec(integrand, 0, 1, args=(2,))
    assert_allclose(result, expected, rtol=0, atol=1e-4)
104
+
105
+
106
+ def _lorenzian(x):
107
+ return 1 / (1 + x**2)
108
+
109
+
110
@pytest.mark.fail_slow(5)
def test_quad_vec_pool():
    """Parallel evaluation: `workers` as a process count and as a pool map."""
    # `_lorenzian` is defined at module level — presumably so it is
    # picklable for worker processes; verify if workers semantics change.
    f = _lorenzian
    res, err = quad_vec(f, -np.inf, np.inf, norm='max', epsabs=1e-4, workers=4)
    assert_allclose(res, np.pi, rtol=0, atol=1e-4)

    # multiprocessing.dummy.Pool is thread-based, so a local closure works.
    with Pool(10) as pool:
        def f(x):
            return 1 / (1 + x ** 2)
        res, _ = quad_vec(f, -np.inf, np.inf, norm='max', epsabs=1e-4, workers=pool.map)
        assert_allclose(res, np.pi, rtol=0, atol=1e-4)
121
+
122
+
123
+ def _func_with_args(x, a):
124
+ return x * (x + a) * np.arange(3)
125
+
126
+
127
@pytest.mark.fail_slow(5)
@pytest.mark.parametrize('extra_args', [2, (2,)])
@pytest.mark.parametrize('workers', [1, 10])
def test_quad_vec_pool_args(extra_args, workers):
    """`args` works with parallel workers, both as a bare scalar and a tuple."""
    f = _func_with_args
    # integral over [0, 1] of x*(x + 2)*k for k = 0, 1, 2
    exact = np.array([0, 4/3, 8/3])

    res, err = quad_vec(f, 0, 1, args=extra_args, workers=workers)
    assert_allclose(res, exact, rtol=0, atol=1e-4)

    # Same, with an explicit (thread-based) pool's map as `workers`.
    with Pool(workers) as pool:
        res, err = quad_vec(f, 0, 1, args=extra_args, workers=pool.map)
        assert_allclose(res, exact, rtol=0, atol=1e-4)
140
+
141
+
142
@quadrature_params
def test_num_eval(quadrature):
    """The reported `neval` matches the actual number of integrand calls."""
    def f(x):
        # Count every invocation via the enclosing mutable list.
        count[0] += 1
        return x**5

    count = [0]
    res = quad_vec(f, 0, 1, norm='max', full_output=True, quadrature=quadrature)
    # res is (integral, error, info); info.neval is the solver's own count.
    assert res[2].neval == count[0]
151
+
152
+
153
def test_info():
    """The `full_output` info object exposes self-consistent metadata."""
    def constant(x):
        return np.ones((3, 2, 1))

    _, _, info = quad_vec(constant, 0, 1, norm='max', full_output=True)

    n_intervals = info.intervals.shape[0]

    # Converged run: success flag, zero status, and the canonical message.
    assert info.success is True
    assert info.status == 0
    assert info.message == 'Target precision reached.'
    assert info.neval > 0

    # Each interval is an (a, b) pair; per-interval integrals keep the
    # integrand's output shape, and errors are scalar per interval.
    assert info.intervals.shape[1] == 2
    assert info.integrals.shape == (n_intervals, 3, 2, 1)
    assert info.errors.shape == (n_intervals,)
166
+
167
+
168
def test_nan_inf():
    """Non-finite integrand values are reported via status 3."""
    def always_nan(x):
        return np.nan

    def blows_up(x):
        return np.inf if x < 0.1 else 1/x

    _, _, info = quad_vec(always_nan, 0, 1, full_output=True)
    assert info.status == 3

    _, _, info = quad_vec(blows_up, 0, 1, full_output=True)
    assert info.status == 3
180
+
181
+
182
@pytest.mark.parametrize('a,b', [(0, 1), (0, np.inf), (np.inf, 0),
                                 (-np.inf, np.inf), (np.inf, -np.inf)])
def test_points(a, b):
    # Check that initial interval splitting is done according to
    # `points`, by checking that consecutive sets of 15 point (for
    # gk15) function evaluations lie between `points`

    points = (0, 0.25, 0.5, 0.75, 1.0)
    points += tuple(-x for x in points)

    quadrature_points = 15  # gk15 evaluates 15 abscissae per interval
    interval_sets = []
    count = 0

    def f(x):
        nonlocal count

        # Every 15th call starts a fresh interval's point set.
        if count % quadrature_points == 0:
            interval_sets.append(set())

        count += 1
        interval_sets[-1].add(float(x))
        return 0.0

    # limit=0 forbids adaptive subdivision, so the only intervals are
    # those induced by `points`.
    quad_vec(f, a, b, points=points, quadrature='gk15', limit=0)

    # Check that all point sets lie in a single `points` interval
    for p in interval_sets:
        # searchsorted gives the same index for all abscissae iff they
        # all fall between the same pair of adjacent break points.
        j = np.searchsorted(sorted(points), tuple(p))
        assert np.all(j == j[0])
212
+
213
def test_trapz_deprecation():
    """The legacy spelling quadrature='trapz' emits a DeprecationWarning."""
    with pytest.deprecated_call(match="`quadrature='trapz'`"):
        quad_vec(lambda x: x, 0, 1, quadrature="trapz")
parrot/lib/python3.10/site-packages/scipy/integrate/tests/test_banded_ode_solvers.py ADDED
@@ -0,0 +1,218 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import itertools
2
+ import numpy as np
3
+ from numpy.testing import assert_allclose
4
+ from scipy.integrate import ode
5
+
6
+
7
+ def _band_count(a):
8
+ """Returns ml and mu, the lower and upper band sizes of a."""
9
+ nrows, ncols = a.shape
10
+ ml = 0
11
+ for k in range(-nrows+1, 0):
12
+ if np.diag(a, k).any():
13
+ ml = -k
14
+ break
15
+ mu = 0
16
+ for k in range(nrows-1, 0, -1):
17
+ if np.diag(a, k).any():
18
+ mu = k
19
+ break
20
+ return ml, mu
21
+
22
+
23
+ def _linear_func(t, y, a):
24
+ """Linear system dy/dt = a * y"""
25
+ return a.dot(y)
26
+
27
+
28
+ def _linear_jac(t, y, a):
29
+ """Jacobian of a * y is a."""
30
+ return a
31
+
32
+
33
def _linear_banded_jac(t, y, a):
    """Jacobian of dy/dt = a @ y in banded storage: a list of diagonals,
    super-diagonals first (left-padded), then the main diagonal, then
    sub-diagonals (right-padded)."""
    ml, mu = _band_count(a)
    rows = []
    for offset in range(mu, 0, -1):
        rows.append(np.r_[[0] * offset, np.diag(a, offset)])
    rows.append(np.diag(a))
    for offset in range(-1, -ml - 1, -1):
        rows.append(np.r_[np.diag(a, offset), [0] * (-offset)])
    return rows
41
+
42
+
43
def _solve_linear_sys(a, y0, tend=1, dt=0.1,
                      solver=None, method='bdf', use_jac=True,
                      with_jacobian=False, banded=False):
    """Use scipy.integrate.ode to solve a linear system of ODEs.

    a : square ndarray
        Matrix of the linear system to be solved.
    y0 : ndarray
        Initial condition
    tend : float
        Stop time.
    dt : float
        Step size of the output.
    solver : str
        If not None, this must be "vode", "lsoda" or "zvode".
    method : str
        Either "bdf" or "adams".
    use_jac : bool
        Determines if the jacobian function is passed to ode().
    with_jacobian : bool
        Passed to ode.set_integrator().
    banded : bool
        Determines whether a banded or full jacobian is used.
        If `banded` is True, `lband` and `uband` are determined by the
        values in `a`.

    Returns the pair of arrays (t, y): output times and the solution
    at each time (y has one row per time point).
    """
    if banded:
        lband, uband = _band_count(a)
    else:
        lband = None
        uband = None

    if use_jac:
        if banded:
            r = ode(_linear_func, _linear_banded_jac)
        else:
            r = ode(_linear_func, _linear_jac)
    else:
        r = ode(_linear_func)

    if solver is None:
        # Complex systems require the complex-capable "zvode" solver.
        if np.iscomplexobj(a):
            solver = "zvode"
        else:
            solver = "vode"

    r.set_integrator(solver,
                     with_jacobian=with_jacobian,
                     method=method,
                     lband=lband, uband=uband,
                     rtol=1e-9, atol=1e-10,
                     )
    t0 = 0
    r.set_initial_value(y0, t0)
    # The matrix `a` is passed to both the RHS and the Jacobian callbacks.
    r.set_f_params(a)
    r.set_jac_params(a)

    t = [t0]
    y = [y0]
    # Step the integrator to `tend`, recording the solution at each step.
    while r.successful() and r.t < tend:
        r.integrate(r.t + dt)
        t.append(r.t)
        y.append(r.y)

    t = np.array(t)
    y = np.array(y)
    return t, y
110
+
111
+
112
+ def _analytical_solution(a, y0, t):
113
+ """
114
+ Analytical solution to the linear differential equations dy/dt = a*y.
115
+
116
+ The solution is only valid if `a` is diagonalizable.
117
+
118
+ Returns a 2-D array with shape (len(t), len(y0)).
119
+ """
120
+ lam, v = np.linalg.eig(a)
121
+ c = np.linalg.solve(v, y0)
122
+ e = c * np.exp(lam * t.reshape(-1, 1))
123
+ sol = e.dot(v.T)
124
+ return sol
125
+
126
+
127
def test_banded_ode_solvers():
    # Test the "lsoda", "vode" and "zvode" solvers of the `ode` class
    # with a system that has a banded Jacobian matrix.
    # Each solver/option combination is compared against the closed-form
    # solution from `_analytical_solution`.

    t_exact = np.linspace(0, 1.0, 5)

    # --- Real arrays for testing the "lsoda" and "vode" solvers ---

    # lband = 2, uband = 1:
    a_real = np.array([[-0.6, 0.1, 0.0, 0.0, 0.0],
                       [0.2, -0.5, 0.9, 0.0, 0.0],
                       [0.1, 0.1, -0.4, 0.1, 0.0],
                       [0.0, 0.3, -0.1, -0.9, -0.3],
                       [0.0, 0.0, 0.1, 0.1, -0.7]])

    # lband = 0, uband = 1:
    a_real_upper = np.triu(a_real)

    # lband = 2, uband = 0:
    a_real_lower = np.tril(a_real)

    # lband = 0, uband = 0:
    a_real_diag = np.triu(a_real_lower)

    real_matrices = [a_real, a_real_upper, a_real_lower, a_real_diag]
    real_solutions = []

    # Precompute the reference solution for each test matrix.
    for a in real_matrices:
        y0 = np.arange(1, a.shape[0] + 1)
        y_exact = _analytical_solution(a, y0, t_exact)
        real_solutions.append((y0, t_exact, y_exact))

    def check_real(idx, solver, meth, use_jac, with_jac, banded):
        # Integrate matrix `idx` with the given options and compare to
        # the precomputed analytical solution.
        a = real_matrices[idx]
        y0, t_exact, y_exact = real_solutions[idx]
        t, y = _solve_linear_sys(a, y0,
                                 tend=t_exact[-1],
                                 dt=t_exact[1] - t_exact[0],
                                 solver=solver,
                                 method=meth,
                                 use_jac=use_jac,
                                 with_jacobian=with_jac,
                                 banded=banded)
        assert_allclose(t, t_exact)
        assert_allclose(y, y_exact)

    # Exercise the full cross product of solver options for each matrix.
    for idx in range(len(real_matrices)):
        p = [['vode', 'lsoda'],  # solver
             ['bdf', 'adams'],   # method
             [False, True],      # use_jac
             [False, True],      # with_jacobian
             [False, True]]      # banded
        for solver, meth, use_jac, with_jac, banded in itertools.product(*p):
            check_real(idx, solver, meth, use_jac, with_jac, banded)

    # --- Complex arrays for testing the "zvode" solver ---

    # complex, lband = 2, uband = 1:
    a_complex = a_real - 0.5j * a_real

    # complex, lband = 0, uband = 0:
    a_complex_diag = np.diag(np.diag(a_complex))

    complex_matrices = [a_complex, a_complex_diag]
    complex_solutions = []

    for a in complex_matrices:
        y0 = np.arange(1, a.shape[0] + 1) + 1j
        y_exact = _analytical_solution(a, y0, t_exact)
        complex_solutions.append((y0, t_exact, y_exact))

    def check_complex(idx, solver, meth, use_jac, with_jac, banded):
        # Same as check_real, but for the complex matrices/solutions.
        a = complex_matrices[idx]
        y0, t_exact, y_exact = complex_solutions[idx]
        t, y = _solve_linear_sys(a, y0,
                                 tend=t_exact[-1],
                                 dt=t_exact[1] - t_exact[0],
                                 solver=solver,
                                 method=meth,
                                 use_jac=use_jac,
                                 with_jacobian=with_jac,
                                 banded=banded)
        assert_allclose(t, t_exact)
        assert_allclose(y, y_exact)

    # Only "zvode" supports complex systems.
    for idx in range(len(complex_matrices)):
        p = [['bdf', 'adams'],   # method
             [False, True],      # use_jac
             [False, True],      # with_jacobian
             [False, True]]      # banded
        for meth, use_jac, with_jac, banded in itertools.product(*p):
            check_complex(idx, "zvode", meth, use_jac, with_jac, banded)
parrot/lib/python3.10/site-packages/scipy/integrate/tests/test_odeint_jac.py ADDED
@@ -0,0 +1,74 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import numpy as np
2
+ from numpy.testing import assert_equal, assert_allclose
3
+ from scipy.integrate import odeint
4
+ import scipy.integrate._test_odeint_banded as banded5x5
5
+
6
+
7
def rhs(y, t):
    """Right-hand side of the banded5x5 test system for `odeint`.

    The Fortran routine fills `dydt` in place.
    """
    dydt = np.zeros_like(y)
    banded5x5.banded5x5(t, y, dydt)
    return dydt
11
+
12
+
13
def jac(y, t):
    """Full (dense) Jacobian of the banded5x5 system.

    Computed by the Fortran test routine, which fills the array in place;
    Fortran (column-major) order is required by the f2py wrapper.
    """
    n = len(y)
    jac = np.zeros((n, n), order='F')
    banded5x5.banded5x5_jac(t, y, 1, 1, jac)
    return jac
18
+
19
+
20
def bjac(y, t):
    """Banded Jacobian of the banded5x5 system.

    Stored in the (mu + ml + 1, n) banded layout expected by `odeint`
    (here mu=1, ml=2, hence 4 rows); filled in place by the Fortran
    routine, which requires Fortran (column-major) order.
    """
    n = len(y)
    bjac = np.zeros((4, n), order='F')
    banded5x5.banded5x5_bjac(t, y, 1, 1, bjac)
    return bjac
25
+
26
+
27
+ JACTYPE_FULL = 1
28
+ JACTYPE_BANDED = 4
29
+
30
+
31
def check_odeint(jactype):
    """Integrate the banded5x5 system with `odeint` and compare the result
    and solver statistics against the pure-Fortran `banded5x5_solve`.

    `jactype` selects a full (JACTYPE_FULL) or banded (JACTYPE_BANDED)
    Jacobian callback.
    """
    if jactype == JACTYPE_FULL:
        ml = None
        mu = None
        jacobian = jac
    elif jactype == JACTYPE_BANDED:
        ml = 2
        mu = 1
        jacobian = bjac
    else:
        raise ValueError(f"invalid jactype: {jactype!r}")

    y0 = np.arange(1.0, 6.0)
    # These tolerances must match the tolerances used in banded5x5.f.
    rtol = 1e-11
    atol = 1e-13
    dt = 0.125
    nsteps = 64
    t = dt * np.arange(nsteps+1)

    sol, info = odeint(rhs, y0, t,
                       Dfun=jacobian, ml=ml, mu=mu,
                       atol=atol, rtol=rtol, full_output=True)
    yfinal = sol[-1]
    # Cumulative step / function-eval / Jacobian-eval counts at the end.
    odeint_nst = info['nst'][-1]
    odeint_nfe = info['nfe'][-1]
    odeint_nje = info['nje'][-1]

    y1 = y0.copy()
    # Pure Fortran solution. y1 is modified in-place.
    nst, nfe, nje = banded5x5.banded5x5_solve(y1, nsteps, dt, jactype)

    # It is likely that yfinal and y1 are *exactly* the same, but
    # we'll be cautious and use assert_allclose.
    assert_allclose(yfinal, y1, rtol=1e-12)
    assert_equal((odeint_nst, odeint_nfe, odeint_nje), (nst, nfe, nje))
67
+
68
+
69
def test_odeint_full_jac():
    """odeint with a full (dense) Jacobian matches the Fortran reference."""
    check_odeint(JACTYPE_FULL)
71
+
72
+
73
def test_odeint_banded_jac():
    """odeint with a banded Jacobian matches the Fortran reference."""
    check_odeint(JACTYPE_BANDED)
parrot/lib/python3.10/site-packages/scipy/integrate/tests/test_tanhsinh.py ADDED
@@ -0,0 +1,947 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # mypy: disable-error-code="attr-defined"
2
+ import os
3
+ import pytest
4
+
5
+ import numpy as np
6
+ from numpy.testing import assert_allclose, assert_equal
7
+
8
+ import scipy._lib._elementwise_iterative_method as eim
9
+ from scipy import special, stats
10
+ from scipy.integrate import quad_vec
11
+ from scipy.integrate._tanhsinh import _tanhsinh, _pair_cache, _nsum
12
+ from scipy.stats._discrete_distns import _gen_harmonic_gt1
13
+
14
+ class TestTanhSinh:
15
+
16
+ # Test problems from [1] Section 6
17
def f1(self, t):
    """Test problem 1 from [1] Section 6: integrand t*log(1 + t) on [0, b]."""
    return np.log(1 + t) * t

# Exact value of the integral over [0, 1] and the upper limit.
f1.ref = 0.25
f1.b = 1
22
+
23
+ def f2(self, t):
24
+ return t ** 2 * np.arctan(t)
25
+
26
+ f2.ref = (np.pi - 2 + 2 * np.log(2)) / 12
27
+ f2.b = 1
28
+
29
+ def f3(self, t):
30
+ return np.exp(t) * np.cos(t)
31
+
32
+ f3.ref = (np.exp(np.pi / 2) - 1) / 2
33
+ f3.b = np.pi / 2
34
+
35
+ def f4(self, t):
36
+ a = np.sqrt(2 + t ** 2)
37
+ return np.arctan(a) / ((1 + t ** 2) * a)
38
+
39
+ f4.ref = 5 * np.pi ** 2 / 96
40
+ f4.b = 1
41
+
42
+ def f5(self, t):
43
+ return np.sqrt(t) * np.log(t)
44
+
45
+ f5.ref = -4 / 9
46
+ f5.b = 1
47
+
48
+ def f6(self, t):
49
+ return np.sqrt(1 - t ** 2)
50
+
51
+ f6.ref = np.pi / 4
52
+ f6.b = 1
53
+
54
+ def f7(self, t):
55
+ return np.sqrt(t) / np.sqrt(1 - t ** 2)
56
+
57
+ f7.ref = 2 * np.sqrt(np.pi) * special.gamma(3 / 4) / special.gamma(1 / 4)
58
+ f7.b = 1
59
+
60
+ def f8(self, t):
61
+ return np.log(t) ** 2
62
+
63
+ f8.ref = 2
64
+ f8.b = 1
65
+
66
+ def f9(self, t):
67
+ return np.log(np.cos(t))
68
+
69
+ f9.ref = -np.pi * np.log(2) / 2
70
+ f9.b = np.pi / 2
71
+
72
+ def f10(self, t):
73
+ return np.sqrt(np.tan(t))
74
+
75
+ f10.ref = np.pi * np.sqrt(2) / 2
76
+ f10.b = np.pi / 2
77
+
78
+ def f11(self, t):
79
+ return 1 / (1 + t ** 2)
80
+
81
+ f11.ref = np.pi / 2
82
+ f11.b = np.inf
83
+
84
+ def f12(self, t):
85
+ return np.exp(-t) / np.sqrt(t)
86
+
87
+ f12.ref = np.sqrt(np.pi)
88
+ f12.b = np.inf
89
+
90
+ def f13(self, t):
91
+ return np.exp(-t ** 2 / 2)
92
+
93
+ f13.ref = np.sqrt(np.pi / 2)
94
+ f13.b = np.inf
95
+
96
+ def f14(self, t):
97
+ return np.exp(-t) * np.cos(t)
98
+
99
+ f14.ref = 0.5
100
+ f14.b = np.inf
101
+
102
+ def f15(self, t):
103
+ return np.sin(t) / t
104
+
105
+ f15.ref = np.pi / 2
106
+ f15.b = np.inf
107
+
108
def error(self, res, ref, log=False):
    """Return the absolute error |res - ref|, or its base-10 log if `log`."""
    difference = abs(res - ref)

    if log:
        # A zero error gives log10(0) -> -inf; that is acceptable here,
        # so just silence the divide-by-zero warning.
        with np.errstate(divide='ignore'):
            return np.log10(difference)

    return difference
116
+
117
+ def test_input_validation(self):
118
+ f = self.f1
119
+
120
+ message = '`f` must be callable.'
121
+ with pytest.raises(ValueError, match=message):
122
+ _tanhsinh(42, 0, f.b)
123
+
124
+ message = '...must be True or False.'
125
+ with pytest.raises(ValueError, match=message):
126
+ _tanhsinh(f, 0, f.b, log=2)
127
+
128
+ message = '...must be real numbers.'
129
+ with pytest.raises(ValueError, match=message):
130
+ _tanhsinh(f, 1+1j, f.b)
131
+ with pytest.raises(ValueError, match=message):
132
+ _tanhsinh(f, 0, f.b, atol='ekki')
133
+ with pytest.raises(ValueError, match=message):
134
+ _tanhsinh(f, 0, f.b, rtol=pytest)
135
+
136
+ message = '...must be non-negative and finite.'
137
+ with pytest.raises(ValueError, match=message):
138
+ _tanhsinh(f, 0, f.b, rtol=-1)
139
+ with pytest.raises(ValueError, match=message):
140
+ _tanhsinh(f, 0, f.b, atol=np.inf)
141
+
142
+ message = '...may not be positive infinity.'
143
+ with pytest.raises(ValueError, match=message):
144
+ _tanhsinh(f, 0, f.b, rtol=np.inf, log=True)
145
+ with pytest.raises(ValueError, match=message):
146
+ _tanhsinh(f, 0, f.b, atol=np.inf, log=True)
147
+
148
+ message = '...must be integers.'
149
+ with pytest.raises(ValueError, match=message):
150
+ _tanhsinh(f, 0, f.b, maxlevel=object())
151
+ with pytest.raises(ValueError, match=message):
152
+ _tanhsinh(f, 0, f.b, maxfun=1+1j)
153
+ with pytest.raises(ValueError, match=message):
154
+ _tanhsinh(f, 0, f.b, minlevel="migratory coconut")
155
+
156
+ message = '...must be non-negative.'
157
+ with pytest.raises(ValueError, match=message):
158
+ _tanhsinh(f, 0, f.b, maxlevel=-1)
159
+ with pytest.raises(ValueError, match=message):
160
+ _tanhsinh(f, 0, f.b, maxfun=-1)
161
+ with pytest.raises(ValueError, match=message):
162
+ _tanhsinh(f, 0, f.b, minlevel=-1)
163
+
164
+ message = '...must be True or False.'
165
+ with pytest.raises(ValueError, match=message):
166
+ _tanhsinh(f, 0, f.b, preserve_shape=2)
167
+
168
+ message = '...must be callable.'
169
+ with pytest.raises(ValueError, match=message):
170
+ _tanhsinh(f, 0, f.b, callback='elderberry')
171
+
172
+ @pytest.mark.parametrize("limits, ref", [
173
+ [(0, np.inf), 0.5], # b infinite
174
+ [(-np.inf, 0), 0.5], # a infinite
175
+ [(-np.inf, np.inf), 1], # a and b infinite
176
+ [(np.inf, -np.inf), -1], # flipped limits
177
+ [(1, -1), stats.norm.cdf(-1) - stats.norm.cdf(1)], # flipped limits
178
+ ])
179
+ def test_integral_transforms(self, limits, ref):
180
+ # Check that the integral transforms are behaving for both normal and
181
+ # log integration
182
+ dist = stats.norm()
183
+
184
+ res = _tanhsinh(dist.pdf, *limits)
185
+ assert_allclose(res.integral, ref)
186
+
187
+ logres = _tanhsinh(dist.logpdf, *limits, log=True)
188
+ assert_allclose(np.exp(logres.integral), ref)
189
+ # Transformation should not make the result complex unnecessarily
190
+ assert (np.issubdtype(logres.integral.dtype, np.floating) if ref > 0
191
+ else np.issubdtype(logres.integral.dtype, np.complexfloating))
192
+
193
+ assert_allclose(np.exp(logres.error), res.error, atol=1e-16)
194
+
195
+ # 15 skipped intentionally; it's very difficult numerically
196
+ @pytest.mark.parametrize('f_number', range(1, 15))
197
+ def test_basic(self, f_number):
198
+ f = getattr(self, f"f{f_number}")
199
+ rtol = 2e-8
200
+ res = _tanhsinh(f, 0, f.b, rtol=rtol)
201
+ assert_allclose(res.integral, f.ref, rtol=rtol)
202
+ if f_number not in {14}: # mildly underestimates error here
203
+ true_error = abs(self.error(res.integral, f.ref)/res.integral)
204
+ assert true_error < res.error
205
+
206
+ if f_number in {7, 10, 12}: # succeeds, but doesn't know it
207
+ return
208
+
209
+ assert res.success
210
+ assert res.status == 0
211
+
212
+ @pytest.mark.parametrize('ref', (0.5, [0.4, 0.6]))
213
+ @pytest.mark.parametrize('case', stats._distr_params.distcont)
214
+ def test_accuracy(self, ref, case):
215
+ distname, params = case
216
+ if distname in {'dgamma', 'dweibull', 'laplace', 'kstwo'}:
217
+ # should split up interval at first-derivative discontinuity
218
+ pytest.skip('tanh-sinh is not great for non-smooth integrands')
219
+ if (distname in {'studentized_range', 'levy_stable'}
220
+ and not int(os.getenv('SCIPY_XSLOW', 0))):
221
+ pytest.skip('This case passes, but it is too slow.')
222
+ dist = getattr(stats, distname)(*params)
223
+ x = dist.interval(ref)
224
+ res = _tanhsinh(dist.pdf, *x)
225
+ assert_allclose(res.integral, ref)
226
+
227
+ @pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)])
228
+ def test_vectorization(self, shape):
229
+ # Test for correct functionality, output shapes, and dtypes for various
230
+ # input shapes.
231
+ rng = np.random.default_rng(82456839535679456794)
232
+ a = rng.random(shape)
233
+ b = rng.random(shape)
234
+ p = rng.random(shape)
235
+ n = np.prod(shape)
236
+
237
+ def f(x, p):
238
+ f.ncall += 1
239
+ f.feval += 1 if (x.size == n or x.ndim <=1) else x.shape[-1]
240
+ return x**p
241
+ f.ncall = 0
242
+ f.feval = 0
243
+
244
+ @np.vectorize
245
+ def _tanhsinh_single(a, b, p):
246
+ return _tanhsinh(lambda x: x**p, a, b)
247
+
248
+ res = _tanhsinh(f, a, b, args=(p,))
249
+ refs = _tanhsinh_single(a, b, p).ravel()
250
+
251
+ attrs = ['integral', 'error', 'success', 'status', 'nfev', 'maxlevel']
252
+ for attr in attrs:
253
+ ref_attr = [getattr(ref, attr) for ref in refs]
254
+ res_attr = getattr(res, attr)
255
+ assert_allclose(res_attr.ravel(), ref_attr, rtol=1e-15)
256
+ assert_equal(res_attr.shape, shape)
257
+
258
+ assert np.issubdtype(res.success.dtype, np.bool_)
259
+ assert np.issubdtype(res.status.dtype, np.integer)
260
+ assert np.issubdtype(res.nfev.dtype, np.integer)
261
+ assert np.issubdtype(res.maxlevel.dtype, np.integer)
262
+ assert_equal(np.max(res.nfev), f.feval)
263
+ # maxlevel = 2 -> 3 function calls (2 initialization, 1 work)
264
+ assert np.max(res.maxlevel) >= 2
265
+ assert_equal(np.max(res.maxlevel), f.ncall)
266
+
267
+ def test_flags(self):
268
+ # Test cases that should produce different status flags; show that all
269
+ # can be produced simultaneously.
270
+ def f(xs, js):
271
+ f.nit += 1
272
+ funcs = [lambda x: np.exp(-x**2), # converges
273
+ lambda x: np.exp(x), # reaches maxiter due to order=2
274
+ lambda x: np.full_like(x, np.nan)[()]] # stops due to NaN
275
+ res = [funcs[j](x) for x, j in zip(xs, js.ravel())]
276
+ return res
277
+ f.nit = 0
278
+
279
+ args = (np.arange(3, dtype=np.int64),)
280
+ res = _tanhsinh(f, [np.inf]*3, [-np.inf]*3, maxlevel=5, args=args)
281
+ ref_flags = np.array([0, -2, -3])
282
+ assert_equal(res.status, ref_flags)
283
+
284
+ def test_flags_preserve_shape(self):
285
+ # Same test as above but using `preserve_shape` option to simplify.
286
+ def f(x):
287
+ return [np.exp(-x[0]**2), # converges
288
+ np.exp(x[1]), # reaches maxiter due to order=2
289
+ np.full_like(x[2], np.nan)[()]] # stops due to NaN
290
+
291
+ res = _tanhsinh(f, [np.inf]*3, [-np.inf]*3, maxlevel=5, preserve_shape=True)
292
+ ref_flags = np.array([0, -2, -3])
293
+ assert_equal(res.status, ref_flags)
294
+
295
+ def test_preserve_shape(self):
296
+ # Test `preserve_shape` option
297
+ def f(x):
298
+ return np.asarray([[x, np.sin(10 * x)],
299
+ [np.cos(30 * x), x * np.sin(100 * x)]])
300
+
301
+ ref = quad_vec(f, 0, 1)
302
+ res = _tanhsinh(f, 0, 1, preserve_shape=True)
303
+ assert_allclose(res.integral, ref[0])
304
+
305
def test_convergence(self):
    # demonstrate that number of accurate digits doubles each iteration
    f = self.f1
    last_logerr = 0
    for i in range(4):
        res = _tanhsinh(f, 0, f.b, minlevel=0, maxlevel=i)
        logerr = self.error(res.integral, f.ref, log=True)
        # The log10-error should at least double each level, unless we
        # have already hit machine precision (~1e-15.5).
        assert (logerr < last_logerr * 2 or logerr < -15.5)
        last_logerr = logerr
314
+
315
+ def test_options_and_result_attributes(self):
316
+ # demonstrate that options are behaving as advertised and status
317
+ # messages are as intended
318
+ def f(x):
319
+ f.calls += 1
320
+ f.feval += np.size(x)
321
+ return self.f2(x)
322
+ f.ref = self.f2.ref
323
+ f.b = self.f2.b
324
+ default_rtol = 1e-12
325
+ default_atol = f.ref * default_rtol # effective default absolute tol
326
+
327
+ # Test default options
328
+ f.feval, f.calls = 0, 0
329
+ ref = _tanhsinh(f, 0, f.b)
330
+ assert self.error(ref.integral, f.ref) < ref.error < default_atol
331
+ assert ref.nfev == f.feval
332
+ ref.calls = f.calls # reference number of function calls
333
+ assert ref.success
334
+ assert ref.status == 0
335
+
336
+ # Test `maxlevel` equal to required max level
337
+ # We should get all the same results
338
+ f.feval, f.calls = 0, 0
339
+ maxlevel = ref.maxlevel
340
+ res = _tanhsinh(f, 0, f.b, maxlevel=maxlevel)
341
+ res.calls = f.calls
342
+ assert res == ref
343
+
344
+ # Now reduce the maximum level. We won't meet tolerances.
345
+ f.feval, f.calls = 0, 0
346
+ maxlevel -= 1
347
+ assert maxlevel >= 2 # can't compare errors otherwise
348
+ res = _tanhsinh(f, 0, f.b, maxlevel=maxlevel)
349
+ assert self.error(res.integral, f.ref) < res.error > default_atol
350
+ assert res.nfev == f.feval < ref.nfev
351
+ assert f.calls == ref.calls - 1
352
+ assert not res.success
353
+ assert res.status == eim._ECONVERR
354
+
355
+ # `maxfun` is currently not enforced
356
+
357
+ # # Test `maxfun` equal to required number of function evaluations
358
+ # # We should get all the same results
359
+ # f.feval, f.calls = 0, 0
360
+ # maxfun = ref.nfev
361
+ # res = _tanhsinh(f, 0, f.b, maxfun = maxfun)
362
+ # assert res == ref
363
+ #
364
+ # # Now reduce `maxfun`. We won't meet tolerances.
365
+ # f.feval, f.calls = 0, 0
366
+ # maxfun -= 1
367
+ # res = _tanhsinh(f, 0, f.b, maxfun=maxfun)
368
+ # assert self.error(res.integral, f.ref) < res.error > default_atol
369
+ # assert res.nfev == f.feval < ref.nfev
370
+ # assert f.calls == ref.calls - 1
371
+ # assert not res.success
372
+ # assert res.status == 2
373
+
374
+ # Take this result to be the new reference
375
+ ref = res
376
+ ref.calls = f.calls
377
+
378
+ # Test `atol`
379
+ f.feval, f.calls = 0, 0
380
+ # With this tolerance, we should get the exact same result as ref
381
+ atol = np.nextafter(ref.error, np.inf)
382
+ res = _tanhsinh(f, 0, f.b, rtol=0, atol=atol)
383
+ assert res.integral == ref.integral
384
+ assert res.error == ref.error
385
+ assert res.nfev == f.feval == ref.nfev
386
+ assert f.calls == ref.calls
387
+ # Except the result is considered to be successful
388
+ assert res.success
389
+ assert res.status == 0
390
+
391
+ f.feval, f.calls = 0, 0
392
+ # With a tighter tolerance, we should get a more accurate result
393
+ atol = np.nextafter(ref.error, -np.inf)
394
+ res = _tanhsinh(f, 0, f.b, rtol=0, atol=atol)
395
+ assert self.error(res.integral, f.ref) < res.error < atol
396
+ assert res.nfev == f.feval > ref.nfev
397
+ assert f.calls > ref.calls
398
+ assert res.success
399
+ assert res.status == 0
400
+
401
+ # Test `rtol`
402
+ f.feval, f.calls = 0, 0
403
+ # With this tolerance, we should get the exact same result as ref
404
+ rtol = np.nextafter(ref.error/ref.integral, np.inf)
405
+ res = _tanhsinh(f, 0, f.b, rtol=rtol)
406
+ assert res.integral == ref.integral
407
+ assert res.error == ref.error
408
+ assert res.nfev == f.feval == ref.nfev
409
+ assert f.calls == ref.calls
410
+ # Except the result is considered to be successful
411
+ assert res.success
412
+ assert res.status == 0
413
+
414
+ f.feval, f.calls = 0, 0
415
+ # With a tighter tolerance, we should get a more accurate result
416
+ rtol = np.nextafter(ref.error/ref.integral, -np.inf)
417
+ res = _tanhsinh(f, 0, f.b, rtol=rtol)
418
+ assert self.error(res.integral, f.ref)/f.ref < res.error/res.integral < rtol
419
+ assert res.nfev == f.feval > ref.nfev
420
+ assert f.calls > ref.calls
421
+ assert res.success
422
+ assert res.status == 0
423
+
424
+ @pytest.mark.parametrize('rtol', [1e-4, 1e-14])
425
+ def test_log(self, rtol):
426
+ # Test equivalence of log-integration and regular integration
427
+ dist = stats.norm()
428
+
429
+ test_tols = dict(atol=1e-18, rtol=1e-15)
430
+
431
+ # Positive integrand (real log-integrand)
432
+ res = _tanhsinh(dist.logpdf, -1, 2, log=True, rtol=np.log(rtol))
433
+ ref = _tanhsinh(dist.pdf, -1, 2, rtol=rtol)
434
+ assert_allclose(np.exp(res.integral), ref.integral, **test_tols)
435
+ assert_allclose(np.exp(res.error), ref.error, **test_tols)
436
+ assert res.nfev == ref.nfev
437
+
438
+ # Real integrand (complex log-integrand)
439
+ def f(x):
440
+ return -dist.logpdf(x)*dist.pdf(x)
441
+
442
+ def logf(x):
443
+ return np.log(dist.logpdf(x) + 0j) + dist.logpdf(x) + np.pi * 1j
444
+
445
+ res = _tanhsinh(logf, -np.inf, np.inf, log=True)
446
+ ref = _tanhsinh(f, -np.inf, np.inf)
447
+ # In gh-19173, we saw `invalid` warnings on one CI platform.
448
+ # Silencing `all` because I can't reproduce locally and don't want
449
+ # to risk the need to run CI again.
450
+ with np.errstate(all='ignore'):
451
+ assert_allclose(np.exp(res.integral), ref.integral, **test_tols)
452
+ assert_allclose(np.exp(res.error), ref.error, **test_tols)
453
+ assert res.nfev == ref.nfev
454
+
455
+ def test_complex(self):
456
+ # Test integration of complex integrand
457
+ # Finite limits
458
+ def f(x):
459
+ return np.exp(1j * x)
460
+
461
+ res = _tanhsinh(f, 0, np.pi/4)
462
+ ref = np.sqrt(2)/2 + (1-np.sqrt(2)/2)*1j
463
+ assert_allclose(res.integral, ref)
464
+
465
+ # Infinite limits
466
+ dist1 = stats.norm(scale=1)
467
+ dist2 = stats.norm(scale=2)
468
+ def f(x):
469
+ return dist1.pdf(x) + 1j*dist2.pdf(x)
470
+
471
+ res = _tanhsinh(f, np.inf, -np.inf)
472
+ assert_allclose(res.integral, -(1+1j))
473
+
474
+ @pytest.mark.parametrize("maxlevel", range(4))
475
+ def test_minlevel(self, maxlevel):
476
+ # Verify that minlevel does not change the values at which the
477
+ # integrand is evaluated or the integral/error estimates, only the
478
+ # number of function calls
479
+ def f(x):
480
+ f.calls += 1
481
+ f.feval += np.size(x)
482
+ f.x = np.concatenate((f.x, x.ravel()))
483
+ return self.f2(x)
484
+ f.feval, f.calls, f.x = 0, 0, np.array([])
485
+
486
+ ref = _tanhsinh(f, 0, self.f2.b, minlevel=0, maxlevel=maxlevel)
487
+ ref_x = np.sort(f.x)
488
+
489
+ for minlevel in range(0, maxlevel + 1):
490
+ f.feval, f.calls, f.x = 0, 0, np.array([])
491
+ options = dict(minlevel=minlevel, maxlevel=maxlevel)
492
+ res = _tanhsinh(f, 0, self.f2.b, **options)
493
+ # Should be very close; all that has changed is the order of values
494
+ assert_allclose(res.integral, ref.integral, rtol=4e-16)
495
+ # Difference in absolute errors << magnitude of integral
496
+ assert_allclose(res.error, ref.error, atol=4e-16 * ref.integral)
497
+ assert res.nfev == f.feval == len(f.x)
498
+ assert f.calls == maxlevel - minlevel + 1 + 1 # 1 validation call
499
+ assert res.status == ref.status
500
+ assert_equal(ref_x, np.sort(f.x))
501
+
502
+ def test_improper_integrals(self):
503
+ # Test handling of infinite limits of integration (mixed with finite limits)
504
+ def f(x):
505
+ x[np.isinf(x)] = np.nan
506
+ return np.exp(-x**2)
507
+ a = [-np.inf, 0, -np.inf, np.inf, -20, -np.inf, -20]
508
+ b = [np.inf, np.inf, 0, -np.inf, 20, 20, np.inf]
509
+ ref = np.sqrt(np.pi)
510
+ res = _tanhsinh(f, a, b)
511
+ assert_allclose(res.integral, [ref, ref/2, ref/2, -ref, ref, ref, ref])
512
+
513
+ @pytest.mark.parametrize("limits", ((0, 3), ([-np.inf, 0], [3, 3])))
514
+ @pytest.mark.parametrize("dtype", (np.float32, np.float64))
515
+ def test_dtype(self, limits, dtype):
516
+ # Test that dtypes are preserved
517
+ a, b = np.asarray(limits, dtype=dtype)[()]
518
+
519
+ def f(x):
520
+ assert x.dtype == dtype
521
+ return np.exp(x)
522
+
523
+ rtol = 1e-12 if dtype == np.float64 else 1e-5
524
+ res = _tanhsinh(f, a, b, rtol=rtol)
525
+ assert res.integral.dtype == dtype
526
+ assert res.error.dtype == dtype
527
+ assert np.all(res.success)
528
+ assert_allclose(res.integral, np.exp(b)-np.exp(a), rtol=rtol)
529
+
530
+ def test_maxiter_callback(self):
531
+ # Test behavior of `maxiter` parameter and `callback` interface
532
+ a, b = -np.inf, np.inf
533
+ def f(x):
534
+ return np.exp(-x*x)
535
+
536
+ minlevel, maxlevel = 0, 2
537
+ maxiter = maxlevel - minlevel + 1
538
+ kwargs = dict(minlevel=minlevel, maxlevel=maxlevel, rtol=1e-15)
539
+ res = _tanhsinh(f, a, b, **kwargs)
540
+ assert not res.success
541
+ assert res.maxlevel == maxlevel
542
+
543
+ def callback(res):
544
+ callback.iter += 1
545
+ callback.res = res
546
+ assert hasattr(res, 'integral')
547
+ assert res.status == 1
548
+ if callback.iter == maxiter:
549
+ raise StopIteration
550
+ callback.iter = -1 # callback called once before first iteration
551
+ callback.res = None
552
+
553
+ del kwargs['maxlevel']
554
+ res2 = _tanhsinh(f, a, b, **kwargs, callback=callback)
555
+ # terminating with callback is identical to terminating due to maxiter
556
+ # (except for `status`)
557
+ for key in res.keys():
558
+ if key == 'status':
559
+ assert callback.res[key] == 1
560
+ assert res[key] == -2
561
+ assert res2[key] == -4
562
+ else:
563
+ assert res2[key] == callback.res[key] == res[key]
564
+
565
+ def test_jumpstart(self):
566
+ # The intermediate results at each level i should be the same as the
567
+ # final results when jumpstarting at level i; i.e. minlevel=maxlevel=i
568
+ a, b = -np.inf, np.inf
569
+ def f(x):
570
+ return np.exp(-x*x)
571
+
572
+ def callback(res):
573
+ callback.integrals.append(res.integral)
574
+ callback.errors.append(res.error)
575
+ callback.integrals = []
576
+ callback.errors = []
577
+
578
+ maxlevel = 4
579
+ _tanhsinh(f, a, b, minlevel=0, maxlevel=maxlevel, callback=callback)
580
+
581
+ integrals = []
582
+ errors = []
583
+ for i in range(maxlevel + 1):
584
+ res = _tanhsinh(f, a, b, minlevel=i, maxlevel=i)
585
+ integrals.append(res.integral)
586
+ errors.append(res.error)
587
+
588
+ assert_allclose(callback.integrals[1:], integrals, rtol=1e-15)
589
+ assert_allclose(callback.errors[1:], errors, rtol=1e-15, atol=1e-16)
590
+
591
+ def test_special_cases(self):
592
+ # Test edge cases and other special cases
593
+
594
+ # Test that integers are not passed to `f`
595
+ # (otherwise this would overflow)
596
+ def f(x):
597
+ assert np.issubdtype(x.dtype, np.floating)
598
+ return x ** 99
599
+
600
+ res = _tanhsinh(f, 0, 1)
601
+ assert res.success
602
+ assert_allclose(res.integral, 1/100)
603
+
604
+ # Test levels 0 and 1; error is NaN
605
+ res = _tanhsinh(f, 0, 1, maxlevel=0)
606
+ assert res.integral > 0
607
+ assert_equal(res.error, np.nan)
608
+ res = _tanhsinh(f, 0, 1, maxlevel=1)
609
+ assert res.integral > 0
610
+ assert_equal(res.error, np.nan)
611
+
612
+ # Test equal left and right integration limits
613
+ res = _tanhsinh(f, 1, 1)
614
+ assert res.success
615
+ assert res.maxlevel == -1
616
+ assert_allclose(res.integral, 0)
617
+
618
+ # Test scalar `args` (not in tuple)
619
+ def f(x, c):
620
+ return x**c
621
+
622
+ res = _tanhsinh(f, 0, 1, args=99)
623
+ assert_allclose(res.integral, 1/100)
624
+
625
+ # Test NaNs
626
+ a = [np.nan, 0, 0, 0]
627
+ b = [1, np.nan, 1, 1]
628
+ c = [1, 1, np.nan, 1]
629
+ res = _tanhsinh(f, a, b, args=(c,))
630
+ assert_allclose(res.integral, [np.nan, np.nan, np.nan, 0.5])
631
+ assert_allclose(res.error[:3], np.nan)
632
+ assert_equal(res.status, [-3, -3, -3, 0])
633
+ assert_equal(res.success, [False, False, False, True])
634
+ assert_equal(res.nfev[:3], 1)
635
+
636
+ # Test complex integral followed by real integral
637
+ # Previously, h0 was of the result dtype. If the `dtype` were complex,
638
+ # this could lead to complex cached abscissae/weights. If these get
639
+ # cast to real dtype for a subsequent real integral, we would get a
640
+ # ComplexWarning. Check that this is avoided.
641
+ _pair_cache.xjc = np.empty(0)
642
+ _pair_cache.wj = np.empty(0)
643
+ _pair_cache.indices = [0]
644
+ _pair_cache.h0 = None
645
+ res = _tanhsinh(lambda x: x*1j, 0, 1)
646
+ assert_allclose(res.integral, 0.5*1j)
647
+ res = _tanhsinh(lambda x: x, 0, 1)
648
+ assert_allclose(res.integral, 0.5)
649
+
650
+ # Test zero-size
651
+ shape = (0, 3)
652
+ res = _tanhsinh(lambda x: x, 0, np.zeros(shape))
653
+ attrs = ['integral', 'error', 'success', 'status', 'nfev', 'maxlevel']
654
+ for attr in attrs:
655
+ assert_equal(res[attr].shape, shape)
656
+
657
+
658
+ class TestNSum:
659
+ rng = np.random.default_rng(5895448232066142650)
660
+ p = rng.uniform(1, 10, size=10)
661
+
662
def f1(self, k):
    # Integers are never passed to `f1`; if they were, we'd get
    # integer to negative integer power error
    return k**(-2)

# Reference value: sum_{k=1}^{inf} 1/k**2 = pi**2/6 (Basel problem),
# summed over [a, b] with no extra arguments.
f1.ref = np.pi**2/6
f1.a = 1
f1.b = np.inf
f1.args = tuple()
671
+
672
+ def f2(self, k, p):
673
+ return 1 / k**p
674
+
675
+ f2.ref = special.zeta(p, 1)
676
+ f2.a = 1
677
+ f2.b = np.inf
678
+ f2.args = (p,)
679
+
680
+ def f3(self, k, p):
681
+ return 1 / k**p
682
+
683
+ f3.a = 1
684
+ f3.b = rng.integers(5, 15, size=(3, 1))
685
+ f3.ref = _gen_harmonic_gt1(f3.b, p)
686
+ f3.args = (p,)
687
+
688
+ def test_input_validation(self):
689
+ f = self.f1
690
+
691
+ message = '`f` must be callable.'
692
+ with pytest.raises(ValueError, match=message):
693
+ _nsum(42, f.a, f.b)
694
+
695
+ message = '...must be True or False.'
696
+ with pytest.raises(ValueError, match=message):
697
+ _nsum(f, f.a, f.b, log=2)
698
+
699
+ message = '...must be real numbers.'
700
+ with pytest.raises(ValueError, match=message):
701
+ _nsum(f, 1+1j, f.b)
702
+ with pytest.raises(ValueError, match=message):
703
+ _nsum(f, f.a, None)
704
+ with pytest.raises(ValueError, match=message):
705
+ _nsum(f, f.a, f.b, step=object())
706
+ with pytest.raises(ValueError, match=message):
707
+ _nsum(f, f.a, f.b, atol='ekki')
708
+ with pytest.raises(ValueError, match=message):
709
+ _nsum(f, f.a, f.b, rtol=pytest)
710
+
711
+ with np.errstate(all='ignore'):
712
+ res = _nsum(f, [np.nan, -np.inf, np.inf], 1)
713
+ assert np.all((res.status == -1) & np.isnan(res.sum)
714
+ & np.isnan(res.error) & ~res.success & res.nfev == 1)
715
+ res = _nsum(f, 10, [np.nan, 1])
716
+ assert np.all((res.status == -1) & np.isnan(res.sum)
717
+ & np.isnan(res.error) & ~res.success & res.nfev == 1)
718
+ res = _nsum(f, 1, 10, step=[np.nan, -np.inf, np.inf, -1, 0])
719
+ assert np.all((res.status == -1) & np.isnan(res.sum)
720
+ & np.isnan(res.error) & ~res.success & res.nfev == 1)
721
+
722
+ message = '...must be non-negative and finite.'
723
+ with pytest.raises(ValueError, match=message):
724
+ _nsum(f, f.a, f.b, rtol=-1)
725
+ with pytest.raises(ValueError, match=message):
726
+ _nsum(f, f.a, f.b, atol=np.inf)
727
+
728
+ message = '...may not be positive infinity.'
729
+ with pytest.raises(ValueError, match=message):
730
+ _nsum(f, f.a, f.b, rtol=np.inf, log=True)
731
+ with pytest.raises(ValueError, match=message):
732
+ _nsum(f, f.a, f.b, atol=np.inf, log=True)
733
+
734
+ message = '...must be a non-negative integer.'
735
+ with pytest.raises(ValueError, match=message):
736
+ _nsum(f, f.a, f.b, maxterms=3.5)
737
+ with pytest.raises(ValueError, match=message):
738
+ _nsum(f, f.a, f.b, maxterms=-2)
739
+
740
+ @pytest.mark.parametrize('f_number', range(1, 4))
741
+ def test_basic(self, f_number):
742
+ f = getattr(self, f"f{f_number}")
743
+ res = _nsum(f, f.a, f.b, args=f.args)
744
+ assert_allclose(res.sum, f.ref)
745
+ assert_equal(res.status, 0)
746
+ assert_equal(res.success, True)
747
+
748
+ with np.errstate(divide='ignore'):
749
+ logres = _nsum(lambda *args: np.log(f(*args)),
750
+ f.a, f.b, log=True, args=f.args)
751
+ assert_allclose(np.exp(logres.sum), res.sum)
752
+ assert_allclose(np.exp(logres.error), res.error)
753
+ assert_equal(logres.status, 0)
754
+ assert_equal(logres.success, True)
755
+
756
+ @pytest.mark.parametrize('maxterms', [0, 1, 10, 20, 100])
757
+ def test_integral(self, maxterms):
758
+ # test precise behavior of integral approximation
759
+ f = self.f1
760
+
761
+ def logf(x):
762
+ return -2*np.log(x)
763
+
764
+ def F(x):
765
+ return -1 / x
766
+
767
+ a = np.asarray([1, 5])[:, np.newaxis]
768
+ b = np.asarray([20, 100, np.inf])[:, np.newaxis, np.newaxis]
769
+ step = np.asarray([0.5, 1, 2]).reshape((-1, 1, 1, 1))
770
+ nsteps = np.floor((b - a)/step)
771
+ b_original = b
772
+ b = a + nsteps*step
773
+
774
+ k = a + maxterms*step
775
+ # partial sum
776
+ direct = f(a + np.arange(maxterms)*step).sum(axis=-1, keepdims=True)
777
+ integral = (F(b) - F(k))/step # integral approximation of remainder
778
+ low = direct + integral + f(b) # theoretical lower bound
779
+ high = direct + integral + f(k) # theoretical upper bound
780
+ ref_sum = (low + high)/2 # _nsum uses average of the two
781
+ ref_err = (high - low)/2 # error (assuming perfect quadrature)
782
+
783
+ # correct reference values where number of terms < maxterms
784
+ a, b, step = np.broadcast_arrays(a, b, step)
785
+ for i in np.ndindex(a.shape):
786
+ ai, bi, stepi = a[i], b[i], step[i]
787
+ if (bi - ai)/stepi + 1 <= maxterms:
788
+ direct = f(np.arange(ai, bi+stepi, stepi)).sum()
789
+ ref_sum[i] = direct
790
+ ref_err[i] = direct * np.finfo(direct).eps
791
+
792
+ rtol = 1e-12
793
+ res = _nsum(f, a, b_original, step=step, maxterms=maxterms, rtol=rtol)
794
+ assert_allclose(res.sum, ref_sum, rtol=10*rtol)
795
+ assert_allclose(res.error, ref_err, rtol=100*rtol)
796
+ assert_equal(res.status, 0)
797
+ assert_equal(res.success, True)
798
+
799
+ i = ((b_original - a)/step + 1 <= maxterms)
800
+ assert_allclose(res.sum[i], ref_sum[i], rtol=1e-15)
801
+ assert_allclose(res.error[i], ref_err[i], rtol=1e-15)
802
+
803
+ logres = _nsum(logf, a, b_original, step=step, log=True,
804
+ rtol=np.log(rtol), maxterms=maxterms)
805
+ assert_allclose(np.exp(logres.sum), res.sum)
806
+ assert_allclose(np.exp(logres.error), res.error)
807
+ assert_equal(logres.status, 0)
808
+ assert_equal(logres.success, True)
809
+
810
+ @pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)])
811
+ def test_vectorization(self, shape):
812
+ # Test for correct functionality, output shapes, and dtypes for various
813
+ # input shapes.
814
+ rng = np.random.default_rng(82456839535679456794)
815
+ a = rng.integers(1, 10, size=shape)
816
+ # when the sum can be computed directly or `maxterms` is large enough
817
+ # to meet `atol`, there are slight differences (for good reason)
818
+ # between vectorized call and looping.
819
+ b = np.inf
820
+ p = rng.random(shape) + 1
821
+ n = np.prod(shape)
822
+
823
+ def f(x, p):
824
+ f.feval += 1 if (x.size == n or x.ndim <= 1) else x.shape[-1]
825
+ return 1 / x ** p
826
+
827
+ f.feval = 0
828
+
829
+ @np.vectorize
830
+ def _nsum_single(a, b, p, maxterms):
831
+ return _nsum(lambda x: 1 / x**p, a, b, maxterms=maxterms)
832
+
833
+ res = _nsum(f, a, b, maxterms=1000, args=(p,))
834
+ refs = _nsum_single(a, b, p, maxterms=1000).ravel()
835
+
836
+ attrs = ['sum', 'error', 'success', 'status', 'nfev']
837
+ for attr in attrs:
838
+ ref_attr = [getattr(ref, attr) for ref in refs]
839
+ res_attr = getattr(res, attr)
840
+ assert_allclose(res_attr.ravel(), ref_attr, rtol=1e-15)
841
+ assert_equal(res_attr.shape, shape)
842
+
843
+ assert np.issubdtype(res.success.dtype, np.bool_)
844
+ assert np.issubdtype(res.status.dtype, np.integer)
845
+ assert np.issubdtype(res.nfev.dtype, np.integer)
846
+ assert_equal(np.max(res.nfev), f.feval)
847
+
848
+ def test_status(self):
849
+ f = self.f2
850
+
851
+ p = [2, 2, 0.9, 1.1]
852
+ a = [0, 0, 1, 1]
853
+ b = [10, np.inf, np.inf, np.inf]
854
+ ref = special.zeta(p, 1)
855
+
856
+ with np.errstate(divide='ignore'): # intentionally dividing by zero
857
+ res = _nsum(f, a, b, args=(p,))
858
+
859
+ assert_equal(res.success, [False, False, False, True])
860
+ assert_equal(res.status, [-3, -3, -2, 0])
861
+ assert_allclose(res.sum[res.success], ref[res.success])
862
+
863
+ def test_nfev(self):
864
+ def f(x):
865
+ f.nfev += np.size(x)
866
+ return 1 / x**2
867
+
868
+ f.nfev = 0
869
+ res = _nsum(f, 1, 10)
870
+ assert_equal(res.nfev, f.nfev)
871
+
872
+ f.nfev = 0
873
+ res = _nsum(f, 1, np.inf, atol=1e-6)
874
+ assert_equal(res.nfev, f.nfev)
875
+
876
+ def test_inclusive(self):
877
+ # There was an edge case off-by one bug when `_direct` was called with
878
+ # `inclusive=True`. Check that this is resolved.
879
+ res = _nsum(lambda k: 1 / k ** 2, [1, 4], np.inf, maxterms=500, atol=0.1)
880
+ ref = _nsum(lambda k: 1 / k ** 2, [1, 4], np.inf)
881
+ assert np.all(res.sum > (ref.sum - res.error))
882
+ assert np.all(res.sum < (ref.sum + res.error))
883
+
884
+ def test_special_case(self):
885
+ # test equal lower/upper limit
886
+ f = self.f1
887
+ a = b = 2
888
+ res = _nsum(f, a, b)
889
+ assert_equal(res.sum, f(a))
890
+
891
+ # Test scalar `args` (not in tuple)
892
+ res = _nsum(self.f2, 1, np.inf, args=2)
893
+ assert_allclose(res.sum, self.f1.ref) # f1.ref is correct w/ args=2
894
+
895
+ # Test 0 size input
896
+ a = np.empty((3, 1, 1)) # arbitrary broadcastable shapes
897
+ b = np.empty((0, 1)) # could use Hypothesis
898
+ p = np.empty(4) # but it's overkill
899
+ shape = np.broadcast_shapes(a.shape, b.shape, p.shape)
900
+ res = _nsum(self.f2, a, b, args=(p,))
901
+ assert res.sum.shape == shape
902
+ assert res.status.shape == shape
903
+ assert res.nfev.shape == shape
904
+
905
+ # Test maxterms=0
906
+ def f(x):
907
+ with np.errstate(divide='ignore'):
908
+ return 1 / x
909
+
910
+ res = _nsum(f, 0, 10, maxterms=0)
911
+ assert np.isnan(res.sum)
912
+ assert np.isnan(res.error)
913
+ assert res.status == -2
914
+
915
+ res = _nsum(f, 0, 10, maxterms=1)
916
+ assert np.isnan(res.sum)
917
+ assert np.isnan(res.error)
918
+ assert res.status == -3
919
+
920
+ # Test NaNs
921
+ # should skip both direct and integral methods if there are NaNs
922
+ a = [np.nan, 1, 1, 1]
923
+ b = [np.inf, np.nan, np.inf, np.inf]
924
+ p = [2, 2, np.nan, 2]
925
+ res = _nsum(self.f2, a, b, args=(p,))
926
+ assert_allclose(res.sum, [np.nan, np.nan, np.nan, self.f1.ref])
927
+ assert_allclose(res.error[:3], np.nan)
928
+ assert_equal(res.status, [-1, -1, -3, 0])
929
+ assert_equal(res.success, [False, False, False, True])
930
+ # Ideally res.nfev[2] would be 1, but `tanhsinh` has some function evals
931
+ assert_equal(res.nfev[:2], 1)
932
+
933
+ @pytest.mark.parametrize('dtype', [np.float32, np.float64])
934
+ def test_dtype(self, dtype):
935
+ def f(k):
936
+ assert k.dtype == dtype
937
+ return 1 / k ** np.asarray(2, dtype=dtype)[()]
938
+
939
+ a = np.asarray(1, dtype=dtype)
940
+ b = np.asarray([10, np.inf], dtype=dtype)
941
+ res = _nsum(f, a, b)
942
+ assert res.sum.dtype == dtype
943
+ assert res.error.dtype == dtype
944
+
945
+ rtol = 1e-12 if dtype == np.float64 else 1e-6
946
+ ref = _gen_harmonic_gt1(b, 2)
947
+ assert_allclose(res.sum, ref, rtol=rtol)
parrot/lib/python3.10/site-packages/scipy/linalg/_solve_toeplitz.cpython-310-x86_64-linux-gnu.so ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4c2f5db6e6c40228f4d37552624bde26b3a2392e63902889f48f5ee20825b9e1
3
+ size 300112
parrot/lib/python3.10/site-packages/scipy/misc/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (1.86 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/misc/__pycache__/_common.cpython-310.pyc ADDED
Binary file (11.3 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/misc/tests/__pycache__/test_config.cpython-310.pyc ADDED
Binary file (2.07 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/ndimage/fourier.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # This file is not meant for public use and will be removed in SciPy v2.0.0.
2
+ # Use the `scipy.ndimage` namespace for importing the functions
3
+ # included below.
4
+
5
+ from scipy._lib.deprecation import _sub_module_deprecation
6
+
7
+
8
+ __all__ = [ # noqa: F822
9
+ 'fourier_gaussian', 'fourier_uniform',
10
+ 'fourier_ellipsoid', 'fourier_shift'
11
+ ]
12
+
13
+
14
+ def __dir__():
15
+ return __all__
16
+
17
+
18
+ def __getattr__(name):
19
+ return _sub_module_deprecation(sub_package='ndimage', module='fourier',
20
+ private_modules=['_fourier'], all=__all__,
21
+ attribute=name)
parrot/lib/python3.10/site-packages/scipy/odr/__init__.py ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ =================================================
3
+ Orthogonal distance regression (:mod:`scipy.odr`)
4
+ =================================================
5
+
6
+ .. currentmodule:: scipy.odr
7
+
8
+ Package Content
9
+ ===============
10
+
11
+ .. autosummary::
12
+ :toctree: generated/
13
+
14
+ Data -- The data to fit.
15
+ RealData -- Data with weights as actual std. dev.s and/or covariances.
16
+ Model -- Stores information about the function to be fit.
17
+ ODR -- Gathers all info & manages the main fitting routine.
18
+ Output -- Result from the fit.
19
+ odr -- Low-level function for ODR.
20
+
21
+ OdrWarning -- Warning about potential problems when running ODR.
22
+ OdrError -- Error exception.
23
+ OdrStop -- Stop exception.
24
+
25
+ polynomial -- Factory function for a general polynomial model.
26
+ exponential -- Exponential model
27
+ multilinear -- Arbitrary-dimensional linear model
28
+ unilinear -- Univariate linear model
29
+ quadratic -- Quadratic model
30
+
31
+ Usage information
32
+ =================
33
+
34
+ Introduction
35
+ ------------
36
+
37
+ Why Orthogonal Distance Regression (ODR)? Sometimes one has
38
+ measurement errors in the explanatory (a.k.a., "independent")
39
+ variable(s), not just the response (a.k.a., "dependent") variable(s).
40
+ Ordinary Least Squares (OLS) fitting procedures treat the data for
41
+ explanatory variables as fixed, i.e., not subject to error of any kind.
42
+ Furthermore, OLS procedures require that the response variables be an
43
+ explicit function of the explanatory variables; sometimes making the
44
+ equation explicit is impractical and/or introduces errors. ODR can
45
+ handle both of these cases with ease, and can even reduce to the OLS
46
+ case if that is sufficient for the problem.
47
+
48
+ ODRPACK is a FORTRAN-77 library for performing ODR with possibly
49
+ non-linear fitting functions. It uses a modified trust-region
50
+ Levenberg-Marquardt-type algorithm [1]_ to estimate the function
51
+ parameters. The fitting functions are provided by Python functions
52
+ operating on NumPy arrays. The required derivatives may be provided
53
+ by Python functions as well, or may be estimated numerically. ODRPACK
54
+ can do explicit or implicit ODR fits, or it can do OLS. Input and
55
+ output variables may be multidimensional. Weights can be provided to
56
+ account for different variances of the observations, and even
57
+ covariances between dimensions of the variables.
58
+
59
+ The `scipy.odr` package offers an object-oriented interface to
60
+ ODRPACK, in addition to the low-level `odr` function.
61
+
62
+ Additional background information about ODRPACK can be found in the
63
+ `ODRPACK User's Guide
64
+ <https://docs.scipy.org/doc/external/odrpack_guide.pdf>`_, reading
65
+ which is recommended.
66
+
67
+ Basic usage
68
+ -----------
69
+
70
+ 1. Define the function you want to fit against.::
71
+
72
+ def f(B, x):
73
+ '''Linear function y = m*x + b'''
74
+ # B is a vector of the parameters.
75
+ # x is an array of the current x values.
76
+ # x is in the same format as the x passed to Data or RealData.
77
+ #
78
+ # Return an array in the same format as y passed to Data or RealData.
79
+ return B[0]*x + B[1]
80
+
81
+ 2. Create a Model.::
82
+
83
+ linear = Model(f)
84
+
85
+ 3. Create a Data or RealData instance.::
86
+
87
+ mydata = Data(x, y, wd=1./power(sx,2), we=1./power(sy,2))
88
+
89
+ or, when the actual covariances are known::
90
+
91
+ mydata = RealData(x, y, sx=sx, sy=sy)
92
+
93
+ 4. Instantiate ODR with your data, model and initial parameter estimate.::
94
+
95
+ myodr = ODR(mydata, linear, beta0=[1., 2.])
96
+
97
+ 5. Run the fit.::
98
+
99
+ myoutput = myodr.run()
100
+
101
+ 6. Examine output.::
102
+
103
+ myoutput.pprint()
104
+
105
+
106
+ References
107
+ ----------
108
+ .. [1] P. T. Boggs and J. E. Rogers, "Orthogonal Distance Regression,"
109
+ in "Statistical analysis of measurement error models and
110
+ applications: proceedings of the AMS-IMS-SIAM joint summer research
111
+ conference held June 10-16, 1989," Contemporary Mathematics,
112
+ vol. 112, pg. 186, 1990.
113
+
114
+ """
115
+ # version: 0.7
116
+ # author: Robert Kern <robert.kern@gmail.com>
117
+ # date: 2006-09-21
118
+
119
+ from ._odrpack import *
120
+ from ._models import *
121
+ from . import _add_newdocs
122
+
123
+ # Deprecated namespaces, to be removed in v2.0.0
124
+ from . import models, odrpack
125
+
126
+ __all__ = [s for s in dir()
127
+ if not (s.startswith('_') or s in ('odr_stop', 'odr_error'))]
128
+
129
+ from scipy._lib._testutils import PytestTester
130
+ test = PytestTester(__name__)
131
+ del PytestTester
parrot/lib/python3.10/site-packages/scipy/odr/__pycache__/_models.cpython-310.pyc ADDED
Binary file (9.05 kB). View file
 
parrot/lib/python3.10/site-packages/scipy/odr/_add_newdocs.py ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from numpy.lib import add_newdoc
2
+
3
+ add_newdoc('scipy.odr', 'odr',
4
+ """
5
+ odr(fcn, beta0, y, x, we=None, wd=None, fjacb=None, fjacd=None, extra_args=None,
6
+ ifixx=None, ifixb=None, job=0, iprint=0, errfile=None, rptfile=None, ndigit=0,
7
+ taufac=0.0, sstol=-1.0, partol=-1.0, maxit=-1, stpb=None, stpd=None, sclb=None,
8
+ scld=None, work=None, iwork=None, full_output=0)
9
+
10
+ Low-level function for ODR.
11
+
12
+ See Also
13
+ --------
14
+ ODR : The ODR class gathers all information and coordinates the running of the
15
+ main fitting routine.
16
+ Model : The Model class stores information about the function you wish to fit.
17
+ Data : The data to fit.
18
+ RealData : Data with weights as actual std. dev.s and/or covariances.
19
+
20
+ Notes
21
+ -----
22
+ This is a function performing the same operation as the `ODR`,
23
+ `Model`, and `Data` classes together. The parameters of this
24
+ function are explained in the class documentation.
25
+
26
+ """)
27
+
28
+ add_newdoc('scipy.odr.__odrpack', '_set_exceptions',
29
+ """
30
+ _set_exceptions(odr_error, odr_stop)
31
+
32
+ Internal function: set exception classes.
33
+
34
+ """)
parrot/lib/python3.10/site-packages/scipy/odr/_models.py ADDED
@@ -0,0 +1,315 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """ Collection of Model instances for use with the odrpack fitting package.
2
+ """
3
+ import numpy as np
4
+ from scipy.odr._odrpack import Model
5
+
6
+ __all__ = ['Model', 'exponential', 'multilinear', 'unilinear', 'quadratic',
7
+ 'polynomial']
8
+
9
+
10
+ def _lin_fcn(B, x):
11
+ a, b = B[0], B[1:]
12
+ b.shape = (b.shape[0], 1)
13
+
14
+ return a + (x*b).sum(axis=0)
15
+
16
+
17
+ def _lin_fjb(B, x):
18
+ a = np.ones(x.shape[-1], float)
19
+ res = np.concatenate((a, x.ravel()))
20
+ res.shape = (B.shape[-1], x.shape[-1])
21
+ return res
22
+
23
+
24
+ def _lin_fjd(B, x):
25
+ b = B[1:]
26
+ b = np.repeat(b, (x.shape[-1],)*b.shape[-1], axis=0)
27
+ b.shape = x.shape
28
+ return b
29
+
30
+
31
+ def _lin_est(data):
32
+ # Eh. The answer is analytical, so just return all ones.
33
+ # Don't return zeros since that will interfere with
34
+ # ODRPACK's auto-scaling procedures.
35
+
36
+ if len(data.x.shape) == 2:
37
+ m = data.x.shape[0]
38
+ else:
39
+ m = 1
40
+
41
+ return np.ones((m + 1,), float)
42
+
43
+
44
+ def _poly_fcn(B, x, powers):
45
+ a, b = B[0], B[1:]
46
+ b.shape = (b.shape[0], 1)
47
+
48
+ return a + np.sum(b * np.power(x, powers), axis=0)
49
+
50
+
51
+ def _poly_fjacb(B, x, powers):
52
+ res = np.concatenate((np.ones(x.shape[-1], float),
53
+ np.power(x, powers).flat))
54
+ res.shape = (B.shape[-1], x.shape[-1])
55
+ return res
56
+
57
+
58
+ def _poly_fjacd(B, x, powers):
59
+ b = B[1:]
60
+ b.shape = (b.shape[0], 1)
61
+
62
+ b = b * powers
63
+
64
+ return np.sum(b * np.power(x, powers-1), axis=0)
65
+
66
+
67
+ def _exp_fcn(B, x):
68
+ return B[0] + np.exp(B[1] * x)
69
+
70
+
71
+ def _exp_fjd(B, x):
72
+ return B[1] * np.exp(B[1] * x)
73
+
74
+
75
+ def _exp_fjb(B, x):
76
+ res = np.concatenate((np.ones(x.shape[-1], float), x * np.exp(B[1] * x)))
77
+ res.shape = (2, x.shape[-1])
78
+ return res
79
+
80
+
81
+ def _exp_est(data):
82
+ # Eh.
83
+ return np.array([1., 1.])
84
+
85
+
86
+ class _MultilinearModel(Model):
87
+ r"""
88
+ Arbitrary-dimensional linear model
89
+
90
+ This model is defined by :math:`y=\beta_0 + \sum_{i=1}^m \beta_i x_i`
91
+
92
+ Examples
93
+ --------
94
+ We can calculate orthogonal distance regression with an arbitrary
95
+ dimensional linear model:
96
+
97
+ >>> from scipy import odr
98
+ >>> import numpy as np
99
+ >>> x = np.linspace(0.0, 5.0)
100
+ >>> y = 10.0 + 5.0 * x
101
+ >>> data = odr.Data(x, y)
102
+ >>> odr_obj = odr.ODR(data, odr.multilinear)
103
+ >>> output = odr_obj.run()
104
+ >>> print(output.beta)
105
+ [10. 5.]
106
+
107
+ """
108
+
109
+ def __init__(self):
110
+ super().__init__(
111
+ _lin_fcn, fjacb=_lin_fjb, fjacd=_lin_fjd, estimate=_lin_est,
112
+ meta={'name': 'Arbitrary-dimensional Linear',
113
+ 'equ': 'y = B_0 + Sum[i=1..m, B_i * x_i]',
114
+ 'TeXequ': r'$y=\beta_0 + \sum_{i=1}^m \beta_i x_i$'})
115
+
116
+
117
+ multilinear = _MultilinearModel()
118
+
119
+
120
+ def polynomial(order):
121
+ """
122
+ Factory function for a general polynomial model.
123
+
124
+ Parameters
125
+ ----------
126
+ order : int or sequence
127
+ If an integer, it becomes the order of the polynomial to fit. If
128
+ a sequence of numbers, then these are the explicit powers in the
129
+ polynomial.
130
+ A constant term (power 0) is always included, so don't include 0.
131
+ Thus, polynomial(n) is equivalent to polynomial(range(1, n+1)).
132
+
133
+ Returns
134
+ -------
135
+ polynomial : Model instance
136
+ Model instance.
137
+
138
+ Examples
139
+ --------
140
+ We can fit an input data using orthogonal distance regression (ODR) with
141
+ a polynomial model:
142
+
143
+ >>> import numpy as np
144
+ >>> import matplotlib.pyplot as plt
145
+ >>> from scipy import odr
146
+ >>> x = np.linspace(0.0, 5.0)
147
+ >>> y = np.sin(x)
148
+ >>> poly_model = odr.polynomial(3) # using third order polynomial model
149
+ >>> data = odr.Data(x, y)
150
+ >>> odr_obj = odr.ODR(data, poly_model)
151
+ >>> output = odr_obj.run() # running ODR fitting
152
+ >>> poly = np.poly1d(output.beta[::-1])
153
+ >>> poly_y = poly(x)
154
+ >>> plt.plot(x, y, label="input data")
155
+ >>> plt.plot(x, poly_y, label="polynomial ODR")
156
+ >>> plt.legend()
157
+ >>> plt.show()
158
+
159
+ """
160
+
161
+ powers = np.asarray(order)
162
+ if powers.shape == ():
163
+ # Scalar.
164
+ powers = np.arange(1, powers + 1)
165
+
166
+ powers.shape = (len(powers), 1)
167
+ len_beta = len(powers) + 1
168
+
169
+ def _poly_est(data, len_beta=len_beta):
170
+ # Eh. Ignore data and return all ones.
171
+ return np.ones((len_beta,), float)
172
+
173
+ return Model(_poly_fcn, fjacd=_poly_fjacd, fjacb=_poly_fjacb,
174
+ estimate=_poly_est, extra_args=(powers,),
175
+ meta={'name': 'Sorta-general Polynomial',
176
+ 'equ': 'y = B_0 + Sum[i=1..%s, B_i * (x**i)]' % (len_beta-1),
177
+ 'TeXequ': r'$y=\beta_0 + \sum_{i=1}^{%s} \beta_i x^i$' %
178
+ (len_beta-1)})
179
+
180
+
181
+ class _ExponentialModel(Model):
182
+ r"""
183
+ Exponential model
184
+
185
+ This model is defined by :math:`y=\beta_0 + e^{\beta_1 x}`
186
+
187
+ Examples
188
+ --------
189
+ We can calculate orthogonal distance regression with an exponential model:
190
+
191
+ >>> from scipy import odr
192
+ >>> import numpy as np
193
+ >>> x = np.linspace(0.0, 5.0)
194
+ >>> y = -10.0 + np.exp(0.5*x)
195
+ >>> data = odr.Data(x, y)
196
+ >>> odr_obj = odr.ODR(data, odr.exponential)
197
+ >>> output = odr_obj.run()
198
+ >>> print(output.beta)
199
+ [-10. 0.5]
200
+
201
+ """
202
+
203
+ def __init__(self):
204
+ super().__init__(_exp_fcn, fjacd=_exp_fjd, fjacb=_exp_fjb,
205
+ estimate=_exp_est,
206
+ meta={'name': 'Exponential',
207
+ 'equ': 'y= B_0 + exp(B_1 * x)',
208
+ 'TeXequ': r'$y=\beta_0 + e^{\beta_1 x}$'})
209
+
210
+
211
+ exponential = _ExponentialModel()
212
+
213
+
214
+ def _unilin(B, x):
215
+ return x*B[0] + B[1]
216
+
217
+
218
+ def _unilin_fjd(B, x):
219
+ return np.ones(x.shape, float) * B[0]
220
+
221
+
222
+ def _unilin_fjb(B, x):
223
+ _ret = np.concatenate((x, np.ones(x.shape, float)))
224
+ _ret.shape = (2,) + x.shape
225
+
226
+ return _ret
227
+
228
+
229
+ def _unilin_est(data):
230
+ return (1., 1.)
231
+
232
+
233
+ def _quadratic(B, x):
234
+ return x*(x*B[0] + B[1]) + B[2]
235
+
236
+
237
+ def _quad_fjd(B, x):
238
+ return 2*x*B[0] + B[1]
239
+
240
+
241
+ def _quad_fjb(B, x):
242
+ _ret = np.concatenate((x*x, x, np.ones(x.shape, float)))
243
+ _ret.shape = (3,) + x.shape
244
+
245
+ return _ret
246
+
247
+
248
+ def _quad_est(data):
249
+ return (1.,1.,1.)
250
+
251
+
252
+ class _UnilinearModel(Model):
253
+ r"""
254
+ Univariate linear model
255
+
256
+ This model is defined by :math:`y = \beta_0 x + \beta_1`
257
+
258
+ Examples
259
+ --------
260
+ We can calculate orthogonal distance regression with an unilinear model:
261
+
262
+ >>> from scipy import odr
263
+ >>> import numpy as np
264
+ >>> x = np.linspace(0.0, 5.0)
265
+ >>> y = 1.0 * x + 2.0
266
+ >>> data = odr.Data(x, y)
267
+ >>> odr_obj = odr.ODR(data, odr.unilinear)
268
+ >>> output = odr_obj.run()
269
+ >>> print(output.beta)
270
+ [1. 2.]
271
+
272
+ """
273
+
274
+ def __init__(self):
275
+ super().__init__(_unilin, fjacd=_unilin_fjd, fjacb=_unilin_fjb,
276
+ estimate=_unilin_est,
277
+ meta={'name': 'Univariate Linear',
278
+ 'equ': 'y = B_0 * x + B_1',
279
+ 'TeXequ': '$y = \\beta_0 x + \\beta_1$'})
280
+
281
+
282
+ unilinear = _UnilinearModel()
283
+
284
+
285
+ class _QuadraticModel(Model):
286
+ r"""
287
+ Quadratic model
288
+
289
+ This model is defined by :math:`y = \beta_0 x^2 + \beta_1 x + \beta_2`
290
+
291
+ Examples
292
+ --------
293
+ We can calculate orthogonal distance regression with a quadratic model:
294
+
295
+ >>> from scipy import odr
296
+ >>> import numpy as np
297
+ >>> x = np.linspace(0.0, 5.0)
298
+ >>> y = 1.0 * x ** 2 + 2.0 * x + 3.0
299
+ >>> data = odr.Data(x, y)
300
+ >>> odr_obj = odr.ODR(data, odr.quadratic)
301
+ >>> output = odr_obj.run()
302
+ >>> print(output.beta)
303
+ [1. 2. 3.]
304
+
305
+ """
306
+
307
+ def __init__(self):
308
+ super().__init__(
309
+ _quadratic, fjacd=_quad_fjd, fjacb=_quad_fjb, estimate=_quad_est,
310
+ meta={'name': 'Quadratic',
311
+ 'equ': 'y = B_0*x**2 + B_1*x + B_2',
312
+ 'TeXequ': '$y = \\beta_0 x^2 + \\beta_1 x + \\beta_2'})
313
+
314
+
315
+ quadratic = _QuadraticModel()
parrot/lib/python3.10/site-packages/scipy/odr/_odrpack.py ADDED
@@ -0,0 +1,1151 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Python wrappers for Orthogonal Distance Regression (ODRPACK).
3
+
4
+ Notes
5
+ =====
6
+
7
+ * Array formats -- FORTRAN stores its arrays in memory column first, i.e., an
8
+ array element A(i, j, k) will be next to A(i+1, j, k). In C and, consequently,
9
+ NumPy, arrays are stored row first: A[i, j, k] is next to A[i, j, k+1]. For
10
+ efficiency and convenience, the input and output arrays of the fitting
11
+ function (and its Jacobians) are passed to FORTRAN without transposition.
12
+ Therefore, where the ODRPACK documentation says that the X array is of shape
13
+ (N, M), it will be passed to the Python function as an array of shape (M, N).
14
+ If M==1, the 1-D case, then nothing matters; if M>1, then your
15
+ Python functions will be dealing with arrays that are indexed in reverse of
16
+ the ODRPACK documentation. No real issue, but watch out for your indexing of
17
+ the Jacobians: the i,jth elements (@f_i/@x_j) evaluated at the nth
18
+ observation will be returned as jacd[j, i, n]. Except for the Jacobians, it
19
+ really is easier to deal with x[0] and x[1] than x[:,0] and x[:,1]. Of course,
20
+ you can always use the transpose() function from SciPy explicitly.
21
+
22
+ * Examples -- See the accompanying file test/test.py for examples of how to set
23
+ up fits of your own. Some are taken from the User's Guide; some are from
24
+ other sources.
25
+
26
+ * Models -- Some common models are instantiated in the accompanying module
27
+ models.py . Contributions are welcome.
28
+
29
+ Credits
30
+ =======
31
+
32
+ * Thanks to Arnold Moene and Gerard Vermeulen for fixing some killer bugs.
33
+
34
+ Robert Kern
35
+ robert.kern@gmail.com
36
+
37
+ """
38
+ import os
39
+
40
+ import numpy as np
41
+ from warnings import warn
42
+ from scipy.odr import __odrpack
43
+
44
+ __all__ = ['odr', 'OdrWarning', 'OdrError', 'OdrStop',
45
+ 'Data', 'RealData', 'Model', 'Output', 'ODR',
46
+ 'odr_error', 'odr_stop']
47
+
48
+ odr = __odrpack.odr
49
+
50
+
51
+ class OdrWarning(UserWarning):
52
+ """
53
+ Warning indicating that the data passed into
54
+ ODR will cause problems when passed into 'odr'
55
+ that the user should be aware of.
56
+ """
57
+ pass
58
+
59
+
60
+ class OdrError(Exception):
61
+ """
62
+ Exception indicating an error in fitting.
63
+
64
+ This is raised by `~scipy.odr.odr` if an error occurs during fitting.
65
+ """
66
+ pass
67
+
68
+
69
+ class OdrStop(Exception):
70
+ """
71
+ Exception stopping fitting.
72
+
73
+ You can raise this exception in your objective function to tell
74
+ `~scipy.odr.odr` to stop fitting.
75
+ """
76
+ pass
77
+
78
+
79
+ # Backwards compatibility
80
+ odr_error = OdrError
81
+ odr_stop = OdrStop
82
+
83
+ __odrpack._set_exceptions(OdrError, OdrStop)
84
+
85
+
86
+ def _conv(obj, dtype=None):
87
+ """ Convert an object to the preferred form for input to the odr routine.
88
+ """
89
+
90
+ if obj is None:
91
+ return obj
92
+ else:
93
+ if dtype is None:
94
+ obj = np.asarray(obj)
95
+ else:
96
+ obj = np.asarray(obj, dtype)
97
+ if obj.shape == ():
98
+ # Scalar.
99
+ return obj.dtype.type(obj)
100
+ else:
101
+ return obj
102
+
103
+
104
+ def _report_error(info):
105
+ """ Interprets the return code of the odr routine.
106
+
107
+ Parameters
108
+ ----------
109
+ info : int
110
+ The return code of the odr routine.
111
+
112
+ Returns
113
+ -------
114
+ problems : list(str)
115
+ A list of messages about why the odr() routine stopped.
116
+ """
117
+
118
+ stopreason = ('Blank',
119
+ 'Sum of squares convergence',
120
+ 'Parameter convergence',
121
+ 'Both sum of squares and parameter convergence',
122
+ 'Iteration limit reached')[info % 5]
123
+
124
+ if info >= 5:
125
+ # questionable results or fatal error
126
+
127
+ I = (info//10000 % 10,
128
+ info//1000 % 10,
129
+ info//100 % 10,
130
+ info//10 % 10,
131
+ info % 10)
132
+ problems = []
133
+
134
+ if I[0] == 0:
135
+ if I[1] != 0:
136
+ problems.append('Derivatives possibly not correct')
137
+ if I[2] != 0:
138
+ problems.append('Error occurred in callback')
139
+ if I[3] != 0:
140
+ problems.append('Problem is not full rank at solution')
141
+ problems.append(stopreason)
142
+ elif I[0] == 1:
143
+ if I[1] != 0:
144
+ problems.append('N < 1')
145
+ if I[2] != 0:
146
+ problems.append('M < 1')
147
+ if I[3] != 0:
148
+ problems.append('NP < 1 or NP > N')
149
+ if I[4] != 0:
150
+ problems.append('NQ < 1')
151
+ elif I[0] == 2:
152
+ if I[1] != 0:
153
+ problems.append('LDY and/or LDX incorrect')
154
+ if I[2] != 0:
155
+ problems.append('LDWE, LD2WE, LDWD, and/or LD2WD incorrect')
156
+ if I[3] != 0:
157
+ problems.append('LDIFX, LDSTPD, and/or LDSCLD incorrect')
158
+ if I[4] != 0:
159
+ problems.append('LWORK and/or LIWORK too small')
160
+ elif I[0] == 3:
161
+ if I[1] != 0:
162
+ problems.append('STPB and/or STPD incorrect')
163
+ if I[2] != 0:
164
+ problems.append('SCLB and/or SCLD incorrect')
165
+ if I[3] != 0:
166
+ problems.append('WE incorrect')
167
+ if I[4] != 0:
168
+ problems.append('WD incorrect')
169
+ elif I[0] == 4:
170
+ problems.append('Error in derivatives')
171
+ elif I[0] == 5:
172
+ problems.append('Error occurred in callback')
173
+ elif I[0] == 6:
174
+ problems.append('Numerical error detected')
175
+
176
+ return problems
177
+
178
+ else:
179
+ return [stopreason]
180
+
181
+
182
+ class Data:
183
+ """
184
+ The data to fit.
185
+
186
+ Parameters
187
+ ----------
188
+ x : array_like
189
+ Observed data for the independent variable of the regression
190
+ y : array_like, optional
191
+ If array-like, observed data for the dependent variable of the
192
+ regression. A scalar input implies that the model to be used on
193
+ the data is implicit.
194
+ we : array_like, optional
195
+ If `we` is a scalar, then that value is used for all data points (and
196
+ all dimensions of the response variable).
197
+ If `we` is a rank-1 array of length q (the dimensionality of the
198
+ response variable), then this vector is the diagonal of the covariant
199
+ weighting matrix for all data points.
200
+ If `we` is a rank-1 array of length n (the number of data points), then
201
+ the i'th element is the weight for the i'th response variable
202
+ observation (single-dimensional only).
203
+ If `we` is a rank-2 array of shape (q, q), then this is the full
204
+ covariant weighting matrix broadcast to each observation.
205
+ If `we` is a rank-2 array of shape (q, n), then `we[:,i]` is the
206
+ diagonal of the covariant weighting matrix for the i'th observation.
207
+ If `we` is a rank-3 array of shape (q, q, n), then `we[:,:,i]` is the
208
+ full specification of the covariant weighting matrix for each
209
+ observation.
210
+ If the fit is implicit, then only a positive scalar value is used.
211
+ wd : array_like, optional
212
+ If `wd` is a scalar, then that value is used for all data points
213
+ (and all dimensions of the input variable). If `wd` = 0, then the
214
+ covariant weighting matrix for each observation is set to the identity
215
+ matrix (so each dimension of each observation has the same weight).
216
+ If `wd` is a rank-1 array of length m (the dimensionality of the input
217
+ variable), then this vector is the diagonal of the covariant weighting
218
+ matrix for all data points.
219
+ If `wd` is a rank-1 array of length n (the number of data points), then
220
+ the i'th element is the weight for the ith input variable observation
221
+ (single-dimensional only).
222
+ If `wd` is a rank-2 array of shape (m, m), then this is the full
223
+ covariant weighting matrix broadcast to each observation.
224
+ If `wd` is a rank-2 array of shape (m, n), then `wd[:,i]` is the
225
+ diagonal of the covariant weighting matrix for the ith observation.
226
+ If `wd` is a rank-3 array of shape (m, m, n), then `wd[:,:,i]` is the
227
+ full specification of the covariant weighting matrix for each
228
+ observation.
229
+ fix : array_like of ints, optional
230
+ The `fix` argument is the same as ifixx in the class ODR. It is an
231
+ array of integers with the same shape as data.x that determines which
232
+ input observations are treated as fixed. One can use a sequence of
233
+ length m (the dimensionality of the input observations) to fix some
234
+ dimensions for all observations. A value of 0 fixes the observation,
235
+ a value > 0 makes it free.
236
+ meta : dict, optional
237
+ Free-form dictionary for metadata.
238
+
239
+ Notes
240
+ -----
241
+ Each argument is attached to the member of the instance of the same name.
242
+ The structures of `x` and `y` are described in the Model class docstring.
243
+ If `y` is an integer, then the Data instance can only be used to fit with
244
+ implicit models where the dimensionality of the response is equal to the
245
+ specified value of `y`.
246
+
247
+ The `we` argument weights the effect a deviation in the response variable
248
+ has on the fit. The `wd` argument weights the effect a deviation in the
249
+ input variable has on the fit. To handle multidimensional inputs and
250
+ responses easily, the structure of these arguments has the n'th
251
+ dimensional axis first. These arguments heavily use the structured
252
+ arguments feature of ODRPACK to conveniently and flexibly support all
253
+ options. See the ODRPACK User's Guide for a full explanation of how these
254
+ weights are used in the algorithm. Basically, a higher value of the weight
255
+ for a particular data point makes a deviation at that point more
256
+ detrimental to the fit.
257
+
258
+ """
259
+
260
+ def __init__(self, x, y=None, we=None, wd=None, fix=None, meta=None):
261
+ self.x = _conv(x)
262
+
263
+ if not isinstance(self.x, np.ndarray):
264
+ raise ValueError("Expected an 'ndarray' of data for 'x', "
265
+ f"but instead got data of type '{type(self.x).__name__}'")
266
+
267
+ self.y = _conv(y)
268
+ self.we = _conv(we)
269
+ self.wd = _conv(wd)
270
+ self.fix = _conv(fix)
271
+ self.meta = {} if meta is None else meta
272
+
273
+ def set_meta(self, **kwds):
274
+ """ Update the metadata dictionary with the keywords and data provided
275
+ by keywords.
276
+
277
+ Examples
278
+ --------
279
+ ::
280
+
281
+ data.set_meta(lab="Ph 7; Lab 26", title="Ag110 + Ag108 Decay")
282
+ """
283
+
284
+ self.meta.update(kwds)
285
+
286
+ def __getattr__(self, attr):
287
+ """ Dispatch attribute access to the metadata dictionary.
288
+ """
289
+ if attr != "meta" and attr in self.meta:
290
+ return self.meta[attr]
291
+ else:
292
+ raise AttributeError("'%s' not in metadata" % attr)
293
+
294
+
295
+ class RealData(Data):
296
+ """
297
+ The data, with weightings as actual standard deviations and/or
298
+ covariances.
299
+
300
+ Parameters
301
+ ----------
302
+ x : array_like
303
+ Observed data for the independent variable of the regression
304
+ y : array_like, optional
305
+ If array-like, observed data for the dependent variable of the
306
+ regression. A scalar input implies that the model to be used on
307
+ the data is implicit.
308
+ sx : array_like, optional
309
+ Standard deviations of `x`.
310
+ `sx` are standard deviations of `x` and are converted to weights by
311
+ dividing 1.0 by their squares.
312
+ sy : array_like, optional
313
+ Standard deviations of `y`.
314
+ `sy` are standard deviations of `y` and are converted to weights by
315
+ dividing 1.0 by their squares.
316
+ covx : array_like, optional
317
+ Covariance of `x`
318
+ `covx` is an array of covariance matrices of `x` and are converted to
319
+ weights by performing a matrix inversion on each observation's
320
+ covariance matrix.
321
+ covy : array_like, optional
322
+ Covariance of `y`
323
+ `covy` is an array of covariance matrices and are converted to
324
+ weights by performing a matrix inversion on each observation's
325
+ covariance matrix.
326
+ fix : array_like, optional
327
+ The argument and member fix is the same as Data.fix and ODR.ifixx:
328
+ It is an array of integers with the same shape as `x` that
329
+ determines which input observations are treated as fixed. One can
330
+ use a sequence of length m (the dimensionality of the input
331
+ observations) to fix some dimensions for all observations. A value
332
+ of 0 fixes the observation, a value > 0 makes it free.
333
+ meta : dict, optional
334
+ Free-form dictionary for metadata.
335
+
336
+ Notes
337
+ -----
338
+ The weights `wd` and `we` are computed from provided values as follows:
339
+
340
+ `sx` and `sy` are converted to weights by dividing 1.0 by their squares.
341
+ For example, ``wd = 1./np.power(`sx`, 2)``.
342
+
343
+ `covx` and `covy` are arrays of covariance matrices and are converted to
344
+ weights by performing a matrix inversion on each observation's covariance
345
+ matrix. For example, ``we[i] = np.linalg.inv(covy[i])``.
346
+
347
+ These arguments follow the same structured argument conventions as wd and
348
+ we only restricted by their natures: `sx` and `sy` can't be rank-3, but
349
+ `covx` and `covy` can be.
350
+
351
+ Only set *either* `sx` or `covx` (not both). Setting both will raise an
352
+ exception. Same with `sy` and `covy`.
353
+
354
+ """
355
+
356
+ def __init__(self, x, y=None, sx=None, sy=None, covx=None, covy=None,
357
+ fix=None, meta=None):
358
+ if (sx is not None) and (covx is not None):
359
+ raise ValueError("cannot set both sx and covx")
360
+ if (sy is not None) and (covy is not None):
361
+ raise ValueError("cannot set both sy and covy")
362
+
363
+ # Set flags for __getattr__
364
+ self._ga_flags = {}
365
+ if sx is not None:
366
+ self._ga_flags['wd'] = 'sx'
367
+ else:
368
+ self._ga_flags['wd'] = 'covx'
369
+ if sy is not None:
370
+ self._ga_flags['we'] = 'sy'
371
+ else:
372
+ self._ga_flags['we'] = 'covy'
373
+
374
+ self.x = _conv(x)
375
+
376
+ if not isinstance(self.x, np.ndarray):
377
+ raise ValueError("Expected an 'ndarray' of data for 'x', "
378
+ f"but instead got data of type '{type(self.x).__name__}'")
379
+
380
+ self.y = _conv(y)
381
+ self.sx = _conv(sx)
382
+ self.sy = _conv(sy)
383
+ self.covx = _conv(covx)
384
+ self.covy = _conv(covy)
385
+ self.fix = _conv(fix)
386
+ self.meta = {} if meta is None else meta
387
+
388
+ def _sd2wt(self, sd):
389
+ """ Convert standard deviation to weights.
390
+ """
391
+
392
+ return 1./np.power(sd, 2)
393
+
394
+ def _cov2wt(self, cov):
395
+ """ Convert covariance matrix(-ices) to weights.
396
+ """
397
+
398
+ from scipy.linalg import inv
399
+
400
+ if len(cov.shape) == 2:
401
+ return inv(cov)
402
+ else:
403
+ weights = np.zeros(cov.shape, float)
404
+
405
+ for i in range(cov.shape[-1]): # n
406
+ weights[:,:,i] = inv(cov[:,:,i])
407
+
408
+ return weights
409
+
410
+ def __getattr__(self, attr):
411
+
412
+ if attr not in ('wd', 'we'):
413
+ if attr != "meta" and attr in self.meta:
414
+ return self.meta[attr]
415
+ else:
416
+ raise AttributeError("'%s' not in metadata" % attr)
417
+ else:
418
+ lookup_tbl = {('wd', 'sx'): (self._sd2wt, self.sx),
419
+ ('wd', 'covx'): (self._cov2wt, self.covx),
420
+ ('we', 'sy'): (self._sd2wt, self.sy),
421
+ ('we', 'covy'): (self._cov2wt, self.covy)}
422
+
423
+ func, arg = lookup_tbl[(attr, self._ga_flags[attr])]
424
+
425
+ if arg is not None:
426
+ return func(*(arg,))
427
+ else:
428
+ return None
429
+
430
+
431
class Model:
    """
    The Model class stores information about the function you wish to fit.

    It stores the function itself, at the least, and optionally stores
    functions which compute the Jacobians used during fitting. Also, one
    can provide a function that will provide reasonable starting values
    for the fit parameters possibly given the set of data.

    Parameters
    ----------
    fcn : function
          fcn(beta, x) --> y
    fjacb : function
          Jacobian of fcn wrt the fit parameters beta.

          fjacb(beta, x) --> @f_i(x,B)/@B_j
    fjacd : function
          Jacobian of fcn wrt the (possibly multidimensional) input
          variable.

          fjacd(beta, x) --> @f_i(x,B)/@x_j
    extra_args : tuple, optional
          If specified, `extra_args` should be a tuple of extra
          arguments to pass to `fcn`, `fjacb`, and `fjacd`. Each will be called
          by `apply(fcn, (beta, x) + extra_args)`
    estimate : array_like of rank-1
          Provides estimates of the fit parameters from the data

          estimate(data) --> estbeta
    implicit : boolean
          If TRUE, specifies that the model
          is implicit; i.e `fcn(beta, x)` ~= 0 and there is no y data to fit
          against
    meta : dict, optional
          freeform dictionary of metadata for the model

    Notes
    -----
    Note that the `fcn`, `fjacb`, and `fjacd` operate on NumPy arrays and
    return a NumPy array. The `estimate` object takes an instance of the
    Data class.

    Here are the rules for the shapes of the argument and return
    arrays of the callback functions:

    `x`
        if the input data is single-dimensional, then `x` is rank-1
        array; i.e., ``x = array([1, 2, 3, ...]); x.shape = (n,)``
        If the input data is multi-dimensional, then `x` is a rank-2 array;
        i.e., ``x = array([[1, 2, ...], [2, 4, ...]]); x.shape = (m, n)``.
        In all cases, it has the same shape as the input data array passed to
        `~scipy.odr.odr`. `m` is the dimensionality of the input data,
        `n` is the number of observations.
    `y`
        if the response variable is single-dimensional, then `y` is a
        rank-1 array, i.e., ``y = array([2, 4, ...]); y.shape = (n,)``.
        If the response variable is multi-dimensional, then `y` is a rank-2
        array, i.e., ``y = array([[2, 4, ...], [3, 6, ...]]); y.shape =
        (q, n)`` where `q` is the dimensionality of the response variable.
    `beta`
        rank-1 array of length `p` where `p` is the number of parameters;
        i.e. ``beta = array([B_1, B_2, ..., B_p])``
    `fjacb`
        if the response variable is multi-dimensional, then the
        return array's shape is `(q, p, n)` such that ``fjacb(x,beta)[l,k,i] =
        d f_l(X,B)/d B_k`` evaluated at the ith data point. If `q == 1`, then
        the return array is only rank-2 and with shape `(p, n)`.
    `fjacd`
        as with fjacb, only the return array's shape is `(q, m, n)`
        such that ``fjacd(x,beta)[l,j,i] = d f_l(X,B)/d X_j`` at the ith data
        point. If `q == 1`, then the return array's shape is `(m, n)`. If
        `m == 1`, the shape is (q, n). If `m == q == 1`, the shape is `(n,)`.

    """

    def __init__(self, fcn, fjacb=None, fjacd=None,
                 extra_args=None, estimate=None, implicit=0, meta=None):

        self.fcn = fcn
        self.fjacb = fjacb
        self.fjacd = fjacd

        # Normalize to a tuple so the callbacks can always be invoked as
        # fcn(beta, x, *extra_args).
        if extra_args is not None:
            extra_args = tuple(extra_args)

        self.extra_args = extra_args
        self.estimate = estimate
        self.implicit = implicit
        self.meta = meta if meta is not None else {}

    def set_meta(self, **kwds):
        """ Update the metadata dictionary with the keywords and data provided
        here.

        Examples
        --------
        set_meta(name="Exponential", equation="y = a exp(b x) + c")
        """

        self.meta.update(kwds)

    def __getattr__(self, attr):
        """ Dispatch attribute access to the metadata.

        Raises AttributeError for any name not stored in `meta` (only
        called by Python for names not found as real attributes).
        """

        if attr != "meta" and attr in self.meta:
            return self.meta[attr]
        else:
            raise AttributeError("'%s' not in metadata" % attr)
541
+
542
+
543
class Output:
    """
    The Output class stores the output of an ODR run.

    Attributes
    ----------
    beta : ndarray
        Estimated parameter values, of shape (q,).
    sd_beta : ndarray
        Standard deviations of the estimated parameters, of shape (p,).
    cov_beta : ndarray
        Covariance matrix of the estimated parameters, of shape (p,p).
        Note that this `cov_beta` is not scaled by the residual variance
        `res_var`, whereas `sd_beta` is. This means
        ``np.sqrt(np.diag(output.cov_beta * output.res_var))`` is the same
        result as `output.sd_beta`.
    delta : ndarray, optional
        Array of estimated errors in input variables, of same shape as `x`.
    eps : ndarray, optional
        Array of estimated errors in response variables, of same shape as `y`.
    xplus : ndarray, optional
        Array of ``x + delta``.
    y : ndarray, optional
        Array ``y = fcn(x + delta)``.
    res_var : float, optional
        Residual variance.
    sum_square : float, optional
        Sum of squares error.
    sum_square_delta : float, optional
        Sum of squares of delta error.
    sum_square_eps : float, optional
        Sum of squares of eps error.
    inv_condnum : float, optional
        Inverse condition number (cf. ODRPACK UG p. 77).
    rel_error : float, optional
        Relative error in function values computed within fcn.
    work : ndarray, optional
        Final work array.
    work_ind : dict, optional
        Indices into work for drawing out values (cf. ODRPACK UG p. 83).
    info : int, optional
        Reason for returning, as output by ODRPACK (cf. ODRPACK UG p. 38).
    stopreason : list of str, optional
        `info` interpreted into English.

    Notes
    -----
    Takes one argument for initialization, the return value from the
    function `~scipy.odr.odr`. The attributes listed as "optional" above are
    only present if `~scipy.odr.odr` was run with ``full_output=1``.

    """

    def __init__(self, output):
        # The first three entries are always present.
        self.beta = output[0]
        self.sd_beta = output[1]
        self.cov_beta = output[2]

        if len(output) == 4:
            # full output: the fourth entry is a dict of extra results;
            # splice its keys directly into this instance's namespace.
            self.__dict__.update(output[3])
            self.stopreason = _report_error(self.info)

    def pprint(self):
        """ Pretty-print important results.
        """

        print('Beta:', self.beta)
        print('Beta Std Error:', self.sd_beta)
        print('Beta Covariance:', self.cov_beta)
        # `info` is only present when the run used full_output=1.
        if hasattr(self, 'info'):
            print('Residual Variance:',self.res_var)
            print('Inverse Condition #:', self.inv_condnum)
            print('Reason(s) for Halting:')
            for r in self.stopreason:
                print('  %s' % r)
619
+
620
+
621
class ODR:
    """
    The ODR class gathers all information and coordinates the running of the
    main fitting routine.

    Members of instances of the ODR class have the same names as the arguments
    to the initialization routine.

    Parameters
    ----------
    data : Data class instance
        instance of the Data class
    model : Model class instance
        instance of the Model class

    Other Parameters
    ----------------
    beta0 : array_like of rank-1
        a rank-1 sequence of initial parameter values. Optional if
        model provides an "estimate" function to estimate these values.
    delta0 : array_like of floats of rank-1, optional
        a (double-precision) float array to hold the initial values of
        the errors in the input variables. Must be same shape as data.x
    ifixb : array_like of ints of rank-1, optional
        sequence of integers with the same length as beta0 that determines
        which parameters are held fixed. A value of 0 fixes the parameter,
        a value > 0 makes the parameter free.
    ifixx : array_like of ints with same shape as data.x, optional
        an array of integers with the same shape as data.x that determines
        which input observations are treated as fixed. One can use a sequence
        of length m (the dimensionality of the input observations) to fix some
        dimensions for all observations. A value of 0 fixes the observation,
        a value > 0 makes it free.
    job : int, optional
        an integer telling ODRPACK what tasks to perform. See p. 31 of the
        ODRPACK User's Guide if you absolutely must set the value here. Use the
        method set_job post-initialization for a more readable interface.
    iprint : int, optional
        an integer telling ODRPACK what to print. See pp. 33-34 of the
        ODRPACK User's Guide if you absolutely must set the value here. Use the
        method set_iprint post-initialization for a more readable interface.
    errfile : str, optional
        string with the filename to print ODRPACK errors to. If the file already
        exists, an error will be thrown. The `overwrite` argument can be used to
        prevent this. *Do Not Open This File Yourself!*
    rptfile : str, optional
        string with the filename to print ODRPACK summaries to. If the file
        already exists, an error will be thrown. The `overwrite` argument can be
        used to prevent this. *Do Not Open This File Yourself!*
    ndigit : int, optional
        integer specifying the number of reliable digits in the computation
        of the function.
    taufac : float, optional
        float specifying the initial trust region. The default value is 1.
        The initial trust region is equal to taufac times the length of the
        first computed Gauss-Newton step. taufac must be less than 1.
    sstol : float, optional
        float specifying the tolerance for convergence based on the relative
        change in the sum-of-squares. The default value is eps**(1/2) where eps
        is the smallest value such that 1 + eps > 1 for double precision
        computation on the machine. sstol must be less than 1.
    partol : float, optional
        float specifying the tolerance for convergence based on the relative
        change in the estimated parameters. The default value is eps**(2/3) for
        explicit models and ``eps**(1/3)`` for implicit models. partol must be less
        than 1.
    maxit : int, optional
        integer specifying the maximum number of iterations to perform. For
        first runs, maxit is the total number of iterations performed and
        defaults to 50. For restarts, maxit is the number of additional
        iterations to perform and defaults to 10.
    stpb : array_like, optional
        sequence (``len(stpb) == len(beta0)``) of relative step sizes to compute
        finite difference derivatives wrt the parameters.
    stpd : optional
        array (``stpd.shape == data.x.shape`` or ``stpd.shape == (m,)``) of relative
        step sizes to compute finite difference derivatives wrt the input
        variable errors. If stpd is a rank-1 array with length m (the
        dimensionality of the input variable), then the values are broadcast to
        all observations.
    sclb : array_like, optional
        sequence (``len(stpb) == len(beta0)``) of scaling factors for the
        parameters. The purpose of these scaling factors are to scale all of
        the parameters to around unity. Normally appropriate scaling factors
        are computed if this argument is not specified. Specify them yourself
        if the automatic procedure goes awry.
    scld : array_like, optional
        array (scld.shape == data.x.shape or scld.shape == (m,)) of scaling
        factors for the *errors* in the input variables. Again, these factors
        are automatically computed if you do not provide them. If scld.shape ==
        (m,), then the scaling factors are broadcast to all observations.
    work : ndarray, optional
        array to hold the double-valued working data for ODRPACK. When
        restarting, takes the value of self.output.work.
    iwork : ndarray, optional
        array to hold the integer-valued working data for ODRPACK. When
        restarting, takes the value of self.output.iwork.
    overwrite : bool, optional
        If it is True, output files defined by `errfile` and `rptfile` are
        overwritten. The default is False.

    Attributes
    ----------
    data : Data
        The data for this fit
    model : Model
        The model used in fit
    output : Output
        An instance if the Output class containing all of the returned
        data from an invocation of ODR.run() or ODR.restart()

    """

    def __init__(self, data, model, beta0=None, delta0=None, ifixb=None,
                 ifixx=None, job=None, iprint=None, errfile=None, rptfile=None,
                 ndigit=None, taufac=None, sstol=None, partol=None, maxit=None,
                 stpb=None, stpd=None, sclb=None, scld=None, work=None,
                 iwork=None, overwrite=False):

        self.data = data
        self.model = model

        if beta0 is None:
            # Fall back to the model-supplied estimator when no explicit
            # starting parameters were given.
            if self.model.estimate is not None:
                self.beta0 = _conv(self.model.estimate(self.data))
            else:
                raise ValueError(
                    "must specify beta0 or provide an estimator with the model"
                )
        else:
            self.beta0 = _conv(beta0)

        # The Data object's `fix` doubles as the default `ifixx`.
        if ifixx is None and data.fix is not None:
            ifixx = data.fix

        if overwrite:
            # remove output files for overwriting.
            if rptfile is not None and os.path.exists(rptfile):
                os.remove(rptfile)
            if errfile is not None and os.path.exists(errfile):
                os.remove(errfile)

        self.delta0 = _conv(delta0)
        # These really are 32-bit integers in FORTRAN (gfortran), even on 64-bit
        # platforms.
        # XXX: some other FORTRAN compilers may not agree.
        self.ifixx = _conv(ifixx, dtype=np.int32)
        self.ifixb = _conv(ifixb, dtype=np.int32)
        self.job = job
        self.iprint = iprint
        self.errfile = errfile
        self.rptfile = rptfile
        self.ndigit = ndigit
        self.taufac = taufac
        self.sstol = sstol
        self.partol = partol
        self.maxit = maxit
        self.stpb = _conv(stpb)
        self.stpd = _conv(stpd)
        self.sclb = _conv(sclb)
        self.scld = _conv(scld)
        self.work = _conv(work)
        self.iwork = _conv(iwork)

        self.output = None

        self._check()

    def _check(self):
        """ Check the inputs for consistency, but don't bother checking things
        that the builtin function odr will check.

        Raises OdrError for shape mismatches between the data, the model
        callbacks' outputs, and delta0; warns (OdrWarning) on empty data.
        """

        x_s = list(self.data.x.shape)

        if isinstance(self.data.y, np.ndarray):
            y_s = list(self.data.y.shape)
            if self.model.implicit:
                raise OdrError("an implicit model cannot use response data")
        else:
            # implicit model with q == self.data.y
            y_s = [self.data.y, x_s[-1]]
            if not self.model.implicit:
                raise OdrError("an explicit model needs response data")
            self.set_job(fit_type=1)

        if x_s[-1] != y_s[-1]:
            raise OdrError("number of observations do not match")

        n = x_s[-1]

        # m: dimensionality of the input, q: of the response, p: # of params.
        if len(x_s) == 2:
            m = x_s[0]
        else:
            m = 1
        if len(y_s) == 2:
            q = y_s[0]
        else:
            q = 1

        p = len(self.beta0)

        # permissible output array shapes

        fcn_perms = [(q, n)]
        fjacd_perms = [(q, m, n)]
        fjacb_perms = [(q, p, n)]

        # Rank-reduced variants are accepted whenever a dimension is 1.
        if q == 1:
            fcn_perms.append((n,))
            fjacd_perms.append((m, n))
            fjacb_perms.append((p, n))
        if m == 1:
            fjacd_perms.append((q, n))
        if p == 1:
            fjacb_perms.append((q, n))
        if m == q == 1:
            fjacd_perms.append((n,))
        if p == q == 1:
            fjacb_perms.append((n,))

        # try evaluating the supplied functions to make sure they provide
        # sensible outputs

        arglist = (self.beta0, self.data.x)
        if self.model.extra_args is not None:
            arglist = arglist + self.model.extra_args
        res = self.model.fcn(*arglist)

        if res.shape not in fcn_perms:
            print(res.shape)
            print(fcn_perms)
            raise OdrError("fcn does not output %s-shaped array" % y_s)

        if self.model.fjacd is not None:
            res = self.model.fjacd(*arglist)
            if res.shape not in fjacd_perms:
                raise OdrError(
                    "fjacd does not output %s-shaped array" % repr((q, m, n)))
        if self.model.fjacb is not None:
            res = self.model.fjacb(*arglist)
            if res.shape not in fjacb_perms:
                raise OdrError(
                    "fjacb does not output %s-shaped array" % repr((q, p, n)))

        # check shape of delta0

        if self.delta0 is not None and self.delta0.shape != self.data.x.shape:
            raise OdrError(
                "delta0 is not a %s-shaped array" % repr(self.data.x.shape))

        if self.data.x.size == 0:
            warn("Empty data detected for ODR instance. "
                 "Do not expect any fitting to occur",
                 OdrWarning, stacklevel=3)

    def _gen_work(self):
        """ Generate a suitable work array if one does not already exist.

        The required length follows the ODRPACK workspace formulas; an
        existing float64 array of exactly the right shape is reused.
        """

        n = self.data.x.shape[-1]
        p = self.beta0.shape[0]

        if len(self.data.x.shape) == 2:
            m = self.data.x.shape[0]
        else:
            m = 1

        # For implicit models, data.y holds q (an int) rather than an array.
        if self.model.implicit:
            q = self.data.y
        elif len(self.data.y.shape) == 2:
            q = self.data.y.shape[0]
        else:
            q = 1

        # ldwe/ld2we: leading dimensions of the `we` weight array as
        # ODRPACK expects them (cf. ODRPACK UG).
        if self.data.we is None:
            ldwe = ld2we = 1
        elif len(self.data.we.shape) == 3:
            ld2we, ldwe = self.data.we.shape[1:]
        else:
            we = self.data.we
            ldwe = 1
            ld2we = 1
            if we.ndim == 1 and q == 1:
                ldwe = n
            elif we.ndim == 2:
                if we.shape == (q, q):
                    ld2we = q
                elif we.shape == (q, n):
                    ldwe = n

        if self.job % 10 < 2:
            # ODR not OLS
            lwork = (18 + 11*p + p*p + m + m*m + 4*n*q + 6*n*m + 2*n*q*p +
                     2*n*q*m + q*q + 5*q + q*(p+m) + ldwe*ld2we*q)
        else:
            # OLS not ODR
            lwork = (18 + 11*p + p*p + m + m*m + 4*n*q + 2*n*m + 2*n*q*p +
                     5*q + q*(p+m) + ldwe*ld2we*q)

        if isinstance(self.work, np.ndarray) and self.work.shape == (lwork,)\
                and self.work.dtype.str.endswith('f8'):
            # the existing array is fine
            return
        else:
            self.work = np.zeros((lwork,), float)

    def set_job(self, fit_type=None, deriv=None, var_calc=None,
                del_init=None, restart=None):
        """
        Sets the "job" parameter is a hopefully comprehensible way.

        If an argument is not specified, then the value is left as is. The
        default value from class initialization is for all of these options set
        to 0.

        Parameters
        ----------
        fit_type : {0, 1, 2} int
            0 -> explicit ODR

            1 -> implicit ODR

            2 -> ordinary least-squares
        deriv : {0, 1, 2, 3} int
            0 -> forward finite differences

            1 -> central finite differences

            2 -> user-supplied derivatives (Jacobians) with results
              checked by ODRPACK

            3 -> user-supplied derivatives, no checking
        var_calc : {0, 1, 2} int
            0 -> calculate asymptotic covariance matrix and fit
                 parameter uncertainties (V_B, s_B) using derivatives
                 recomputed at the final solution

            1 -> calculate V_B and s_B using derivatives from last iteration

            2 -> do not calculate V_B and s_B
        del_init : {0, 1} int
            0 -> initial input variable offsets set to 0

            1 -> initial offsets provided by user in variable "work"
        restart : {0, 1} int
            0 -> fit is not a restart

            1 -> fit is a restart

        Notes
        -----
        The permissible values are different from those given on pg. 31 of the
        ODRPACK User's Guide only in that one cannot specify numbers greater than
        the last value for each variable.

        If one does not supply functions to compute the Jacobians, the fitting
        procedure will change deriv to 0, finite differences, as a default. To
        initialize the input variable offsets by yourself, set del_init to 1 and
        put the offsets into the "work" variable correctly.

        """

        # `job` is a 5-digit decimal number; decompose it into its digits
        # (most-significant first) so individual fields can be updated.
        if self.job is None:
            job_l = [0, 0, 0, 0, 0]
        else:
            job_l = [self.job // 10000 % 10,
                     self.job // 1000 % 10,
                     self.job // 100 % 10,
                     self.job // 10 % 10,
                     self.job % 10]

        # Only overwrite a digit when the caller passed a valid value.
        if fit_type in (0, 1, 2):
            job_l[4] = fit_type
        if deriv in (0, 1, 2, 3):
            job_l[3] = deriv
        if var_calc in (0, 1, 2):
            job_l[2] = var_calc
        if del_init in (0, 1):
            job_l[1] = del_init
        if restart in (0, 1):
            job_l[0] = restart

        self.job = (job_l[0]*10000 + job_l[1]*1000 +
                    job_l[2]*100 + job_l[3]*10 + job_l[4])

    def set_iprint(self, init=None, so_init=None,
                   iter=None, so_iter=None, iter_step=None, final=None,
                   so_final=None):
        """ Set the iprint parameter for the printing of computation reports.

        If any of the arguments are specified here, then they are set in the
        iprint member. If iprint is not set manually or with this method, then
        ODRPACK defaults to no printing. If no filename is specified with the
        member rptfile, then ODRPACK prints to stdout. One can tell ODRPACK to
        print to stdout in addition to the specified filename by setting the
        so_* arguments to this function, but one cannot specify to print to
        stdout but not a file since one can do that by not specifying a rptfile
        filename.

        There are three reports: initialization, iteration, and final reports.
        They are represented by the arguments init, iter, and final
        respectively. The permissible values are 0, 1, and 2 representing "no
        report", "short report", and "long report" respectively.

        The argument iter_step (0 <= iter_step <= 9) specifies how often to make
        the iteration report; the report will be made for every iter_step'th
        iteration starting with iteration one. If iter_step == 0, then no
        iteration report is made, regardless of the other arguments.

        If the rptfile is None, then any so_* arguments supplied will raise an
        exception.
        """
        if self.iprint is None:
            self.iprint = 0

        # `iprint` is a 4-digit decimal number; split into its digits.
        ip = [self.iprint // 1000 % 10,
              self.iprint // 100 % 10,
              self.iprint // 10 % 10,
              self.iprint % 10]

        # make a list to convert iprint digits to/from argument inputs
        #                   rptfile, stdout
        ip2arg = [[0, 0],  # none,  none
                  [1, 0],  # short, none
                  [2, 0],  # long,  none
                  [1, 1],  # short, short
                  [2, 1],  # long,  short
                  [1, 2],  # short, long
                  [2, 2]]  # long,  long

        if (self.rptfile is None and
                (so_init is not None or
                 so_iter is not None or
                 so_final is not None)):
            raise OdrError(
                "no rptfile specified, cannot output to stdout twice")

        # Expand the three report digits into (file, stdout) pairs, patch
        # in the caller's choices, then re-encode via ip2arg.index.
        iprint_l = ip2arg[ip[0]] + ip2arg[ip[1]] + ip2arg[ip[3]]

        if init is not None:
            iprint_l[0] = init
        if so_init is not None:
            iprint_l[1] = so_init
        if iter is not None:
            iprint_l[2] = iter
        if so_iter is not None:
            iprint_l[3] = so_iter
        if final is not None:
            iprint_l[4] = final
        if so_final is not None:
            iprint_l[5] = so_final

        if iter_step in range(10):
            # 0..9
            ip[2] = iter_step

        ip[0] = ip2arg.index(iprint_l[0:2])
        ip[1] = ip2arg.index(iprint_l[2:4])
        ip[3] = ip2arg.index(iprint_l[4:6])

        self.iprint = ip[0]*1000 + ip[1]*100 + ip[2]*10 + ip[3]

    def run(self):
        """ Run the fitting routine with all of the information given and with ``full_output=1``.

        Returns
        -------
        output : Output instance
            This object is also assigned to the attribute .output .
        """  # noqa: E501

        args = (self.model.fcn, self.beta0, self.data.y, self.data.x)
        kwds = {'full_output': 1}
        kwd_l = ['ifixx', 'ifixb', 'job', 'iprint', 'errfile', 'rptfile',
                 'ndigit', 'taufac', 'sstol', 'partol', 'maxit', 'stpb',
                 'stpd', 'sclb', 'scld', 'work', 'iwork']

        if self.delta0 is not None and (self.job // 10000) % 10 == 0:
            # delta0 provided and fit is not a restart
            self._gen_work()

            d0 = np.ravel(self.delta0)

            # ODRPACK reads the initial deltas from the head of `work`.
            self.work[:len(d0)] = d0

        # set the kwds from other objects explicitly
        if self.model.fjacb is not None:
            kwds['fjacb'] = self.model.fjacb
        if self.model.fjacd is not None:
            kwds['fjacd'] = self.model.fjacd
        if self.data.we is not None:
            kwds['we'] = self.data.we
        if self.data.wd is not None:
            kwds['wd'] = self.data.wd
        if self.model.extra_args is not None:
            kwds['extra_args'] = self.model.extra_args

        # implicitly set kwds from self's members
        for attr in kwd_l:
            obj = getattr(self, attr)
            if obj is not None:
                kwds[attr] = obj

        self.output = Output(odr(*args, **kwds))

        return self.output

    def restart(self, iter=None):
        """ Restarts the run with iter more iterations.

        Parameters
        ----------
        iter : int, optional
            ODRPACK's default for the number of new iterations is 10.

        Returns
        -------
        output : Output instance
            This object is also assigned to the attribute .output .
        """

        if self.output is None:
            raise OdrError("cannot restart: run() has not been called before")

        # Flag the restart and feed the previous run's workspaces back in.
        self.set_job(restart=1)
        self.work = self.output.work
        self.iwork = self.output.iwork

        self.maxit = iter

        return self.run()
parrot/lib/python3.10/site-packages/scipy/odr/models.py ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.odr` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation

__all__ = [  # noqa: F822
    'Model', 'exponential', 'multilinear', 'unilinear',
    'quadratic', 'polynomial'
]


def __dir__():
    # Advertise only the deprecated public names.
    return __all__


def __getattr__(name):
    # Forward attribute access to scipy.odr._models with a DeprecationWarning.
    return _sub_module_deprecation(sub_package="odr", module="models",
                                   private_modules=["_models"], all=__all__,
                                   attribute=name)
+ attribute=name)
parrot/lib/python3.10/site-packages/scipy/odr/odrpack.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# This file is not meant for public use and will be removed in SciPy v2.0.0.
# Use the `scipy.odr` namespace for importing the functions
# included below.

from scipy._lib.deprecation import _sub_module_deprecation

__all__ = [  # noqa: F822
    'odr', 'OdrWarning', 'OdrError', 'OdrStop',
    'Data', 'RealData', 'Model', 'Output', 'ODR',
    'odr_error', 'odr_stop'
]


def __dir__():
    # Advertise only the deprecated public names.
    return __all__


def __getattr__(name):
    # Forward attribute access to scipy.odr._odrpack with a DeprecationWarning.
    return _sub_module_deprecation(sub_package="odr", module="odrpack",
                                   private_modules=["_odrpack"], all=__all__,
                                   attribute=name)
+ attribute=name)
parrot/lib/python3.10/site-packages/scipy/odr/tests/__init__.py ADDED
File without changes
parrot/lib/python3.10/site-packages/scipy/odr/tests/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (167 Bytes). View file
 
parrot/lib/python3.10/site-packages/scipy/odr/tests/test_odr.py ADDED
@@ -0,0 +1,606 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pickle
2
+ import tempfile
3
+ import shutil
4
+ import os
5
+
6
+ import numpy as np
7
+ from numpy import pi
8
+ from numpy.testing import (assert_array_almost_equal,
9
+ assert_equal, assert_warns,
10
+ assert_allclose)
11
+ import pytest
12
+ from pytest import raises as assert_raises
13
+
14
+ from scipy.odr import (Data, Model, ODR, RealData, OdrStop, OdrWarning,
15
+ multilinear, exponential, unilinear, quadratic,
16
+ polynomial)
17
+
18
+
19
+ class TestODR:
20
+
21
+ # Bad Data for 'x'
22
+
23
+ def test_bad_data(self):
24
+ assert_raises(ValueError, Data, 2, 1)
25
+ assert_raises(ValueError, RealData, 2, 1)
26
+
27
+ # Empty Data for 'x'
28
+ def empty_data_func(self, B, x):
29
+ return B[0]*x + B[1]
30
+
31
+ def test_empty_data(self):
32
+ beta0 = [0.02, 0.0]
33
+ linear = Model(self.empty_data_func)
34
+
35
+ empty_dat = Data([], [])
36
+ assert_warns(OdrWarning, ODR,
37
+ empty_dat, linear, beta0=beta0)
38
+
39
+ empty_dat = RealData([], [])
40
+ assert_warns(OdrWarning, ODR,
41
+ empty_dat, linear, beta0=beta0)
42
+
43
    # Explicit Example

    def explicit_fcn(self, B, x):
        # Explicit model y = B0 + B1*(exp(B2*x) - 1)**2
        # (sample problem from the ODRPACK User's Guide, pg. 39).
        ret = B[0] + B[1] * np.power(np.exp(B[2]*x) - 1.0, 2)
        return ret

    def explicit_fjd(self, B, x):
        # Analytic Jacobian of explicit_fcn with respect to x.
        eBx = np.exp(B[2]*x)
        ret = B[1] * 2.0 * (eBx-1.0) * B[2] * eBx
        return ret

    def explicit_fjb(self, B, x):
        # Analytic Jacobian of explicit_fcn with respect to the
        # parameters B; one row per parameter.
        eBx = np.exp(B[2]*x)
        res = np.vstack([np.ones(x.shape[-1]),
                         np.power(eBx-1.0, 2),
                         B[1]*2.0*(eBx-1.0)*eBx*x])
        return res

    def test_explicit(self):
        """Fit the explicit model with user-supplied analytic Jacobians
        (deriv=2) and compare against the ODRPACK reference results."""
        explicit_mod = Model(
            self.explicit_fcn,
            fjacb=self.explicit_fjb,
            fjacd=self.explicit_fjd,
            meta=dict(name='Sample Explicit Model',
                      ref='ODRPACK UG, pg. 39'),
        )
        explicit_dat = Data([0.,0.,5.,7.,7.5,10.,16.,26.,30.,34.,34.5,100.],
                       [1265.,1263.6,1258.,1254.,1253.,1249.8,1237.,1218.,1220.6,
                        1213.8,1215.5,1212.])
        # ifixx entries of 0 hold the corresponding x observations fixed
        # (the first two and the last point are treated as exact).
        explicit_odr = ODR(explicit_dat, explicit_mod, beta0=[1500.0, -50.0, -0.1],
                       ifixx=[0,0,1,1,1,1,1,1,1,1,1,0])
        explicit_odr.set_job(deriv=2)
        explicit_odr.set_iprint(init=0, iter=0, final=0)

        out = explicit_odr.run()
        # Reference parameter estimates, standard deviations and
        # covariance matrix.
        assert_array_almost_equal(
            out.beta,
            np.array([1.2646548050648876e+03, -5.4018409956678255e+01,
                      -8.7849712165253724e-02]),
        )
        assert_array_almost_equal(
            out.sd_beta,
            np.array([1.0349270280543437, 1.583997785262061, 0.0063321988657267]),
        )
        assert_array_almost_equal(
            out.cov_beta,
            np.array([[4.4949592379003039e-01, -3.7421976890364739e-01,
                       -8.0978217468468912e-04],
                      [-3.7421976890364739e-01, 1.0529686462751804e+00,
                       -1.9453521827942002e-03],
                      [-8.0978217468468912e-04, -1.9453521827942002e-03,
                       1.6827336938454476e-05]]),
        )
96
+
97
    # Implicit Example

    def implicit_fcn(self, B, x):
        # Implicit conic-section model (ODRPACK UG, pg. 49); the fit
        # drives this residual toward zero rather than matching a y.
        return (B[2]*np.power(x[0]-B[0], 2) +
                2.0*B[3]*(x[0]-B[0])*(x[1]-B[1]) +
                B[4]*np.power(x[1]-B[1], 2) - 1.0)

    def test_implicit(self):
        """Fit the implicit model and compare with reference results."""
        implicit_mod = Model(
            self.implicit_fcn,
            implicit=1,
            meta=dict(name='Sample Implicit Model',
                      ref='ODRPACK UG, pg. 49'),
        )
        # For implicit models the second Data argument is the integer
        # dimensionality of the response, not observed y values.
        implicit_dat = Data([
            [0.5,1.2,1.6,1.86,2.12,2.36,2.44,2.36,2.06,1.74,1.34,0.9,-0.28,
             -0.78,-1.36,-1.9,-2.5,-2.88,-3.18,-3.44],
            [-0.12,-0.6,-1.,-1.4,-2.54,-3.36,-4.,-4.75,-5.25,-5.64,-5.97,-6.32,
             -6.44,-6.44,-6.41,-6.25,-5.88,-5.5,-5.24,-4.86]],
            1,
        )
        implicit_odr = ODR(implicit_dat, implicit_mod,
                           beta0=[-1.0, -3.0, 0.09, 0.02, 0.08])

        out = implicit_odr.run()
        assert_array_almost_equal(
            out.beta,
            np.array([-0.9993809167281279, -2.9310484652026476, 0.0875730502693354,
                      0.0162299708984738, 0.0797537982976416]),
        )
        assert_array_almost_equal(
            out.sd_beta,
            np.array([0.1113840353364371, 0.1097673310686467, 0.0041060738314314,
                      0.0027500347539902, 0.0034962501532468]),
        )
        # cov_beta is checked with explicit rtol/atol instead of
        # decimal places.
        assert_allclose(
            out.cov_beta,
            np.array([[2.1089274602333052e+00, -1.9437686411979040e+00,
                       7.0263550868344446e-02, -4.7175267373474862e-02,
                       5.2515575927380355e-02],
                      [-1.9437686411979040e+00, 2.0481509222414456e+00,
                       -6.1600515853057307e-02, 4.6268827806232933e-02,
                       -5.8822307501391467e-02],
                      [7.0263550868344446e-02, -6.1600515853057307e-02,
                       2.8659542561579308e-03, -1.4628662260014491e-03,
                       1.4528860663055824e-03],
                      [-4.7175267373474862e-02, 4.6268827806232933e-02,
                       -1.4628662260014491e-03, 1.2855592885514335e-03,
                       -1.2692942951415293e-03],
                      [5.2515575927380355e-02, -5.8822307501391467e-02,
                       1.4528860663055824e-03, -1.2692942951415293e-03,
                       2.0778813389755596e-03]]),
            rtol=1e-6, atol=2e-6,
        )
151
+
152
    # Multi-variable Example

    def multi_fcn(self, B, x):
        # Two-response model (ODRPACK UG, pg. 56).  Negative inputs are
        # outside the model's domain, so abort the fit via OdrStop.
        if (x < 0.0).any():
            raise OdrStop
        theta = pi*B[3]/2.
        ctheta = np.cos(theta)
        stheta = np.sin(theta)
        omega = np.power(2.*pi*x*np.exp(-B[2]), B[3])
        phi = np.arctan2((omega*stheta), (1.0 + omega*ctheta))
        r = (B[0] - B[1]) * np.power(np.sqrt(np.power(1.0 + omega*ctheta, 2) +
                                             np.power(omega*stheta, 2)), -B[4])
        # Stack the two responses row-wise: shape (2, len(x)).
        ret = np.vstack([B[1] + r*np.cos(B[4]*phi),
                         r*np.sin(B[4]*phi)])
        return ret

    def test_multi(self):
        """Multi-response fit with per-observation response weights,
        fixed x values and user-supplied initial deltas."""
        multi_mod = Model(
            self.multi_fcn,
            meta=dict(name='Sample Multi-Response Model',
                      ref='ODRPACK UG, pg. 56'),
        )

        multi_x = np.array([30.0, 50.0, 70.0, 100.0, 150.0, 200.0, 300.0, 500.0,
            700.0, 1000.0, 1500.0, 2000.0, 3000.0, 5000.0, 7000.0, 10000.0,
            15000.0, 20000.0, 30000.0, 50000.0, 70000.0, 100000.0, 150000.0])
        multi_y = np.array([
            [4.22, 4.167, 4.132, 4.038, 4.019, 3.956, 3.884, 3.784, 3.713,
             3.633, 3.54, 3.433, 3.358, 3.258, 3.193, 3.128, 3.059, 2.984,
             2.934, 2.876, 2.838, 2.798, 2.759],
            [0.136, 0.167, 0.188, 0.212, 0.236, 0.257, 0.276, 0.297, 0.309,
             0.311, 0.314, 0.311, 0.305, 0.289, 0.277, 0.255, 0.24, 0.218,
             0.202, 0.182, 0.168, 0.153, 0.139],
        ])
        n = len(multi_x)
        # we: full 2x2 response weight matrix per observation.
        multi_we = np.zeros((2, 2, n), dtype=float)
        multi_ifixx = np.ones(n, dtype=int)
        multi_delta = np.zeros(n, dtype=float)

        multi_we[0,0,:] = 559.6
        multi_we[1,0,:] = multi_we[0,1,:] = -1634.0
        multi_we[1,1,:] = 8397.0

        # Per-observation setup: small x values are held fixed
        # (ifixx=0); larger ones get progressively larger initial
        # delta estimates; x == 100 and x == 150 get zero weight.
        for i in range(n):
            if multi_x[i] < 100.0:
                multi_ifixx[i] = 0
            elif multi_x[i] <= 150.0:
                pass  # defaults are fine
            elif multi_x[i] <= 1000.0:
                multi_delta[i] = 25.0
            elif multi_x[i] <= 10000.0:
                multi_delta[i] = 560.0
            elif multi_x[i] <= 100000.0:
                multi_delta[i] = 9500.0
            else:
                multi_delta[i] = 144000.0
            if multi_x[i] == 100.0 or multi_x[i] == 150.0:
                multi_we[:,:,i] = 0.0

        multi_dat = Data(multi_x, multi_y, wd=1e-4/np.power(multi_x, 2),
                         we=multi_we)
        multi_odr = ODR(multi_dat, multi_mod, beta0=[4.,2.,7.,.4,.5],
                        delta0=multi_delta, ifixx=multi_ifixx)
        # deriv=1: forward finite differences; del_init=1: use delta0.
        multi_odr.set_job(deriv=1, del_init=1)

        out = multi_odr.run()
        assert_array_almost_equal(
            out.beta,
            np.array([4.3799880305938963, 2.4333057577497703, 8.0028845899503978,
                      0.5101147161764654, 0.5173902330489161]),
        )
        assert_array_almost_equal(
            out.sd_beta,
            np.array([0.0130625231081944, 0.0130499785273277, 0.1167085962217757,
                      0.0132642749596149, 0.0288529201353984]),
        )
        assert_array_almost_equal(
            out.cov_beta,
            np.array([[0.0064918418231375, 0.0036159705923791, 0.0438637051470406,
                       -0.0058700836512467, 0.011281212888768],
                      [0.0036159705923791, 0.0064793789429006, 0.0517610978353126,
                       -0.0051181304940204, 0.0130726943624117],
                      [0.0438637051470406, 0.0517610978353126, 0.5182263323095322,
                       -0.0563083340093696, 0.1269490939468611],
                      [-0.0058700836512467, -0.0051181304940204, -0.0563083340093696,
                       0.0066939246261263, -0.0140184391377962],
                      [0.011281212888768, 0.0130726943624117, 0.1269490939468611,
                       -0.0140184391377962, 0.0316733013820852]]),
        )
241
+
242
    # Pearson's Data
    # K. Pearson, Philosophical Magazine, 2, 559 (1901)

    def pearson_fcn(self, B, x):
        # Straight line: intercept B[0], slope B[1].
        return B[0] + B[1]*x

    def test_pearson(self):
        """Fit Pearson's classic data with errors in both variables,
        then fit with x and y swapped and check the reference results
        for both orientations."""
        p_x = np.array([0.,.9,1.8,2.6,3.3,4.4,5.2,6.1,6.5,7.4])
        p_y = np.array([5.9,5.4,4.4,4.6,3.5,3.7,2.8,2.8,2.4,1.5])
        p_sx = np.array([.03,.03,.04,.035,.07,.11,.13,.22,.74,1.])
        p_sy = np.array([1.,.74,.5,.35,.22,.22,.12,.12,.1,.04])

        p_dat = RealData(p_x, p_y, sx=p_sx, sy=p_sy)

        # Reverse the data to test invariance of results
        pr_dat = RealData(p_y, p_x, sx=p_sy, sy=p_sx)

        p_mod = Model(self.pearson_fcn, meta=dict(name='Uni-linear Fit'))

        p_odr = ODR(p_dat, p_mod, beta0=[1.,1.])
        pr_odr = ODR(pr_dat, p_mod, beta0=[1.,1.])

        # Forward fit.
        out = p_odr.run()
        assert_array_almost_equal(
            out.beta,
            np.array([5.4767400299231674, -0.4796082367610305]),
        )
        assert_array_almost_equal(
            out.sd_beta,
            np.array([0.3590121690702467, 0.0706291186037444]),
        )
        assert_array_almost_equal(
            out.cov_beta,
            np.array([[0.0854275622946333, -0.0161807025443155],
                      [-0.0161807025443155, 0.003306337993922]]),
        )

        # Reversed fit.
        rout = pr_odr.run()
        assert_array_almost_equal(
            rout.beta,
            np.array([11.4192022410781231, -2.0850374506165474]),
        )
        assert_array_almost_equal(
            rout.sd_beta,
            np.array([0.9820231665657161, 0.3070515616198911]),
        )
        assert_array_almost_equal(
            rout.cov_beta,
            np.array([[0.6391799462548782, -0.1955657291119177],
                      [-0.1955657291119177, 0.0624888159223392]]),
        )
293
+
294
    # Lorentz Peak
    # The data is taken from one of the undergraduate physics labs I performed.

    def lorentz(self, beta, x):
        # Lorentzian peak: amplitude beta[0], width beta[1],
        # center beta[2].
        return (beta[0]*beta[1]*beta[2] / np.sqrt(np.power(x*x -
            beta[2]*beta[2], 2.0) + np.power(beta[1]*x, 2.0)))

    def test_lorentz(self):
        """Fit a Lorentzian peak to lab data with per-point x errors
        and a constant y error, comparing against reference results."""
        l_sy = np.array([.29]*18)
        l_sx = np.array([.000972971,.000948268,.000707632,.000706679,
            .000706074, .000703918,.000698955,.000456856,
            .000455207,.000662717,.000654619,.000652694,
            .000000859202,.00106589,.00106378,.00125483, .00140818,.00241839])

        l_dat = RealData(
            [3.9094, 3.85945, 3.84976, 3.84716, 3.84551, 3.83964, 3.82608,
             3.78847, 3.78163, 3.72558, 3.70274, 3.6973, 3.67373, 3.65982,
             3.6562, 3.62498, 3.55525, 3.41886],
            [652, 910.5, 984, 1000, 1007.5, 1053, 1160.5, 1409.5, 1430, 1122,
             957.5, 920, 777.5, 709.5, 698, 578.5, 418.5, 275.5],
            sx=l_sx,
            sy=l_sy,
        )
        l_mod = Model(self.lorentz, meta=dict(name='Lorentz Peak'))
        l_odr = ODR(l_dat, l_mod, beta0=(1000., .1, 3.8))

        out = l_odr.run()
        assert_array_almost_equal(
            out.beta,
            np.array([1.4306780846149925e+03, 1.3390509034538309e-01,
                      3.7798193600109009e+00]),
        )
        assert_array_almost_equal(
            out.sd_beta,
            np.array([7.3621186811330963e-01, 3.5068899941471650e-04,
                      2.4451209281408992e-04]),
        )
        assert_array_almost_equal(
            out.cov_beta,
            np.array([[2.4714409064597873e-01, -6.9067261911110836e-05,
                       -3.1236953270424990e-05],
                      [-6.9067261911110836e-05, 5.6077531517333009e-08,
                       3.6133261832722601e-08],
                      [-3.1236953270424990e-05, 3.6133261832722601e-08,
                       2.7261220025171730e-08]]),
        )
340
+
341
+ def test_ticket_1253(self):
342
+ def linear(c, x):
343
+ return c[0]*x+c[1]
344
+
345
+ c = [2.0, 3.0]
346
+ x = np.linspace(0, 10)
347
+ y = linear(c, x)
348
+
349
+ model = Model(linear)
350
+ data = Data(x, y, wd=1.0, we=1.0)
351
+ job = ODR(data, model, beta0=[1.0, 1.0])
352
+ result = job.run()
353
+ assert_equal(result.info, 2)
354
+
355
+ # Verify fix for gh-9140
356
+
357
+ def test_ifixx(self):
358
+ x1 = [-2.01, -0.99, -0.001, 1.02, 1.98]
359
+ x2 = [3.98, 1.01, 0.001, 0.998, 4.01]
360
+ fix = np.vstack((np.zeros_like(x1, dtype=int), np.ones_like(x2, dtype=int)))
361
+ data = Data(np.vstack((x1, x2)), y=1, fix=fix)
362
+ model = Model(lambda beta, x: x[1, :] - beta[0] * x[0, :]**2., implicit=True)
363
+
364
+ odr1 = ODR(data, model, beta0=np.array([1.]))
365
+ sol1 = odr1.run()
366
+ odr2 = ODR(data, model, beta0=np.array([1.]), ifixx=fix)
367
+ sol2 = odr2.run()
368
+ assert_equal(sol1.beta, sol2.beta)
369
+
370
    # verify bugfix for #11800 in #11802
    def test_ticket_11800(self):
        """Multi-response fit with analytic Jacobians for both beta and
        x (deriv=3); checks convergence and the fitted parameters."""
        # parameters
        beta_true = np.array([1.0, 2.3, 1.1, -1.0, 1.3, 0.5])
        nr_measurements = 10

        # Pre-drawn noise samples so the test is deterministic.
        std_dev_x = 0.01
        x_error = np.array([[0.00063445, 0.00515731, 0.00162719, 0.01022866,
            -0.01624845, 0.00482652, 0.00275988, -0.00714734, -0.00929201, -0.00687301],
            [-0.00831623, -0.00821211, -0.00203459, 0.00938266, -0.00701829,
            0.0032169, 0.00259194, -0.00581017, -0.0030283, 0.01014164]])

        std_dev_y = 0.05
        y_error = np.array([[0.05275304, 0.04519563, -0.07524086, 0.03575642,
            0.04745194, 0.03806645, 0.07061601, -0.00753604, -0.02592543, -0.02394929],
            [0.03632366, 0.06642266, 0.08373122, 0.03988822, -0.0092536,
            -0.03750469, -0.03198903, 0.01642066, 0.01293648, -0.05627085]])

        # Expected converged parameters for this exact noise realization.
        beta_solution = np.array([
            2.62920235756665876536e+00, -1.26608484996299608838e+02,
            1.29703572775403074502e+02, -1.88560985401185465804e+00,
            7.83834160771274923718e+01, -7.64124076838087091801e+01])

        # model's function and Jacobians
        def func(beta, x):
            # Two linear responses in the two x components.
            y0 = beta[0] + beta[1] * x[0, :] + beta[2] * x[1, :]
            y1 = beta[3] + beta[4] * x[0, :] + beta[5] * x[1, :]

            return np.vstack((y0, y1))

        def df_dbeta_odr(beta, x):
            # Jacobian with respect to beta, shape (2, 6, n).
            nr_meas = np.shape(x)[1]
            zeros = np.zeros(nr_meas)
            ones = np.ones(nr_meas)

            dy0 = np.array([ones, x[0, :], x[1, :], zeros, zeros, zeros])
            dy1 = np.array([zeros, zeros, zeros, ones, x[0, :], x[1, :]])

            return np.stack((dy0, dy1))

        def df_dx_odr(beta, x):
            # Jacobian with respect to x, shape (2, 2, n).
            nr_meas = np.shape(x)[1]
            ones = np.ones(nr_meas)

            dy0 = np.array([beta[1] * ones, beta[2] * ones])
            dy1 = np.array([beta[4] * ones, beta[5] * ones])
            return np.stack((dy0, dy1))

        # do measurements with errors in independent and dependent variables
        x0_true = np.linspace(1, 10, nr_measurements)
        x1_true = np.linspace(1, 10, nr_measurements)
        x_true = np.array([x0_true, x1_true])

        y_true = func(beta_true, x_true)

        x_meas = x_true + x_error
        y_meas = y_true + y_error

        # estimate model's parameters
        model_f = Model(func, fjacb=df_dbeta_odr, fjacd=df_dx_odr)

        data = RealData(x_meas, y_meas, sx=std_dev_x, sy=std_dev_y)

        odr_obj = ODR(data, model_f, beta0=0.9 * beta_true, maxit=100)
        #odr_obj.set_iprint(init=2, iter=0, iter_step=1, final=1)
        # deriv=3: use the user-supplied analytic Jacobians.
        odr_obj.set_job(deriv=3)

        odr_out = odr_obj.run()

        # check results
        assert_equal(odr_out.info, 1)
        assert_array_almost_equal(odr_out.beta, beta_solution)
442
+
443
+ def test_multilinear_model(self):
444
+ x = np.linspace(0.0, 5.0)
445
+ y = 10.0 + 5.0 * x
446
+ data = Data(x, y)
447
+ odr_obj = ODR(data, multilinear)
448
+ output = odr_obj.run()
449
+ assert_array_almost_equal(output.beta, [10.0, 5.0])
450
+
451
+ def test_exponential_model(self):
452
+ x = np.linspace(0.0, 5.0)
453
+ y = -10.0 + np.exp(0.5*x)
454
+ data = Data(x, y)
455
+ odr_obj = ODR(data, exponential)
456
+ output = odr_obj.run()
457
+ assert_array_almost_equal(output.beta, [-10.0, 0.5])
458
+
459
+ def test_polynomial_model(self):
460
+ x = np.linspace(0.0, 5.0)
461
+ y = 1.0 + 2.0 * x + 3.0 * x ** 2 + 4.0 * x ** 3
462
+ poly_model = polynomial(3)
463
+ data = Data(x, y)
464
+ odr_obj = ODR(data, poly_model)
465
+ output = odr_obj.run()
466
+ assert_array_almost_equal(output.beta, [1.0, 2.0, 3.0, 4.0])
467
+
468
+ def test_unilinear_model(self):
469
+ x = np.linspace(0.0, 5.0)
470
+ y = 1.0 * x + 2.0
471
+ data = Data(x, y)
472
+ odr_obj = ODR(data, unilinear)
473
+ output = odr_obj.run()
474
+ assert_array_almost_equal(output.beta, [1.0, 2.0])
475
+
476
+ def test_quadratic_model(self):
477
+ x = np.linspace(0.0, 5.0)
478
+ y = 1.0 * x ** 2 + 2.0 * x + 3.0
479
+ data = Data(x, y)
480
+ odr_obj = ODR(data, quadratic)
481
+ output = odr_obj.run()
482
+ assert_array_almost_equal(output.beta, [1.0, 2.0, 3.0])
483
+
484
+ def test_work_ind(self):
485
+
486
+ def func(par, x):
487
+ b0, b1 = par
488
+ return b0 + b1 * x
489
+
490
+ # generate some data
491
+ n_data = 4
492
+ x = np.arange(n_data)
493
+ y = np.where(x % 2, x + 0.1, x - 0.1)
494
+ x_err = np.full(n_data, 0.1)
495
+ y_err = np.full(n_data, 0.1)
496
+
497
+ # do the fitting
498
+ linear_model = Model(func)
499
+ real_data = RealData(x, y, sx=x_err, sy=y_err)
500
+ odr_obj = ODR(real_data, linear_model, beta0=[0.4, 0.4])
501
+ odr_obj.set_job(fit_type=0)
502
+ out = odr_obj.run()
503
+
504
+ sd_ind = out.work_ind['sd']
505
+ assert_array_almost_equal(out.sd_beta,
506
+ out.work[sd_ind:sd_ind + len(out.sd_beta)])
507
+
508
+ @pytest.mark.skipif(True, reason="Fortran I/O prone to crashing so better "
509
+ "not to run this test, see gh-13127")
510
+ def test_output_file_overwrite(self):
511
+ """
512
+ Verify fix for gh-1892
513
+ """
514
+ def func(b, x):
515
+ return b[0] + b[1] * x
516
+
517
+ p = Model(func)
518
+ data = Data(np.arange(10), 12 * np.arange(10))
519
+ tmp_dir = tempfile.mkdtemp()
520
+ error_file_path = os.path.join(tmp_dir, "error.dat")
521
+ report_file_path = os.path.join(tmp_dir, "report.dat")
522
+ try:
523
+ ODR(data, p, beta0=[0.1, 13], errfile=error_file_path,
524
+ rptfile=report_file_path).run()
525
+ ODR(data, p, beta0=[0.1, 13], errfile=error_file_path,
526
+ rptfile=report_file_path, overwrite=True).run()
527
+ finally:
528
+ # remove output files for clean up
529
+ shutil.rmtree(tmp_dir)
530
+
531
+ def test_odr_model_default_meta(self):
532
+ def func(b, x):
533
+ return b[0] + b[1] * x
534
+
535
+ p = Model(func)
536
+ p.set_meta(name='Sample Model Meta', ref='ODRPACK')
537
+ assert_equal(p.meta, {'name': 'Sample Model Meta', 'ref': 'ODRPACK'})
538
+
539
+ def test_work_array_del_init(self):
540
+ """
541
+ Verify fix for gh-18739 where del_init=1 fails.
542
+ """
543
+ def func(b, x):
544
+ return b[0] + b[1] * x
545
+
546
+ # generate some data
547
+ n_data = 4
548
+ x = np.arange(n_data)
549
+ y = np.where(x % 2, x + 0.1, x - 0.1)
550
+ x_err = np.full(n_data, 0.1)
551
+ y_err = np.full(n_data, 0.1)
552
+
553
+ linear_model = Model(func)
554
+ # Try various shapes of the `we` array from various `sy` and `covy`
555
+ rd0 = RealData(x, y, sx=x_err, sy=y_err)
556
+ rd1 = RealData(x, y, sx=x_err, sy=0.1)
557
+ rd2 = RealData(x, y, sx=x_err, sy=[0.1])
558
+ rd3 = RealData(x, y, sx=x_err, sy=np.full((1, n_data), 0.1))
559
+ rd4 = RealData(x, y, sx=x_err, covy=[[0.01]])
560
+ rd5 = RealData(x, y, sx=x_err, covy=np.full((1, 1, n_data), 0.01))
561
+ for rd in [rd0, rd1, rd2, rd3, rd4, rd5]:
562
+ odr_obj = ODR(rd, linear_model, beta0=[0.4, 0.4],
563
+ delta0=np.full(n_data, -0.1))
564
+ odr_obj.set_job(fit_type=0, del_init=1)
565
+ # Just make sure that it runs without raising an exception.
566
+ odr_obj.run()
567
+
568
+ def test_pickling_data(self):
569
+ x = np.linspace(0.0, 5.0)
570
+ y = 1.0 * x + 2.0
571
+ data = Data(x, y)
572
+
573
+ obj_pickle = pickle.dumps(data)
574
+ del data
575
+ pickle.loads(obj_pickle)
576
+
577
+ def test_pickling_real_data(self):
578
+ x = np.linspace(0.0, 5.0)
579
+ y = 1.0 * x + 2.0
580
+ data = RealData(x, y)
581
+
582
+ obj_pickle = pickle.dumps(data)
583
+ del data
584
+ pickle.loads(obj_pickle)
585
+
586
+ def test_pickling_model(self):
587
+ obj_pickle = pickle.dumps(unilinear)
588
+ pickle.loads(obj_pickle)
589
+
590
+ def test_pickling_odr(self):
591
+ x = np.linspace(0.0, 5.0)
592
+ y = 1.0 * x + 2.0
593
+ odr_obj = ODR(Data(x, y), unilinear)
594
+
595
+ obj_pickle = pickle.dumps(odr_obj)
596
+ del odr_obj
597
+ pickle.loads(obj_pickle)
598
+
599
+ def test_pickling_output(self):
600
+ x = np.linspace(0.0, 5.0)
601
+ y = 1.0 * x + 2.0
602
+ output = ODR(Data(x, y), unilinear).run
603
+
604
+ obj_pickle = pickle.dumps(output)
605
+ del output
606
+ pickle.loads(obj_pickle)
vllm/lib/python3.10/site-packages/sympy/benchmarks/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (166 Bytes). View file