diff --git a/.gitattributes b/.gitattributes index 86aa7ea344cdf5d67b7cd282cd9c51c5e0423406..e0e7e5ab247681b4141fc246a577c526e3dcdcdb 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1692,3 +1692,5 @@ vllm/lib/python3.10/site-packages/sympy/tensor/__pycache__/tensor.cpython-310.py vllm/lib/python3.10/site-packages/sympy/solvers/ode/__pycache__/single.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text vllm/lib/python3.10/site-packages/sympy/solvers/__pycache__/solvers.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text vllm/lib/python3.10/site-packages/sympy/solvers/diophantine/__pycache__/diophantine.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text +parrot/lib/python3.10/site-packages/scipy/linalg/_solve_toeplitz.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text +vllm/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solveset.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text diff --git a/parrot/lib/python3.10/site-packages/scipy/datasets/__pycache__/_download_all.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/datasets/__pycache__/_download_all.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7287a3785bd185bb92020208fc10da0342a77645 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/datasets/__pycache__/_download_all.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/datasets/__pycache__/_fetchers.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/datasets/__pycache__/_fetchers.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..8d0046685770123e37e262ac59e87da8b4a7a95b Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/datasets/__pycache__/_fetchers.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/datasets/__pycache__/_registry.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/datasets/__pycache__/_registry.cpython-310.pyc new 
file mode 100644 index 0000000000000000000000000000000000000000..f93abb8da5483fb69d71168d2707ff22714c64cb Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/datasets/__pycache__/_registry.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/datasets/__pycache__/_utils.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/datasets/__pycache__/_utils.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..3699a6e2458745a664bc9c80b67ee93e5b0fe2dc Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/datasets/__pycache__/_utils.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/datasets/_fetchers.py b/parrot/lib/python3.10/site-packages/scipy/datasets/_fetchers.py new file mode 100644 index 0000000000000000000000000000000000000000..f273b9eb826dde77d197e16ea92511bbd237b3d4 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/datasets/_fetchers.py @@ -0,0 +1,221 @@ +from numpy import array, frombuffer, load +from ._registry import registry, registry_urls + +try: + import pooch +except ImportError: + pooch = None + data_fetcher = None +else: + data_fetcher = pooch.create( + # Use the default cache folder for the operating system + # Pooch uses appdirs (https://github.com/ActiveState/appdirs) to + # select an appropriate directory for the cache on each platform. + path=pooch.os_cache("scipy-data"), + + # The remote data is on Github + # base_url is a required param, even though we override this + # using individual urls in the registry. + base_url="https://github.com/scipy/", + registry=registry, + urls=registry_urls + ) + + +def fetch_data(dataset_name, data_fetcher=data_fetcher): + if data_fetcher is None: + raise ImportError("Missing optional dependency 'pooch' required " + "for scipy.datasets module. Please use pip or " + "conda to install 'pooch'.") + # The "fetch" method returns the full path to the downloaded data file. 
+ return data_fetcher.fetch(dataset_name) + + +def ascent(): + """ + Get an 8-bit grayscale bit-depth, 512 x 512 derived image for easy + use in demos. + + The image is derived from + https://pixnio.com/people/accent-to-the-top + + Parameters + ---------- + None + + Returns + ------- + ascent : ndarray + convenient image to use for testing and demonstration + + Examples + -------- + >>> import scipy.datasets + >>> ascent = scipy.datasets.ascent() + >>> ascent.shape + (512, 512) + >>> ascent.max() + 255 + + >>> import matplotlib.pyplot as plt + >>> plt.gray() + >>> plt.imshow(ascent) + >>> plt.show() + + """ + import pickle + + # The file will be downloaded automatically the first time this is run, + # returning the path to the downloaded file. Afterwards, Pooch finds + # it in the local cache and doesn't repeat the download. + fname = fetch_data("ascent.dat") + # Now we just need to load it with our standard Python tools. + with open(fname, 'rb') as f: + ascent = array(pickle.load(f)) + return ascent + + +def electrocardiogram(): + """ + Load an electrocardiogram as an example for a 1-D signal. + + The returned signal is a 5 minute long electrocardiogram (ECG), a medical + recording of the heart's electrical activity, sampled at 360 Hz. + + Returns + ------- + ecg : ndarray + The electrocardiogram in millivolt (mV) sampled at 360 Hz. + + Notes + ----- + The provided signal is an excerpt (19:35 to 24:35) from the `record 208`_ + (lead MLII) provided by the MIT-BIH Arrhythmia Database [1]_ on + PhysioNet [2]_. The excerpt includes noise induced artifacts, typical + heartbeats as well as pathological changes. + + .. _record 208: https://physionet.org/physiobank/database/html/mitdbdir/records.htm#208 + + .. versionadded:: 1.1.0 + + References + ---------- + .. [1] Moody GB, Mark RG. The impact of the MIT-BIH Arrhythmia Database. + IEEE Eng in Med and Biol 20(3):45-50 (May-June 2001). + (PMID: 11446209); :doi:`10.13026/C2F305` + .. 
[2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh, + Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. PhysioBank, + PhysioToolkit, and PhysioNet: Components of a New Research Resource + for Complex Physiologic Signals. Circulation 101(23):e215-e220; + :doi:`10.1161/01.CIR.101.23.e215` + + Examples + -------- + >>> from scipy.datasets import electrocardiogram + >>> ecg = electrocardiogram() + >>> ecg + array([-0.245, -0.215, -0.185, ..., -0.405, -0.395, -0.385]) + >>> ecg.shape, ecg.mean(), ecg.std() + ((108000,), -0.16510875, 0.5992473991177294) + + As stated the signal features several areas with a different morphology. + E.g., the first few seconds show the electrical activity of a heart in + normal sinus rhythm as seen below. + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> fs = 360 + >>> time = np.arange(ecg.size) / fs + >>> plt.plot(time, ecg) + >>> plt.xlabel("time in s") + >>> plt.ylabel("ECG in mV") + >>> plt.xlim(9, 10.2) + >>> plt.ylim(-1, 1.5) + >>> plt.show() + + After second 16, however, the first premature ventricular contractions, + also called extrasystoles, appear. These have a different morphology + compared to typical heartbeats. The difference can easily be observed + in the following plot. + + >>> plt.plot(time, ecg) + >>> plt.xlabel("time in s") + >>> plt.ylabel("ECG in mV") + >>> plt.xlim(46.5, 50) + >>> plt.ylim(-2, 1.5) + >>> plt.show() + + At several points large artifacts disturb the recording, e.g.: + + >>> plt.plot(time, ecg) + >>> plt.xlabel("time in s") + >>> plt.ylabel("ECG in mV") + >>> plt.xlim(207, 215) + >>> plt.ylim(-2, 3.5) + >>> plt.show() + + Finally, examining the power spectrum reveals that most of the biosignal is + made up of lower frequencies. At 60 Hz the noise induced by the mains + electricity can be clearly observed. 
+ + >>> from scipy.signal import welch + >>> f, Pxx = welch(ecg, fs=fs, nperseg=2048, scaling="spectrum") + >>> plt.semilogy(f, Pxx) + >>> plt.xlabel("Frequency in Hz") + >>> plt.ylabel("Power spectrum of the ECG in mV**2") + >>> plt.xlim(f[[0, -1]]) + >>> plt.show() + """ + fname = fetch_data("ecg.dat") + with load(fname) as file: + ecg = file["ecg"].astype(int) # np.uint16 -> int + # Convert raw output of ADC to mV: (ecg - adc_zero) / adc_gain + ecg = (ecg - 1024) / 200.0 + return ecg + + +def face(gray=False): + """ + Get a 1024 x 768, color image of a raccoon face. + + The image is derived from + https://pixnio.com/fauna-animals/raccoons/raccoon-procyon-lotor + + Parameters + ---------- + gray : bool, optional + If True return 8-bit grey-scale image, otherwise return a color image + + Returns + ------- + face : ndarray + image of a raccoon face + + Examples + -------- + >>> import scipy.datasets + >>> face = scipy.datasets.face() + >>> face.shape + (768, 1024, 3) + >>> face.max() + 255 + >>> face.dtype + dtype('uint8') + + >>> import matplotlib.pyplot as plt + >>> plt.gray() + >>> plt.imshow(face) + >>> plt.show() + + """ + import bz2 + fname = fetch_data("face.dat") + with open(fname, 'rb') as f: + rawdata = f.read() + face_data = bz2.decompress(rawdata) + face = frombuffer(face_data, dtype='uint8') + face.shape = (768, 1024, 3) + if gray is True: + face = (0.21 * face[:, :, 0] + 0.71 * face[:, :, 1] + + 0.07 * face[:, :, 2]).astype('uint8') + return face diff --git a/parrot/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..2b8746805d2accbd536dad614d1cf46af6b51d77 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/__init__.cpython-310.pyc differ diff --git 
a/parrot/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/test_data.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/test_data.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..454e1a38b960292546d056fdccd68a59a4edef3c Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/datasets/tests/__pycache__/test_data.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/datasets/tests/test_data.py b/parrot/lib/python3.10/site-packages/scipy/datasets/tests/test_data.py new file mode 100644 index 0000000000000000000000000000000000000000..d29feb72f55f564d3bda3c5cfa956bf050e14135 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/datasets/tests/test_data.py @@ -0,0 +1,124 @@ +from scipy.datasets._registry import registry +from scipy.datasets._fetchers import data_fetcher +from scipy.datasets._utils import _clear_cache +from scipy.datasets import ascent, face, electrocardiogram, download_all +from numpy.testing import assert_equal, assert_almost_equal +import os +import pytest + +try: + import pooch +except ImportError: + raise ImportError("Missing optional dependency 'pooch' required " + "for scipy.datasets module. 
Please use pip or " + "conda to install 'pooch'.") + + +data_dir = data_fetcher.path # type: ignore + + +def _has_hash(path, expected_hash): + """Check if the provided path has the expected hash.""" + if not os.path.exists(path): + return False + return pooch.file_hash(path) == expected_hash + + +class TestDatasets: + + @pytest.fixture(scope='module', autouse=True) + def test_download_all(self): + # This fixture requires INTERNET CONNECTION + + # test_setup phase + download_all() + + yield + + @pytest.mark.fail_slow(5) + def test_existence_all(self): + assert len(os.listdir(data_dir)) >= len(registry) + + def test_ascent(self): + assert_equal(ascent().shape, (512, 512)) + + # hash check + assert _has_hash(os.path.join(data_dir, "ascent.dat"), + registry["ascent.dat"]) + + def test_face(self): + assert_equal(face().shape, (768, 1024, 3)) + + # hash check + assert _has_hash(os.path.join(data_dir, "face.dat"), + registry["face.dat"]) + + def test_electrocardiogram(self): + # Test shape, dtype and stats of signal + ecg = electrocardiogram() + assert_equal(ecg.dtype, float) + assert_equal(ecg.shape, (108000,)) + assert_almost_equal(ecg.mean(), -0.16510875) + assert_almost_equal(ecg.std(), 0.5992473991177294) + + # hash check + assert _has_hash(os.path.join(data_dir, "ecg.dat"), + registry["ecg.dat"]) + + +def test_clear_cache(tmp_path): + # Note: `tmp_path` is a pytest fixture, it handles cleanup + dummy_basepath = tmp_path / "dummy_cache_dir" + dummy_basepath.mkdir() + + # Create three dummy dataset files for dummy dataset methods + dummy_method_map = {} + for i in range(4): + dummy_method_map[f"data{i}"] = [f"data{i}.dat"] + data_filepath = dummy_basepath / f"data{i}.dat" + data_filepath.write_text("") + + # clear files associated to single dataset method data0 + # also test callable argument instead of list of callables + def data0(): + pass + _clear_cache(datasets=data0, cache_dir=dummy_basepath, + method_map=dummy_method_map) + assert not 
os.path.exists(dummy_basepath/"data0.dat") + + # clear files associated to multiple dataset methods "data3" and "data4" + def data1(): + pass + + def data2(): + pass + _clear_cache(datasets=[data1, data2], cache_dir=dummy_basepath, + method_map=dummy_method_map) + assert not os.path.exists(dummy_basepath/"data1.dat") + assert not os.path.exists(dummy_basepath/"data2.dat") + + # clear multiple dataset files "data3_0.dat" and "data3_1.dat" + # associated with dataset method "data3" + def data4(): + pass + # create files + (dummy_basepath / "data4_0.dat").write_text("") + (dummy_basepath / "data4_1.dat").write_text("") + + dummy_method_map["data4"] = ["data4_0.dat", "data4_1.dat"] + _clear_cache(datasets=[data4], cache_dir=dummy_basepath, + method_map=dummy_method_map) + assert not os.path.exists(dummy_basepath/"data4_0.dat") + assert not os.path.exists(dummy_basepath/"data4_1.dat") + + # wrong dataset method should raise ValueError since it + # doesn't exist in the dummy_method_map + def data5(): + pass + with pytest.raises(ValueError): + _clear_cache(datasets=[data5], cache_dir=dummy_basepath, + method_map=dummy_method_map) + + # remove all dataset cache + _clear_cache(datasets=None, cache_dir=dummy_basepath) + assert not os.path.exists(dummy_basepath) diff --git a/parrot/lib/python3.10/site-packages/scipy/integrate/__init__.py b/parrot/lib/python3.10/site-packages/scipy/integrate/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..039234777d35a1aebce2b93b943261e4f013a6b5 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/integrate/__init__.py @@ -0,0 +1,110 @@ +""" +============================================= +Integration and ODEs (:mod:`scipy.integrate`) +============================================= + +.. currentmodule:: scipy.integrate + +Integrating functions, given function object +============================================ + +.. 
autosummary:: + :toctree: generated/ + + quad -- General purpose integration + quad_vec -- General purpose integration of vector-valued functions + dblquad -- General purpose double integration + tplquad -- General purpose triple integration + nquad -- General purpose N-D integration + fixed_quad -- Integrate func(x) using Gaussian quadrature of order n + quadrature -- Integrate with given tolerance using Gaussian quadrature + romberg -- Integrate func using Romberg integration + newton_cotes -- Weights and error coefficient for Newton-Cotes integration + qmc_quad -- N-D integration using Quasi-Monte Carlo quadrature + IntegrationWarning -- Warning on issues during integration + AccuracyWarning -- Warning on issues during quadrature integration + +Integrating functions, given fixed samples +========================================== + +.. autosummary:: + :toctree: generated/ + + trapezoid -- Use trapezoidal rule to compute integral. + cumulative_trapezoid -- Use trapezoidal rule to cumulatively compute integral. + simpson -- Use Simpson's rule to compute integral from samples. + cumulative_simpson -- Use Simpson's rule to cumulatively compute integral from samples. + romb -- Use Romberg Integration to compute integral from + -- (2**k + 1) evenly-spaced samples. + +.. seealso:: + + :mod:`scipy.special` for orthogonal polynomials (special) for Gaussian + quadrature roots and weights for other weighting factors and regions. + +Solving initial value problems for ODE systems +============================================== + +The solvers are implemented as individual classes, which can be used directly +(low-level usage) or through a convenience function. + +.. autosummary:: + :toctree: generated/ + + solve_ivp -- Convenient function for ODE integration. + RK23 -- Explicit Runge-Kutta solver of order 3(2). + RK45 -- Explicit Runge-Kutta solver of order 5(4). + DOP853 -- Explicit Runge-Kutta solver of order 8. + Radau -- Implicit Runge-Kutta solver of order 5. 
+ BDF -- Implicit multi-step variable order (1 to 5) solver. + LSODA -- LSODA solver from ODEPACK Fortran package. + OdeSolver -- Base class for ODE solvers. + DenseOutput -- Local interpolant for computing a dense output. + OdeSolution -- Class which represents a continuous ODE solution. + + +Old API +------- + +These are the routines developed earlier for SciPy. They wrap older solvers +implemented in Fortran (mostly ODEPACK). While the interface to them is not +particularly convenient and certain features are missing compared to the new +API, the solvers themselves are of good quality and work fast as compiled +Fortran code. In some cases, it might be worth using this old API. + +.. autosummary:: + :toctree: generated/ + + odeint -- General integration of ordinary differential equations. + ode -- Integrate ODE using VODE and ZVODE routines. + complex_ode -- Convert a complex-valued ODE to real-valued and integrate. + ODEintWarning -- Warning raised during the execution of `odeint`. + + +Solving boundary value problems for ODE systems +=============================================== + +.. autosummary:: + :toctree: generated/ + + solve_bvp -- Solve a boundary value problem for a system of ODEs. +""" # noqa: E501 + + +from ._quadrature import * +from ._odepack_py import * +from ._quadpack_py import * +from ._ode import * +from ._bvp import solve_bvp +from ._ivp import (solve_ivp, OdeSolution, DenseOutput, + OdeSolver, RK23, RK45, DOP853, Radau, BDF, LSODA) +from ._quad_vec import quad_vec + +# Deprecated namespaces, to be removed in v2.0.0 +from . 
import dop, lsoda, vode, odepack, quadpack + +__all__ = [s for s in dir() if not s.startswith('_')] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/parrot/lib/python3.10/site-packages/scipy/integrate/__pycache__/_bvp.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/integrate/__pycache__/_bvp.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..d0688ee6cf4e0dea95d9b75ab2b353c5bd4ccffd Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/integrate/__pycache__/_bvp.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quad_vec.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quad_vec.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..ca7afde008ae23851c9615ec1058fe1c2d25cbd5 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/integrate/__pycache__/_quad_vec.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/integrate/__pycache__/dop.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/integrate/__pycache__/dop.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..50fe8ed967def21abefbd0e018af53cf577dd1da Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/integrate/__pycache__/dop.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/integrate/__pycache__/vode.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/integrate/__pycache__/vode.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..cb5dba8ec6e60e7198fb256672f736e2fe133c68 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/integrate/__pycache__/vode.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/integrate/_ivp/__init__.py 
b/parrot/lib/python3.10/site-packages/scipy/integrate/_ivp/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..f3c8aaa36588651ae5e48b58fbb1d443bc71fc77 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/integrate/_ivp/__init__.py @@ -0,0 +1,8 @@ +"""Suite of ODE solvers implemented in Python.""" +from .ivp import solve_ivp +from .rk import RK23, RK45, DOP853 +from .radau import Radau +from .bdf import BDF +from .lsoda import LSODA +from .common import OdeSolution +from .base import DenseOutput, OdeSolver diff --git a/parrot/lib/python3.10/site-packages/scipy/integrate/_ivp/base.py b/parrot/lib/python3.10/site-packages/scipy/integrate/_ivp/base.py new file mode 100644 index 0000000000000000000000000000000000000000..46db9a69dfb3e7aee5c150ac6795234cd455dfe5 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/integrate/_ivp/base.py @@ -0,0 +1,290 @@ +import numpy as np + + +def check_arguments(fun, y0, support_complex): + """Helper function for checking arguments common to all solvers.""" + y0 = np.asarray(y0) + if np.issubdtype(y0.dtype, np.complexfloating): + if not support_complex: + raise ValueError("`y0` is complex, but the chosen solver does " + "not support integration in a complex domain.") + dtype = complex + else: + dtype = float + y0 = y0.astype(dtype, copy=False) + + if y0.ndim != 1: + raise ValueError("`y0` must be 1-dimensional.") + + if not np.isfinite(y0).all(): + raise ValueError("All components of the initial state `y0` must be finite.") + + def fun_wrapped(t, y): + return np.asarray(fun(t, y), dtype=dtype) + + return fun_wrapped, y0 + + +class OdeSolver: + """Base class for ODE solvers. + + In order to implement a new solver you need to follow the guidelines: + + 1. A constructor must accept parameters presented in the base class + (listed below) along with any other parameters specific to a solver. + 2. 
A constructor must accept arbitrary extraneous arguments + ``**extraneous``, but warn that these arguments are irrelevant + using `common.warn_extraneous` function. Do not pass these + arguments to the base class. + 3. A solver must implement a private method `_step_impl(self)` which + propagates a solver one step further. It must return tuple + ``(success, message)``, where ``success`` is a boolean indicating + whether a step was successful, and ``message`` is a string + containing description of a failure if a step failed or None + otherwise. + 4. A solver must implement a private method `_dense_output_impl(self)`, + which returns a `DenseOutput` object covering the last successful + step. + 5. A solver must have attributes listed below in Attributes section. + Note that ``t_old`` and ``step_size`` are updated automatically. + 6. Use `fun(self, t, y)` method for the system rhs evaluation, this + way the number of function evaluations (`nfev`) will be tracked + automatically. + 7. For convenience, a base class provides `fun_single(self, t, y)` and + `fun_vectorized(self, t, y)` for evaluating the rhs in + non-vectorized and vectorized fashions respectively (regardless of + how `fun` from the constructor is implemented). These calls don't + increment `nfev`. + 8. If a solver uses a Jacobian matrix and LU decompositions, it should + track the number of Jacobian evaluations (`njev`) and the number of + LU decompositions (`nlu`). + 9. By convention, the function evaluations used to compute a finite + difference approximation of the Jacobian should not be counted in + `nfev`, thus use `fun_single(self, t, y)` or + `fun_vectorized(self, t, y)` when computing a finite difference + approximation of the Jacobian. + + Parameters + ---------- + fun : callable + Right-hand side of the system: the time derivative of the state ``y`` + at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a + scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. 
``fun`` must + return an array of the same shape as ``y``. See `vectorized` for more + information. + t0 : float + Initial time. + y0 : array_like, shape (n,) + Initial state. + t_bound : float + Boundary time --- the integration won't continue beyond it. It also + determines the direction of the integration. + vectorized : bool + Whether `fun` can be called in a vectorized fashion. Default is False. + + If ``vectorized`` is False, `fun` will always be called with ``y`` of + shape ``(n,)``, where ``n = len(y0)``. + + If ``vectorized`` is True, `fun` may be called with ``y`` of shape + ``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave + such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of + the returned array is the time derivative of the state corresponding + with a column of ``y``). + + Setting ``vectorized=True`` allows for faster finite difference + approximation of the Jacobian by methods 'Radau' and 'BDF', but + will result in slower execution for other methods. It can also + result in slower overall execution for 'Radau' and 'BDF' in some + circumstances (e.g. small ``len(y0)``). + support_complex : bool, optional + Whether integration in a complex domain should be supported. + Generally determined by a derived solver class capabilities. + Default is False. + + Attributes + ---------- + n : int + Number of equations. + status : string + Current status of the solver: 'running', 'finished' or 'failed'. + t_bound : float + Boundary time. + direction : float + Integration direction: +1 or -1. + t : float + Current time. + y : ndarray + Current state. + t_old : float + Previous time. None if no steps were made yet. + step_size : float + Size of the last successful step. None if no steps were made yet. + nfev : int + Number of the system's rhs evaluations. + njev : int + Number of the Jacobian evaluations. + nlu : int + Number of LU decompositions. 
+ """ + TOO_SMALL_STEP = "Required step size is less than spacing between numbers." + + def __init__(self, fun, t0, y0, t_bound, vectorized, + support_complex=False): + self.t_old = None + self.t = t0 + self._fun, self.y = check_arguments(fun, y0, support_complex) + self.t_bound = t_bound + self.vectorized = vectorized + + if vectorized: + def fun_single(t, y): + return self._fun(t, y[:, None]).ravel() + fun_vectorized = self._fun + else: + fun_single = self._fun + + def fun_vectorized(t, y): + f = np.empty_like(y) + for i, yi in enumerate(y.T): + f[:, i] = self._fun(t, yi) + return f + + def fun(t, y): + self.nfev += 1 + return self.fun_single(t, y) + + self.fun = fun + self.fun_single = fun_single + self.fun_vectorized = fun_vectorized + + self.direction = np.sign(t_bound - t0) if t_bound != t0 else 1 + self.n = self.y.size + self.status = 'running' + + self.nfev = 0 + self.njev = 0 + self.nlu = 0 + + @property + def step_size(self): + if self.t_old is None: + return None + else: + return np.abs(self.t - self.t_old) + + def step(self): + """Perform one integration step. + + Returns + ------- + message : string or None + Report from the solver. Typically a reason for a failure if + `self.status` is 'failed' after the step was taken or None + otherwise. + """ + if self.status != 'running': + raise RuntimeError("Attempt to step on a failed or finished " + "solver.") + + if self.n == 0 or self.t == self.t_bound: + # Handle corner cases of empty solver or no integration. + self.t_old = self.t + self.t = self.t_bound + message = None + self.status = 'finished' + else: + t = self.t + success, message = self._step_impl() + + if not success: + self.status = 'failed' + else: + self.t_old = t + if self.direction * (self.t - self.t_bound) >= 0: + self.status = 'finished' + + return message + + def dense_output(self): + """Compute a local interpolant over the last successful step. + + Returns + ------- + sol : `DenseOutput` + Local interpolant over the last successful step. 
+ """ + if self.t_old is None: + raise RuntimeError("Dense output is available after a successful " + "step was made.") + + if self.n == 0 or self.t == self.t_old: + # Handle corner cases of empty solver and no integration. + return ConstantDenseOutput(self.t_old, self.t, self.y) + else: + return self._dense_output_impl() + + def _step_impl(self): + raise NotImplementedError + + def _dense_output_impl(self): + raise NotImplementedError + + +class DenseOutput: + """Base class for local interpolant over step made by an ODE solver. + + It interpolates between `t_min` and `t_max` (see Attributes below). + Evaluation outside this interval is not forbidden, but the accuracy is not + guaranteed. + + Attributes + ---------- + t_min, t_max : float + Time range of the interpolation. + """ + def __init__(self, t_old, t): + self.t_old = t_old + self.t = t + self.t_min = min(t, t_old) + self.t_max = max(t, t_old) + + def __call__(self, t): + """Evaluate the interpolant. + + Parameters + ---------- + t : float or array_like with shape (n_points,) + Points to evaluate the solution at. + + Returns + ------- + y : ndarray, shape (n,) or (n, n_points) + Computed values. Shape depends on whether `t` was a scalar or a + 1-D array. + """ + t = np.asarray(t) + if t.ndim > 1: + raise ValueError("`t` must be a float or a 1-D array.") + return self._call_impl(t) + + def _call_impl(self, t): + raise NotImplementedError + + +class ConstantDenseOutput(DenseOutput): + """Constant value interpolator. + + This class used for degenerate integration cases: equal integration limits + or a system with 0 equations. 
+ """ + def __init__(self, t_old, t, value): + super().__init__(t_old, t) + self.value = value + + def _call_impl(self, t): + if t.ndim == 0: + return self.value + else: + ret = np.empty((self.value.shape[0], t.shape[0])) + ret[:] = self.value[:, None] + return ret diff --git a/parrot/lib/python3.10/site-packages/scipy/integrate/_ivp/bdf.py b/parrot/lib/python3.10/site-packages/scipy/integrate/_ivp/bdf.py new file mode 100644 index 0000000000000000000000000000000000000000..29bd9461519255c0bdab6e2172a9e549cb709cbd --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/integrate/_ivp/bdf.py @@ -0,0 +1,480 @@ +import numpy as np +from scipy.linalg import lu_factor, lu_solve +from scipy.sparse import issparse, csc_matrix, eye +from scipy.sparse.linalg import splu +from scipy.optimize._numdiff import group_columns +from .common import (validate_max_step, validate_tol, select_initial_step, + norm, EPS, num_jac, validate_first_step, + warn_extraneous) +from .base import OdeSolver, DenseOutput + + +MAX_ORDER = 5 +NEWTON_MAXITER = 4 +MIN_FACTOR = 0.2 +MAX_FACTOR = 10 + + +def compute_R(order, factor): + """Compute the matrix for changing the differences array.""" + I = np.arange(1, order + 1)[:, None] + J = np.arange(1, order + 1) + M = np.zeros((order + 1, order + 1)) + M[1:, 1:] = (I - 1 - factor * J) / I + M[0] = 1 + return np.cumprod(M, axis=0) + + +def change_D(D, order, factor): + """Change differences array in-place when step size is changed.""" + R = compute_R(order, factor) + U = compute_R(order, 1) + RU = R.dot(U) + D[:order + 1] = np.dot(RU.T, D[:order + 1]) + + +def solve_bdf_system(fun, t_new, y_predict, c, psi, LU, solve_lu, scale, tol): + """Solve the algebraic system resulting from BDF method.""" + d = 0 + y = y_predict.copy() + dy_norm_old = None + converged = False + for k in range(NEWTON_MAXITER): + f = fun(t_new, y) + if not np.all(np.isfinite(f)): + break + + dy = solve_lu(LU, c * f - psi - d) + dy_norm = norm(dy / scale) + + if dy_norm_old is 
None: + rate = None + else: + rate = dy_norm / dy_norm_old + + if (rate is not None and (rate >= 1 or + rate ** (NEWTON_MAXITER - k) / (1 - rate) * dy_norm > tol)): + break + + y += dy + d += dy + + if (dy_norm == 0 or + rate is not None and rate / (1 - rate) * dy_norm < tol): + converged = True + break + + dy_norm_old = dy_norm + + return converged, k + 1, y, d + + +class BDF(OdeSolver): + """Implicit method based on backward-differentiation formulas. + + This is a variable order method with the order varying automatically from + 1 to 5. The general framework of the BDF algorithm is described in [1]_. + This class implements a quasi-constant step size as explained in [2]_. + The error estimation strategy for the constant-step BDF is derived in [3]_. + An accuracy enhancement using modified formulas (NDF) [2]_ is also implemented. + + Can be applied in the complex domain. + + Parameters + ---------- + fun : callable + Right-hand side of the system: the time derivative of the state ``y`` + at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a + scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. ``fun`` must + return an array of the same shape as ``y``. See `vectorized` for more + information. + t0 : float + Initial time. + y0 : array_like, shape (n,) + Initial state. + t_bound : float + Boundary time - the integration won't continue beyond it. It also + determines the direction of the integration. + first_step : float or None, optional + Initial step size. Default is ``None`` which means that the algorithm + should choose. + max_step : float, optional + Maximum allowed step size. Default is np.inf, i.e., the step size is not + bounded and determined solely by the solver. + rtol, atol : float and array_like, optional + Relative and absolute tolerances. The solver keeps the local error + estimates less than ``atol + rtol * abs(y)``. 
Here `rtol` controls a + relative accuracy (number of correct digits), while `atol` controls + absolute accuracy (number of correct decimal places). To achieve the + desired `rtol`, set `atol` to be smaller than the smallest value that + can be expected from ``rtol * abs(y)`` so that `rtol` dominates the + allowable error. If `atol` is larger than ``rtol * abs(y)`` the + number of correct digits is not guaranteed. Conversely, to achieve the + desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller + than `atol`. If components of y have different scales, it might be + beneficial to set different `atol` values for different components by + passing array_like with shape (n,) for `atol`. Default values are + 1e-3 for `rtol` and 1e-6 for `atol`. + jac : {None, array_like, sparse_matrix, callable}, optional + Jacobian matrix of the right-hand side of the system with respect to y, + required by this method. The Jacobian matrix has shape (n, n) and its + element (i, j) is equal to ``d f_i / d y_j``. + There are three ways to define the Jacobian: + + * If array_like or sparse_matrix, the Jacobian is assumed to + be constant. + * If callable, the Jacobian is assumed to depend on both + t and y; it will be called as ``jac(t, y)`` as necessary. + For the 'Radau' and 'BDF' methods, the return value might be a + sparse matrix. + * If None (default), the Jacobian will be approximated by + finite differences. + + It is generally recommended to provide the Jacobian rather than + relying on a finite-difference approximation. + jac_sparsity : {None, array_like, sparse matrix}, optional + Defines a sparsity structure of the Jacobian matrix for a + finite-difference approximation. Its shape must be (n, n). This argument + is ignored if `jac` is not `None`. If the Jacobian has only few non-zero + elements in *each* row, providing the sparsity structure will greatly + speed up the computations [4]_. 
A zero entry means that a corresponding + element in the Jacobian is always zero. If None (default), the Jacobian + is assumed to be dense. + vectorized : bool, optional + Whether `fun` can be called in a vectorized fashion. Default is False. + + If ``vectorized`` is False, `fun` will always be called with ``y`` of + shape ``(n,)``, where ``n = len(y0)``. + + If ``vectorized`` is True, `fun` may be called with ``y`` of shape + ``(n, k)``, where ``k`` is an integer. In this case, `fun` must behave + such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of + the returned array is the time derivative of the state corresponding + with a column of ``y``). + + Setting ``vectorized=True`` allows for faster finite difference + approximation of the Jacobian by this method, but may result in slower + execution overall in some circumstances (e.g. small ``len(y0)``). + + Attributes + ---------- + n : int + Number of equations. + status : string + Current status of the solver: 'running', 'finished' or 'failed'. + t_bound : float + Boundary time. + direction : float + Integration direction: +1 or -1. + t : float + Current time. + y : ndarray + Current state. + t_old : float + Previous time. None if no steps were made yet. + step_size : float + Size of the last successful step. None if no steps were made yet. + nfev : int + Number of evaluations of the right-hand side. + njev : int + Number of evaluations of the Jacobian. + nlu : int + Number of LU decompositions. + + References + ---------- + .. [1] G. D. Byrne, A. C. Hindmarsh, "A Polyalgorithm for the Numerical + Solution of Ordinary Differential Equations", ACM Transactions on + Mathematical Software, Vol. 1, No. 1, pp. 71-96, March 1975. + .. [2] L. F. Shampine, M. W. Reichelt, "THE MATLAB ODE SUITE", SIAM J. SCI. + COMPUTE., Vol. 18, No. 1, pp. 1-22, January 1997. + .. [3] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations I: + Nonstiff Problems", Sec. III.2. + .. [4] A. Curtis, M. J. D. 
Powell, and J. Reid, "On the estimation of + sparse Jacobian matrices", Journal of the Institute of Mathematics + and its Applications, 13, pp. 117-120, 1974. + """ + def __init__(self, fun, t0, y0, t_bound, max_step=np.inf, + rtol=1e-3, atol=1e-6, jac=None, jac_sparsity=None, + vectorized=False, first_step=None, **extraneous): + warn_extraneous(extraneous) + super().__init__(fun, t0, y0, t_bound, vectorized, + support_complex=True) + self.max_step = validate_max_step(max_step) + self.rtol, self.atol = validate_tol(rtol, atol, self.n) + f = self.fun(self.t, self.y) + if first_step is None: + self.h_abs = select_initial_step(self.fun, self.t, self.y, + t_bound, max_step, f, + self.direction, 1, + self.rtol, self.atol) + else: + self.h_abs = validate_first_step(first_step, t0, t_bound) + self.h_abs_old = None + self.error_norm_old = None + + self.newton_tol = max(10 * EPS / rtol, min(0.03, rtol ** 0.5)) + + self.jac_factor = None + self.jac, self.J = self._validate_jac(jac, jac_sparsity) + if issparse(self.J): + def lu(A): + self.nlu += 1 + return splu(A) + + def solve_lu(LU, b): + return LU.solve(b) + + I = eye(self.n, format='csc', dtype=self.y.dtype) + else: + def lu(A): + self.nlu += 1 + return lu_factor(A, overwrite_a=True) + + def solve_lu(LU, b): + return lu_solve(LU, b, overwrite_b=True) + + I = np.identity(self.n, dtype=self.y.dtype) + + self.lu = lu + self.solve_lu = solve_lu + self.I = I + + kappa = np.array([0, -0.1850, -1/9, -0.0823, -0.0415, 0]) + self.gamma = np.hstack((0, np.cumsum(1 / np.arange(1, MAX_ORDER + 1)))) + self.alpha = (1 - kappa) * self.gamma + self.error_const = kappa * self.gamma + 1 / np.arange(1, MAX_ORDER + 2) + + D = np.empty((MAX_ORDER + 3, self.n), dtype=self.y.dtype) + D[0] = self.y + D[1] = f * self.h_abs * self.direction + self.D = D + + self.order = 1 + self.n_equal_steps = 0 + self.LU = None + + def _validate_jac(self, jac, sparsity): + t0 = self.t + y0 = self.y + + if jac is None: + if sparsity is not None: + if 
issparse(sparsity): + sparsity = csc_matrix(sparsity) + groups = group_columns(sparsity) + sparsity = (sparsity, groups) + + def jac_wrapped(t, y): + self.njev += 1 + f = self.fun_single(t, y) + J, self.jac_factor = num_jac(self.fun_vectorized, t, y, f, + self.atol, self.jac_factor, + sparsity) + return J + J = jac_wrapped(t0, y0) + elif callable(jac): + J = jac(t0, y0) + self.njev += 1 + if issparse(J): + J = csc_matrix(J, dtype=y0.dtype) + + def jac_wrapped(t, y): + self.njev += 1 + return csc_matrix(jac(t, y), dtype=y0.dtype) + else: + J = np.asarray(J, dtype=y0.dtype) + + def jac_wrapped(t, y): + self.njev += 1 + return np.asarray(jac(t, y), dtype=y0.dtype) + + if J.shape != (self.n, self.n): + raise ValueError("`jac` is expected to have shape {}, but " + "actually has {}." + .format((self.n, self.n), J.shape)) + else: + if issparse(jac): + J = csc_matrix(jac, dtype=y0.dtype) + else: + J = np.asarray(jac, dtype=y0.dtype) + + if J.shape != (self.n, self.n): + raise ValueError("`jac` is expected to have shape {}, but " + "actually has {}." 
+ .format((self.n, self.n), J.shape)) + jac_wrapped = None + + return jac_wrapped, J + + def _step_impl(self): + t = self.t + D = self.D + + max_step = self.max_step + min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t) + if self.h_abs > max_step: + h_abs = max_step + change_D(D, self.order, max_step / self.h_abs) + self.n_equal_steps = 0 + elif self.h_abs < min_step: + h_abs = min_step + change_D(D, self.order, min_step / self.h_abs) + self.n_equal_steps = 0 + else: + h_abs = self.h_abs + + atol = self.atol + rtol = self.rtol + order = self.order + + alpha = self.alpha + gamma = self.gamma + error_const = self.error_const + + J = self.J + LU = self.LU + current_jac = self.jac is None + + step_accepted = False + while not step_accepted: + if h_abs < min_step: + return False, self.TOO_SMALL_STEP + + h = h_abs * self.direction + t_new = t + h + + if self.direction * (t_new - self.t_bound) > 0: + t_new = self.t_bound + change_D(D, order, np.abs(t_new - t) / h_abs) + self.n_equal_steps = 0 + LU = None + + h = t_new - t + h_abs = np.abs(h) + + y_predict = np.sum(D[:order + 1], axis=0) + + scale = atol + rtol * np.abs(y_predict) + psi = np.dot(D[1: order + 1].T, gamma[1: order + 1]) / alpha[order] + + converged = False + c = h / alpha[order] + while not converged: + if LU is None: + LU = self.lu(self.I - c * J) + + converged, n_iter, y_new, d = solve_bdf_system( + self.fun, t_new, y_predict, c, psi, LU, self.solve_lu, + scale, self.newton_tol) + + if not converged: + if current_jac: + break + J = self.jac(t_new, y_predict) + LU = None + current_jac = True + + if not converged: + factor = 0.5 + h_abs *= factor + change_D(D, order, factor) + self.n_equal_steps = 0 + LU = None + continue + + safety = 0.9 * (2 * NEWTON_MAXITER + 1) / (2 * NEWTON_MAXITER + + n_iter) + + scale = atol + rtol * np.abs(y_new) + error = error_const[order] * d + error_norm = norm(error / scale) + + if error_norm > 1: + factor = max(MIN_FACTOR, + safety * error_norm ** (-1 / (order 
+ 1))) + h_abs *= factor + change_D(D, order, factor) + self.n_equal_steps = 0 + # As we didn't have problems with convergence, we don't + # reset LU here. + else: + step_accepted = True + + self.n_equal_steps += 1 + + self.t = t_new + self.y = y_new + + self.h_abs = h_abs + self.J = J + self.LU = LU + + # Update differences. The principal relation here is + # D^{j + 1} y_n = D^{j} y_n - D^{j} y_{n - 1}. Keep in mind that D + # contained difference for previous interpolating polynomial and + # d = D^{k + 1} y_n. Thus this elegant code follows. + D[order + 2] = d - D[order + 1] + D[order + 1] = d + for i in reversed(range(order + 1)): + D[i] += D[i + 1] + + if self.n_equal_steps < order + 1: + return True, None + + if order > 1: + error_m = error_const[order - 1] * D[order] + error_m_norm = norm(error_m / scale) + else: + error_m_norm = np.inf + + if order < MAX_ORDER: + error_p = error_const[order + 1] * D[order + 2] + error_p_norm = norm(error_p / scale) + else: + error_p_norm = np.inf + + error_norms = np.array([error_m_norm, error_norm, error_p_norm]) + with np.errstate(divide='ignore'): + factors = error_norms ** (-1 / np.arange(order, order + 3)) + + delta_order = np.argmax(factors) - 1 + order += delta_order + self.order = order + + factor = min(MAX_FACTOR, safety * np.max(factors)) + self.h_abs *= factor + change_D(D, order, factor) + self.n_equal_steps = 0 + self.LU = None + + return True, None + + def _dense_output_impl(self): + return BdfDenseOutput(self.t_old, self.t, self.h_abs * self.direction, + self.order, self.D[:self.order + 1].copy()) + + +class BdfDenseOutput(DenseOutput): + def __init__(self, t_old, t, h, order, D): + super().__init__(t_old, t) + self.order = order + self.t_shift = self.t - h * np.arange(self.order) + self.denom = h * (1 + np.arange(self.order)) + self.D = D + + def _call_impl(self, t): + if t.ndim == 0: + x = (t - self.t_shift) / self.denom + p = np.cumprod(x) + else: + x = (t - self.t_shift[:, None]) / self.denom[:, None] + p 
= np.cumprod(x, axis=0) + + y = np.dot(self.D[1:].T, p) + if y.ndim == 1: + y += self.D[0] + else: + y += self.D[0, :, None] + + return y diff --git a/parrot/lib/python3.10/site-packages/scipy/integrate/_ivp/dop853_coefficients.py b/parrot/lib/python3.10/site-packages/scipy/integrate/_ivp/dop853_coefficients.py new file mode 100644 index 0000000000000000000000000000000000000000..f39f2f3650d321e2c475d4e220f9769139118a5e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/integrate/_ivp/dop853_coefficients.py @@ -0,0 +1,193 @@ +import numpy as np + +N_STAGES = 12 +N_STAGES_EXTENDED = 16 +INTERPOLATOR_POWER = 7 + +C = np.array([0.0, + 0.526001519587677318785587544488e-01, + 0.789002279381515978178381316732e-01, + 0.118350341907227396726757197510, + 0.281649658092772603273242802490, + 0.333333333333333333333333333333, + 0.25, + 0.307692307692307692307692307692, + 0.651282051282051282051282051282, + 0.6, + 0.857142857142857142857142857142, + 1.0, + 1.0, + 0.1, + 0.2, + 0.777777777777777777777777777778]) + +A = np.zeros((N_STAGES_EXTENDED, N_STAGES_EXTENDED)) +A[1, 0] = 5.26001519587677318785587544488e-2 + +A[2, 0] = 1.97250569845378994544595329183e-2 +A[2, 1] = 5.91751709536136983633785987549e-2 + +A[3, 0] = 2.95875854768068491816892993775e-2 +A[3, 2] = 8.87627564304205475450678981324e-2 + +A[4, 0] = 2.41365134159266685502369798665e-1 +A[4, 2] = -8.84549479328286085344864962717e-1 +A[4, 3] = 9.24834003261792003115737966543e-1 + +A[5, 0] = 3.7037037037037037037037037037e-2 +A[5, 3] = 1.70828608729473871279604482173e-1 +A[5, 4] = 1.25467687566822425016691814123e-1 + +A[6, 0] = 3.7109375e-2 +A[6, 3] = 1.70252211019544039314978060272e-1 +A[6, 4] = 6.02165389804559606850219397283e-2 +A[6, 5] = -1.7578125e-2 + +A[7, 0] = 3.70920001185047927108779319836e-2 +A[7, 3] = 1.70383925712239993810214054705e-1 +A[7, 4] = 1.07262030446373284651809199168e-1 +A[7, 5] = -1.53194377486244017527936158236e-2 +A[7, 6] = 8.27378916381402288758473766002e-3 + +A[8, 0] = 
6.24110958716075717114429577812e-1 +A[8, 3] = -3.36089262944694129406857109825 +A[8, 4] = -8.68219346841726006818189891453e-1 +A[8, 5] = 2.75920996994467083049415600797e1 +A[8, 6] = 2.01540675504778934086186788979e1 +A[8, 7] = -4.34898841810699588477366255144e1 + +A[9, 0] = 4.77662536438264365890433908527e-1 +A[9, 3] = -2.48811461997166764192642586468 +A[9, 4] = -5.90290826836842996371446475743e-1 +A[9, 5] = 2.12300514481811942347288949897e1 +A[9, 6] = 1.52792336328824235832596922938e1 +A[9, 7] = -3.32882109689848629194453265587e1 +A[9, 8] = -2.03312017085086261358222928593e-2 + +A[10, 0] = -9.3714243008598732571704021658e-1 +A[10, 3] = 5.18637242884406370830023853209 +A[10, 4] = 1.09143734899672957818500254654 +A[10, 5] = -8.14978701074692612513997267357 +A[10, 6] = -1.85200656599969598641566180701e1 +A[10, 7] = 2.27394870993505042818970056734e1 +A[10, 8] = 2.49360555267965238987089396762 +A[10, 9] = -3.0467644718982195003823669022 + +A[11, 0] = 2.27331014751653820792359768449 +A[11, 3] = -1.05344954667372501984066689879e1 +A[11, 4] = -2.00087205822486249909675718444 +A[11, 5] = -1.79589318631187989172765950534e1 +A[11, 6] = 2.79488845294199600508499808837e1 +A[11, 7] = -2.85899827713502369474065508674 +A[11, 8] = -8.87285693353062954433549289258 +A[11, 9] = 1.23605671757943030647266201528e1 +A[11, 10] = 6.43392746015763530355970484046e-1 + +A[12, 0] = 5.42937341165687622380535766363e-2 +A[12, 5] = 4.45031289275240888144113950566 +A[12, 6] = 1.89151789931450038304281599044 +A[12, 7] = -5.8012039600105847814672114227 +A[12, 8] = 3.1116436695781989440891606237e-1 +A[12, 9] = -1.52160949662516078556178806805e-1 +A[12, 10] = 2.01365400804030348374776537501e-1 +A[12, 11] = 4.47106157277725905176885569043e-2 + +A[13, 0] = 5.61675022830479523392909219681e-2 +A[13, 6] = 2.53500210216624811088794765333e-1 +A[13, 7] = -2.46239037470802489917441475441e-1 +A[13, 8] = -1.24191423263816360469010140626e-1 +A[13, 9] = 1.5329179827876569731206322685e-1 +A[13, 10] = 
8.20105229563468988491666602057e-3 +A[13, 11] = 7.56789766054569976138603589584e-3 +A[13, 12] = -8.298e-3 + +A[14, 0] = 3.18346481635021405060768473261e-2 +A[14, 5] = 2.83009096723667755288322961402e-2 +A[14, 6] = 5.35419883074385676223797384372e-2 +A[14, 7] = -5.49237485713909884646569340306e-2 +A[14, 10] = -1.08347328697249322858509316994e-4 +A[14, 11] = 3.82571090835658412954920192323e-4 +A[14, 12] = -3.40465008687404560802977114492e-4 +A[14, 13] = 1.41312443674632500278074618366e-1 + +A[15, 0] = -4.28896301583791923408573538692e-1 +A[15, 5] = -4.69762141536116384314449447206 +A[15, 6] = 7.68342119606259904184240953878 +A[15, 7] = 4.06898981839711007970213554331 +A[15, 8] = 3.56727187455281109270669543021e-1 +A[15, 12] = -1.39902416515901462129418009734e-3 +A[15, 13] = 2.9475147891527723389556272149 +A[15, 14] = -9.15095847217987001081870187138 + + +B = A[N_STAGES, :N_STAGES] + +E3 = np.zeros(N_STAGES + 1) +E3[:-1] = B.copy() +E3[0] -= 0.244094488188976377952755905512 +E3[8] -= 0.733846688281611857341361741547 +E3[11] -= 0.220588235294117647058823529412e-1 + +E5 = np.zeros(N_STAGES + 1) +E5[0] = 0.1312004499419488073250102996e-1 +E5[5] = -0.1225156446376204440720569753e+1 +E5[6] = -0.4957589496572501915214079952 +E5[7] = 0.1664377182454986536961530415e+1 +E5[8] = -0.3503288487499736816886487290 +E5[9] = 0.3341791187130174790297318841 +E5[10] = 0.8192320648511571246570742613e-1 +E5[11] = -0.2235530786388629525884427845e-1 + +# First 3 coefficients are computed separately. 
+D = np.zeros((INTERPOLATOR_POWER - 3, N_STAGES_EXTENDED)) +D[0, 0] = -0.84289382761090128651353491142e+1 +D[0, 5] = 0.56671495351937776962531783590 +D[0, 6] = -0.30689499459498916912797304727e+1 +D[0, 7] = 0.23846676565120698287728149680e+1 +D[0, 8] = 0.21170345824450282767155149946e+1 +D[0, 9] = -0.87139158377797299206789907490 +D[0, 10] = 0.22404374302607882758541771650e+1 +D[0, 11] = 0.63157877876946881815570249290 +D[0, 12] = -0.88990336451333310820698117400e-1 +D[0, 13] = 0.18148505520854727256656404962e+2 +D[0, 14] = -0.91946323924783554000451984436e+1 +D[0, 15] = -0.44360363875948939664310572000e+1 + +D[1, 0] = 0.10427508642579134603413151009e+2 +D[1, 5] = 0.24228349177525818288430175319e+3 +D[1, 6] = 0.16520045171727028198505394887e+3 +D[1, 7] = -0.37454675472269020279518312152e+3 +D[1, 8] = -0.22113666853125306036270938578e+2 +D[1, 9] = 0.77334326684722638389603898808e+1 +D[1, 10] = -0.30674084731089398182061213626e+2 +D[1, 11] = -0.93321305264302278729567221706e+1 +D[1, 12] = 0.15697238121770843886131091075e+2 +D[1, 13] = -0.31139403219565177677282850411e+2 +D[1, 14] = -0.93529243588444783865713862664e+1 +D[1, 15] = 0.35816841486394083752465898540e+2 + +D[2, 0] = 0.19985053242002433820987653617e+2 +D[2, 5] = -0.38703730874935176555105901742e+3 +D[2, 6] = -0.18917813819516756882830838328e+3 +D[2, 7] = 0.52780815920542364900561016686e+3 +D[2, 8] = -0.11573902539959630126141871134e+2 +D[2, 9] = 0.68812326946963000169666922661e+1 +D[2, 10] = -0.10006050966910838403183860980e+1 +D[2, 11] = 0.77771377980534432092869265740 +D[2, 12] = -0.27782057523535084065932004339e+1 +D[2, 13] = -0.60196695231264120758267380846e+2 +D[2, 14] = 0.84320405506677161018159903784e+2 +D[2, 15] = 0.11992291136182789328035130030e+2 + +D[3, 0] = -0.25693933462703749003312586129e+2 +D[3, 5] = -0.15418974869023643374053993627e+3 +D[3, 6] = -0.23152937917604549567536039109e+3 +D[3, 7] = 0.35763911791061412378285349910e+3 +D[3, 8] = 0.93405324183624310003907691704e+2 +D[3, 9] = 
-0.37458323136451633156875139351e+2 +D[3, 10] = 0.10409964950896230045147246184e+3 +D[3, 11] = 0.29840293426660503123344363579e+2 +D[3, 12] = -0.43533456590011143754432175058e+2 +D[3, 13] = 0.96324553959188282948394950600e+2 +D[3, 14] = -0.39177261675615439165231486172e+2 +D[3, 15] = -0.14972683625798562581422125276e+3 diff --git a/parrot/lib/python3.10/site-packages/scipy/integrate/_ivp/radau.py b/parrot/lib/python3.10/site-packages/scipy/integrate/_ivp/radau.py new file mode 100644 index 0000000000000000000000000000000000000000..e13cb0f14c3c3e1102b828d4255609ab41d8d2a2 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/integrate/_ivp/radau.py @@ -0,0 +1,574 @@ +import numpy as np +from scipy.linalg import lu_factor, lu_solve +from scipy.sparse import csc_matrix, issparse, eye +from scipy.sparse.linalg import splu +from scipy.optimize._numdiff import group_columns +from .common import (validate_max_step, validate_tol, select_initial_step, + norm, num_jac, EPS, warn_extraneous, + validate_first_step) +from .base import OdeSolver, DenseOutput + +S6 = 6 ** 0.5 + +# Butcher tableau. A is not used directly, see below. +C = np.array([(4 - S6) / 10, (4 + S6) / 10, 1]) +E = np.array([-13 - 7 * S6, -13 + 7 * S6, -1]) / 3 + +# Eigendecomposition of A is done: A = T L T**-1. There is 1 real eigenvalue +# and a complex conjugate pair. They are written below. +MU_REAL = 3 + 3 ** (2 / 3) - 3 ** (1 / 3) +MU_COMPLEX = (3 + 0.5 * (3 ** (1 / 3) - 3 ** (2 / 3)) + - 0.5j * (3 ** (5 / 6) + 3 ** (7 / 6))) + +# These are transformation matrices. 
+T = np.array([ + [0.09443876248897524, -0.14125529502095421, 0.03002919410514742], + [0.25021312296533332, 0.20412935229379994, -0.38294211275726192], + [1, 1, 0]]) +TI = np.array([ + [4.17871859155190428, 0.32768282076106237, 0.52337644549944951], + [-4.17871859155190428, -0.32768282076106237, 0.47662355450055044], + [0.50287263494578682, -2.57192694985560522, 0.59603920482822492]]) +# These linear combinations are used in the algorithm. +TI_REAL = TI[0] +TI_COMPLEX = TI[1] + 1j * TI[2] + +# Interpolator coefficients. +P = np.array([ + [13/3 + 7*S6/3, -23/3 - 22*S6/3, 10/3 + 5 * S6], + [13/3 - 7*S6/3, -23/3 + 22*S6/3, 10/3 - 5 * S6], + [1/3, -8/3, 10/3]]) + + +NEWTON_MAXITER = 6 # Maximum number of Newton iterations. +MIN_FACTOR = 0.2 # Minimum allowed decrease in a step size. +MAX_FACTOR = 10 # Maximum allowed increase in a step size. + + +def solve_collocation_system(fun, t, y, h, Z0, scale, tol, + LU_real, LU_complex, solve_lu): + """Solve the collocation system. + + Parameters + ---------- + fun : callable + Right-hand side of the system. + t : float + Current time. + y : ndarray, shape (n,) + Current state. + h : float + Step to try. + Z0 : ndarray, shape (3, n) + Initial guess for the solution. It determines new values of `y` at + ``t + h * C`` as ``y + Z0``, where ``C`` is the Radau method constants. + scale : ndarray, shape (n) + Problem tolerance scale, i.e. ``rtol * abs(y) + atol``. + tol : float + Tolerance to which solve the system. This value is compared with + the normalized by `scale` error. + LU_real, LU_complex + LU decompositions of the system Jacobians. + solve_lu : callable + Callable which solves a linear system given a LU decomposition. The + signature is ``solve_lu(LU, b)``. + + Returns + ------- + converged : bool + Whether iterations converged. + n_iter : int + Number of completed iterations. + Z : ndarray, shape (3, n) + Found solution. + rate : float + The rate of convergence. 
+ """ + n = y.shape[0] + M_real = MU_REAL / h + M_complex = MU_COMPLEX / h + + W = TI.dot(Z0) + Z = Z0 + + F = np.empty((3, n)) + ch = h * C + + dW_norm_old = None + dW = np.empty_like(W) + converged = False + rate = None + for k in range(NEWTON_MAXITER): + for i in range(3): + F[i] = fun(t + ch[i], y + Z[i]) + + if not np.all(np.isfinite(F)): + break + + f_real = F.T.dot(TI_REAL) - M_real * W[0] + f_complex = F.T.dot(TI_COMPLEX) - M_complex * (W[1] + 1j * W[2]) + + dW_real = solve_lu(LU_real, f_real) + dW_complex = solve_lu(LU_complex, f_complex) + + dW[0] = dW_real + dW[1] = dW_complex.real + dW[2] = dW_complex.imag + + dW_norm = norm(dW / scale) + if dW_norm_old is not None: + rate = dW_norm / dW_norm_old + + if (rate is not None and (rate >= 1 or + rate ** (NEWTON_MAXITER - k) / (1 - rate) * dW_norm > tol)): + break + + W += dW + Z = T.dot(W) + + if (dW_norm == 0 or + rate is not None and rate / (1 - rate) * dW_norm < tol): + converged = True + break + + dW_norm_old = dW_norm + + return converged, k + 1, Z, rate + + +def predict_factor(h_abs, h_abs_old, error_norm, error_norm_old): + """Predict by which factor to increase/decrease the step size. + + The algorithm is described in [1]_. + + Parameters + ---------- + h_abs, h_abs_old : float + Current and previous values of the step size, `h_abs_old` can be None + (see Notes). + error_norm, error_norm_old : float + Current and previous values of the error norm, `error_norm_old` can + be None (see Notes). + + Returns + ------- + factor : float + Predicted factor. + + Notes + ----- + If `h_abs_old` and `error_norm_old` are both not None then a two-step + algorithm is used, otherwise a one-step algorithm is used. + + References + ---------- + .. [1] E. Hairer, S. P. Norsett G. Wanner, "Solving Ordinary Differential + Equations II: Stiff and Differential-Algebraic Problems", Sec. IV.8. 
+ """ + if error_norm_old is None or h_abs_old is None or error_norm == 0: + multiplier = 1 + else: + multiplier = h_abs / h_abs_old * (error_norm_old / error_norm) ** 0.25 + + with np.errstate(divide='ignore'): + factor = min(1, multiplier) * error_norm ** -0.25 + + return factor + + +class Radau(OdeSolver): + """Implicit Runge-Kutta method of Radau IIA family of order 5. + + The implementation follows [1]_. The error is controlled with a + third-order accurate embedded formula. A cubic polynomial which satisfies + the collocation conditions is used for the dense output. + + Parameters + ---------- + fun : callable + Right-hand side of the system: the time derivative of the state ``y`` + at time ``t``. The calling signature is ``fun(t, y)``, where ``t`` is a + scalar and ``y`` is an ndarray with ``len(y) = len(y0)``. ``fun`` must + return an array of the same shape as ``y``. See `vectorized` for more + information. + t0 : float + Initial time. + y0 : array_like, shape (n,) + Initial state. + t_bound : float + Boundary time - the integration won't continue beyond it. It also + determines the direction of the integration. + first_step : float or None, optional + Initial step size. Default is ``None`` which means that the algorithm + should choose. + max_step : float, optional + Maximum allowed step size. Default is np.inf, i.e., the step size is not + bounded and determined solely by the solver. + rtol, atol : float and array_like, optional + Relative and absolute tolerances. The solver keeps the local error + estimates less than ``atol + rtol * abs(y)``. HHere `rtol` controls a + relative accuracy (number of correct digits), while `atol` controls + absolute accuracy (number of correct decimal places). To achieve the + desired `rtol`, set `atol` to be smaller than the smallest value that + can be expected from ``rtol * abs(y)`` so that `rtol` dominates the + allowable error. If `atol` is larger than ``rtol * abs(y)`` the + number of correct digits is not guaranteed. 
Conversely, to achieve the + desired `atol` set `rtol` such that ``rtol * abs(y)`` is always smaller + than `atol`. If components of y have different scales, it might be + beneficial to set different `atol` values for different components by + passing array_like with shape (n,) for `atol`. Default values are + 1e-3 for `rtol` and 1e-6 for `atol`. + jac : {None, array_like, sparse_matrix, callable}, optional + Jacobian matrix of the right-hand side of the system with respect to + y, required by this method. The Jacobian matrix has shape (n, n) and + its element (i, j) is equal to ``d f_i / d y_j``. + There are three ways to define the Jacobian: + + * If array_like or sparse_matrix, the Jacobian is assumed to + be constant. + * If callable, the Jacobian is assumed to depend on both + t and y; it will be called as ``jac(t, y)`` as necessary. + For the 'Radau' and 'BDF' methods, the return value might be a + sparse matrix. + * If None (default), the Jacobian will be approximated by + finite differences. + + It is generally recommended to provide the Jacobian rather than + relying on a finite-difference approximation. + jac_sparsity : {None, array_like, sparse matrix}, optional + Defines a sparsity structure of the Jacobian matrix for a + finite-difference approximation. Its shape must be (n, n). This argument + is ignored if `jac` is not `None`. If the Jacobian has only few non-zero + elements in *each* row, providing the sparsity structure will greatly + speed up the computations [2]_. A zero entry means that a corresponding + element in the Jacobian is always zero. If None (default), the Jacobian + is assumed to be dense. + vectorized : bool, optional + Whether `fun` can be called in a vectorized fashion. Default is False. + + If ``vectorized`` is False, `fun` will always be called with ``y`` of + shape ``(n,)``, where ``n = len(y0)``. + + If ``vectorized`` is True, `fun` may be called with ``y`` of shape + ``(n, k)``, where ``k`` is an integer. 
In this case, `fun` must behave + such that ``fun(t, y)[:, i] == fun(t, y[:, i])`` (i.e. each column of + the returned array is the time derivative of the state corresponding + with a column of ``y``). + + Setting ``vectorized=True`` allows for faster finite difference + approximation of the Jacobian by this method, but may result in slower + execution overall in some circumstances (e.g. small ``len(y0)``). + + Attributes + ---------- + n : int + Number of equations. + status : string + Current status of the solver: 'running', 'finished' or 'failed'. + t_bound : float + Boundary time. + direction : float + Integration direction: +1 or -1. + t : float + Current time. + y : ndarray + Current state. + t_old : float + Previous time. None if no steps were made yet. + step_size : float + Size of the last successful step. None if no steps were made yet. + nfev : int + Number of evaluations of the right-hand side. + njev : int + Number of evaluations of the Jacobian. + nlu : int + Number of LU decompositions. + + References + ---------- + .. [1] E. Hairer, G. Wanner, "Solving Ordinary Differential Equations II: + Stiff and Differential-Algebraic Problems", Sec. IV.8. + .. [2] A. Curtis, M. J. D. Powell, and J. Reid, "On the estimation of + sparse Jacobian matrices", Journal of the Institute of Mathematics + and its Applications, 13, pp. 117-120, 1974. + """ + def __init__(self, fun, t0, y0, t_bound, max_step=np.inf, + rtol=1e-3, atol=1e-6, jac=None, jac_sparsity=None, + vectorized=False, first_step=None, **extraneous): + warn_extraneous(extraneous) + super().__init__(fun, t0, y0, t_bound, vectorized) + self.y_old = None + self.max_step = validate_max_step(max_step) + self.rtol, self.atol = validate_tol(rtol, atol, self.n) + self.f = self.fun(self.t, self.y) + # Select initial step assuming the same order which is used to control + # the error. 
+ if first_step is None: + self.h_abs = select_initial_step( + self.fun, self.t, self.y, t_bound, max_step, self.f, self.direction, + 3, self.rtol, self.atol) + else: + self.h_abs = validate_first_step(first_step, t0, t_bound) + self.h_abs_old = None + self.error_norm_old = None + + self.newton_tol = max(10 * EPS / rtol, min(0.03, rtol ** 0.5)) + self.sol = None + + self.jac_factor = None + self.jac, self.J = self._validate_jac(jac, jac_sparsity) + if issparse(self.J): + def lu(A): + self.nlu += 1 + return splu(A) + + def solve_lu(LU, b): + return LU.solve(b) + + I = eye(self.n, format='csc') + else: + def lu(A): + self.nlu += 1 + return lu_factor(A, overwrite_a=True) + + def solve_lu(LU, b): + return lu_solve(LU, b, overwrite_b=True) + + I = np.identity(self.n) + + self.lu = lu + self.solve_lu = solve_lu + self.I = I + + self.current_jac = True + self.LU_real = None + self.LU_complex = None + self.Z = None + + def _validate_jac(self, jac, sparsity): + t0 = self.t + y0 = self.y + + if jac is None: + if sparsity is not None: + if issparse(sparsity): + sparsity = csc_matrix(sparsity) + groups = group_columns(sparsity) + sparsity = (sparsity, groups) + + def jac_wrapped(t, y, f): + self.njev += 1 + J, self.jac_factor = num_jac(self.fun_vectorized, t, y, f, + self.atol, self.jac_factor, + sparsity) + return J + J = jac_wrapped(t0, y0, self.f) + elif callable(jac): + J = jac(t0, y0) + self.njev = 1 + if issparse(J): + J = csc_matrix(J) + + def jac_wrapped(t, y, _=None): + self.njev += 1 + return csc_matrix(jac(t, y), dtype=float) + + else: + J = np.asarray(J, dtype=float) + + def jac_wrapped(t, y, _=None): + self.njev += 1 + return np.asarray(jac(t, y), dtype=float) + + if J.shape != (self.n, self.n): + raise ValueError("`jac` is expected to have shape {}, but " + "actually has {}." 
+ .format((self.n, self.n), J.shape)) + else: + if issparse(jac): + J = csc_matrix(jac) + else: + J = np.asarray(jac, dtype=float) + + if J.shape != (self.n, self.n): + raise ValueError("`jac` is expected to have shape {}, but " + "actually has {}." + .format((self.n, self.n), J.shape)) + jac_wrapped = None + + return jac_wrapped, J + + def _step_impl(self): + t = self.t + y = self.y + f = self.f + + max_step = self.max_step + atol = self.atol + rtol = self.rtol + + min_step = 10 * np.abs(np.nextafter(t, self.direction * np.inf) - t) + if self.h_abs > max_step: + h_abs = max_step + h_abs_old = None + error_norm_old = None + elif self.h_abs < min_step: + h_abs = min_step + h_abs_old = None + error_norm_old = None + else: + h_abs = self.h_abs + h_abs_old = self.h_abs_old + error_norm_old = self.error_norm_old + + J = self.J + LU_real = self.LU_real + LU_complex = self.LU_complex + + current_jac = self.current_jac + jac = self.jac + + rejected = False + step_accepted = False + message = None + while not step_accepted: + if h_abs < min_step: + return False, self.TOO_SMALL_STEP + + h = h_abs * self.direction + t_new = t + h + + if self.direction * (t_new - self.t_bound) > 0: + t_new = self.t_bound + + h = t_new - t + h_abs = np.abs(h) + + if self.sol is None: + Z0 = np.zeros((3, y.shape[0])) + else: + Z0 = self.sol(t + h * C).T - y + + scale = atol + np.abs(y) * rtol + + converged = False + while not converged: + if LU_real is None or LU_complex is None: + LU_real = self.lu(MU_REAL / h * self.I - J) + LU_complex = self.lu(MU_COMPLEX / h * self.I - J) + + converged, n_iter, Z, rate = solve_collocation_system( + self.fun, t, y, h, Z0, scale, self.newton_tol, + LU_real, LU_complex, self.solve_lu) + + if not converged: + if current_jac: + break + + J = self.jac(t, y, f) + current_jac = True + LU_real = None + LU_complex = None + + if not converged: + h_abs *= 0.5 + LU_real = None + LU_complex = None + continue + + y_new = y + Z[-1] + ZE = Z.T.dot(E) / h + error = 
self.solve_lu(LU_real, f + ZE) + scale = atol + np.maximum(np.abs(y), np.abs(y_new)) * rtol + error_norm = norm(error / scale) + safety = 0.9 * (2 * NEWTON_MAXITER + 1) / (2 * NEWTON_MAXITER + + n_iter) + + if rejected and error_norm > 1: + error = self.solve_lu(LU_real, self.fun(t, y + error) + ZE) + error_norm = norm(error / scale) + + if error_norm > 1: + factor = predict_factor(h_abs, h_abs_old, + error_norm, error_norm_old) + h_abs *= max(MIN_FACTOR, safety * factor) + + LU_real = None + LU_complex = None + rejected = True + else: + step_accepted = True + + recompute_jac = jac is not None and n_iter > 2 and rate > 1e-3 + + factor = predict_factor(h_abs, h_abs_old, error_norm, error_norm_old) + factor = min(MAX_FACTOR, safety * factor) + + if not recompute_jac and factor < 1.2: + factor = 1 + else: + LU_real = None + LU_complex = None + + f_new = self.fun(t_new, y_new) + if recompute_jac: + J = jac(t_new, y_new, f_new) + current_jac = True + elif jac is not None: + current_jac = False + + self.h_abs_old = self.h_abs + self.error_norm_old = error_norm + + self.h_abs = h_abs * factor + + self.y_old = y + + self.t = t_new + self.y = y_new + self.f = f_new + + self.Z = Z + + self.LU_real = LU_real + self.LU_complex = LU_complex + self.current_jac = current_jac + self.J = J + + self.t_old = t + self.sol = self._compute_dense_output() + + return step_accepted, message + + def _compute_dense_output(self): + Q = np.dot(self.Z.T, P) + return RadauDenseOutput(self.t_old, self.t, self.y_old, Q) + + def _dense_output_impl(self): + return self.sol + + +class RadauDenseOutput(DenseOutput): + def __init__(self, t_old, t, y_old, Q): + super().__init__(t_old, t) + self.h = t - t_old + self.Q = Q + self.order = Q.shape[1] - 1 + self.y_old = y_old + + def _call_impl(self, t): + x = (t - self.t_old) / self.h + if t.ndim == 0: + p = np.tile(x, self.order + 1) + p = np.cumprod(p) + else: + p = np.tile(x, (self.order + 1, 1)) + p = np.cumprod(p, axis=0) + # Here we don't multiply by 
h, not a mistake. + y = np.dot(self.Q, p) + if y.ndim == 2: + y += self.y_old[:, None] + else: + y += self.y_old + + return y diff --git a/parrot/lib/python3.10/site-packages/scipy/integrate/_ode.py b/parrot/lib/python3.10/site-packages/scipy/integrate/_ode.py new file mode 100644 index 0000000000000000000000000000000000000000..794a4dc6372164db4c4f1540649b20fd945cd81c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/integrate/_ode.py @@ -0,0 +1,1376 @@ +# Authors: Pearu Peterson, Pauli Virtanen, John Travers +""" +First-order ODE integrators. + +User-friendly interface to various numerical integrators for solving a +system of first order ODEs with prescribed initial conditions:: + + d y(t)[i] + --------- = f(t,y(t))[i], + d t + + y(t=0)[i] = y0[i], + +where:: + + i = 0, ..., len(y0) - 1 + +class ode +--------- + +A generic interface class to numeric integrators. It has the following +methods:: + + integrator = ode(f, jac=None) + integrator = integrator.set_integrator(name, **params) + integrator = integrator.set_initial_value(y0, t0=0.0) + integrator = integrator.set_f_params(*args) + integrator = integrator.set_jac_params(*args) + y1 = integrator.integrate(t1, step=False, relax=False) + flag = integrator.successful() + +class complex_ode +----------------- + +This class has the same generic interface as ode, except it can handle complex +f, y and Jacobians by transparently translating them into the equivalent +real-valued system. It supports the real-valued solvers (i.e., not zvode) and is +an alternative to ode with the zvode solver, sometimes performing better. +""" +# XXX: Integrators must have: +# =========================== +# cvode - C version of vode and vodpk with many improvements. +# Get it from http://www.netlib.org/ode/cvode.tar.gz. +# To wrap cvode to Python, one must write the extension module by +# hand. Its interface is too much 'advanced C' that using f2py +# would be too complicated (or impossible). 
+# +# How to define a new integrator: +# =============================== +# +# class myodeint(IntegratorBase): +# +# runner = or None +# +# def __init__(self,...): # required +# +# +# def reset(self,n,has_jac): # optional +# # n - the size of the problem (number of equations) +# # has_jac - whether user has supplied its own routine for Jacobian +# +# +# def run(self,f,jac,y0,t0,t1,f_params,jac_params): # required +# # this method is called to integrate from t=t0 to t=t1 +# # with initial condition y0. f and jac are user-supplied functions +# # that define the problem. f_params,jac_params are additional +# # arguments +# # to these functions. +# +# if : +# self.success = 0 +# return t1,y1 +# +# # In addition, one can define step() and run_relax() methods (they +# # take the same arguments as run()) if the integrator can support +# # these features (see IntegratorBase doc strings). +# +# if myodeint.runner: +# IntegratorBase.integrator_classes.append(myodeint) + +__all__ = ['ode', 'complex_ode'] + +import re +import warnings + +from numpy import asarray, array, zeros, isscalar, real, imag, vstack + +from . import _vode +from . import _dop +from . import _lsoda + + +_dop_int_dtype = _dop.types.intvar.dtype +_vode_int_dtype = _vode.types.intvar.dtype +_lsoda_int_dtype = _lsoda.types.intvar.dtype + + +# ------------------------------------------------------------------------------ +# User interface +# ------------------------------------------------------------------------------ + + +class ode: + """ + A generic interface class to numeric integrators. + + Solve an equation system :math:`y'(t) = f(t,y)` with (optional) ``jac = df/dy``. + + *Note*: The first two arguments of ``f(t, y, ...)`` are in the + opposite order of the arguments in the system definition function used + by `scipy.integrate.odeint`. + + Parameters + ---------- + f : callable ``f(t, y, *f_args)`` + Right-hand side of the differential equation. t is a scalar, + ``y.shape == (n,)``. 
+ ``f_args`` is set by calling ``set_f_params(*args)``. + `f` should return a scalar, array or list (not a tuple). + jac : callable ``jac(t, y, *jac_args)``, optional + Jacobian of the right-hand side, ``jac[i,j] = d f[i] / d y[j]``. + ``jac_args`` is set by calling ``set_jac_params(*args)``. + + Attributes + ---------- + t : float + Current time. + y : ndarray + Current variable values. + + See also + -------- + odeint : an integrator with a simpler interface based on lsoda from ODEPACK + quad : for finding the area under a curve + + Notes + ----- + Available integrators are listed below. They can be selected using + the `set_integrator` method. + + "vode" + + Real-valued Variable-coefficient Ordinary Differential Equation + solver, with fixed-leading-coefficient implementation. It provides + implicit Adams method (for non-stiff problems) and a method based on + backward differentiation formulas (BDF) (for stiff problems). + + Source: http://www.netlib.org/ode/vode.f + + .. warning:: + + This integrator is not re-entrant. You cannot have two `ode` + instances using the "vode" integrator at the same time. + + This integrator accepts the following parameters in `set_integrator` + method of the `ode` class: + + - atol : float or sequence + absolute tolerance for solution + - rtol : float or sequence + relative tolerance for solution + - lband : None or int + - uband : None or int + Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+uband. + Setting these requires your jac routine to return the jacobian + in packed format, jac_packed[i-j+uband, j] = jac[i,j]. The + dimension of the matrix must be (lband+uband+1, len(y)). + - method: 'adams' or 'bdf' + Which solver to use, Adams (non-stiff) or BDF (stiff) + - with_jacobian : bool + This option is only considered when the user has not supplied a + Jacobian function and has not indicated (by setting either band) + that the Jacobian is banded. 
In this case, `with_jacobian` specifies + whether the iteration method of the ODE solver's correction step is + chord iteration with an internally generated full Jacobian or + functional iteration with no Jacobian. + - nsteps : int + Maximum number of (internally defined) steps allowed during one + call to the solver. + - first_step : float + - min_step : float + - max_step : float + Limits for the step sizes used by the integrator. + - order : int + Maximum order used by the integrator, + order <= 12 for Adams, <= 5 for BDF. + + "zvode" + + Complex-valued Variable-coefficient Ordinary Differential Equation + solver, with fixed-leading-coefficient implementation. It provides + implicit Adams method (for non-stiff problems) and a method based on + backward differentiation formulas (BDF) (for stiff problems). + + Source: http://www.netlib.org/ode/zvode.f + + .. warning:: + + This integrator is not re-entrant. You cannot have two `ode` + instances using the "zvode" integrator at the same time. + + This integrator accepts the same parameters in `set_integrator` + as the "vode" solver. + + .. note:: + + When using ZVODE for a stiff system, it should only be used for + the case in which the function f is analytic, that is, when each f(i) + is an analytic function of each y(j). Analyticity means that the + partial derivative df(i)/dy(j) is a unique complex number, and this + fact is critical in the way ZVODE solves the dense or banded linear + systems that arise in the stiff case. For a complex stiff ODE system + in which f is not analytic, ZVODE is likely to have convergence + failures, and for this problem one should instead use DVODE on the + equivalent real system (in the real and imaginary parts of y). + + "lsoda" + + Real-valued Variable-coefficient Ordinary Differential Equation + solver, with fixed-leading-coefficient implementation. 
It provides + automatic method switching between implicit Adams method (for non-stiff + problems) and a method based on backward differentiation formulas (BDF) + (for stiff problems). + + Source: http://www.netlib.org/odepack + + .. warning:: + + This integrator is not re-entrant. You cannot have two `ode` + instances using the "lsoda" integrator at the same time. + + This integrator accepts the following parameters in `set_integrator` + method of the `ode` class: + + - atol : float or sequence + absolute tolerance for solution + - rtol : float or sequence + relative tolerance for solution + - lband : None or int + - uband : None or int + Jacobian band width, jac[i,j] != 0 for i-lband <= j <= i+uband. + Setting these requires your jac routine to return the jacobian + in packed format, jac_packed[i-j+uband, j] = jac[i,j]. + - with_jacobian : bool + *Not used.* + - nsteps : int + Maximum number of (internally defined) steps allowed during one + call to the solver. + - first_step : float + - min_step : float + - max_step : float + Limits for the step sizes used by the integrator. + - max_order_ns : int + Maximum order used in the nonstiff case (default 12). + - max_order_s : int + Maximum order used in the stiff case (default 5). + - max_hnil : int + Maximum number of messages reporting too small step size (t + h = t) + (default 0) + - ixpr : int + Whether to generate extra printing at method switches (default False). + + "dopri5" + + This is an explicit runge-kutta method of order (4)5 due to Dormand & + Prince (with stepsize control and dense output). + + Authors: + + E. Hairer and G. Wanner + Universite de Geneve, Dept. de Mathematiques + CH-1211 Geneve 24, Switzerland + e-mail: ernst.hairer@math.unige.ch, gerhard.wanner@math.unige.ch + + This code is described in [HNW93]_. 
+ + This integrator accepts the following parameters in set_integrator() + method of the ode class: + + - atol : float or sequence + absolute tolerance for solution + - rtol : float or sequence + relative tolerance for solution + - nsteps : int + Maximum number of (internally defined) steps allowed during one + call to the solver. + - first_step : float + - max_step : float + - safety : float + Safety factor on new step selection (default 0.9) + - ifactor : float + - dfactor : float + Maximum factor to increase/decrease step size by in one step + - beta : float + Beta parameter for stabilised step size control. + - verbosity : int + Switch for printing messages (< 0 for no messages). + + "dop853" + + This is an explicit runge-kutta method of order 8(5,3) due to Dormand + & Prince (with stepsize control and dense output). + + Options and references the same as "dopri5". + + Examples + -------- + + A problem to integrate and the corresponding jacobian: + + >>> from scipy.integrate import ode + >>> + >>> y0, t0 = [1.0j, 2.0], 0 + >>> + >>> def f(t, y, arg1): + ... return [1j*arg1*y[0] + y[1], -arg1*y[1]**2] + >>> def jac(t, y, arg1): + ... return [[1j*arg1, 1], [0, -arg1*2*y[1]]] + + The integration: + + >>> r = ode(f, jac).set_integrator('zvode', method='bdf') + >>> r.set_initial_value(y0, t0).set_f_params(2.0).set_jac_params(2.0) + >>> t1 = 10 + >>> dt = 1 + >>> while r.successful() and r.t < t1: + ... print(r.t+dt, r.integrate(r.t+dt)) + 1 [-0.71038232+0.23749653j 0.40000271+0.j ] + 2.0 [0.19098503-0.52359246j 0.22222356+0.j ] + 3.0 [0.47153208+0.52701229j 0.15384681+0.j ] + 4.0 [-0.61905937+0.30726255j 0.11764744+0.j ] + 5.0 [0.02340997-0.61418799j 0.09523835+0.j ] + 6.0 [0.58643071+0.339819j 0.08000018+0.j ] + 7.0 [-0.52070105+0.44525141j 0.06896565+0.j ] + 8.0 [-0.15986733-0.61234476j 0.06060616+0.j ] + 9.0 [0.64850462+0.15048982j 0.05405414+0.j ] + 10.0 [-0.38404699+0.56382299j 0.04878055+0.j ] + + References + ---------- + .. [HNW93] E. Hairer, S.P. 
Norsett and G. Wanner, Solving Ordinary + Differential Equations i. Nonstiff Problems. 2nd edition. + Springer Series in Computational Mathematics, + Springer-Verlag (1993) + + """ + + def __init__(self, f, jac=None): + self.stiff = 0 + self.f = f + self.jac = jac + self.f_params = () + self.jac_params = () + self._y = [] + + @property + def y(self): + return self._y + + def set_initial_value(self, y, t=0.0): + """Set initial conditions y(t) = y.""" + if isscalar(y): + y = [y] + n_prev = len(self._y) + if not n_prev: + self.set_integrator('') # find first available integrator + self._y = asarray(y, self._integrator.scalar) + self.t = t + self._integrator.reset(len(self._y), self.jac is not None) + return self + + def set_integrator(self, name, **integrator_params): + """ + Set integrator by name. + + Parameters + ---------- + name : str + Name of the integrator. + **integrator_params + Additional parameters for the integrator. + """ + integrator = find_integrator(name) + if integrator is None: + # FIXME: this really should be raise an exception. Will that break + # any code? + message = f'No integrator name match with {name!r} or is not available.' + warnings.warn(message, stacklevel=2) + else: + self._integrator = integrator(**integrator_params) + if not len(self._y): + self.t = 0.0 + self._y = array([0.0], self._integrator.scalar) + self._integrator.reset(len(self._y), self.jac is not None) + return self + + def integrate(self, t, step=False, relax=False): + """Find y=y(t), set y as an initial condition, and return y. + + Parameters + ---------- + t : float + The endpoint of the integration step. + step : bool + If True, and if the integrator supports the step method, + then perform a single integration step and return. + This parameter is provided in order to expose internals of + the implementation, and should not be changed from its default + value in most cases. 
+ relax : bool + If True and if the integrator supports the run_relax method, + then integrate until t_1 >= t and return. ``relax`` is not + referenced if ``step=True``. + This parameter is provided in order to expose internals of + the implementation, and should not be changed from its default + value in most cases. + + Returns + ------- + y : float + The integrated value at t + """ + if step and self._integrator.supports_step: + mth = self._integrator.step + elif relax and self._integrator.supports_run_relax: + mth = self._integrator.run_relax + else: + mth = self._integrator.run + + try: + self._y, self.t = mth(self.f, self.jac or (lambda: None), + self._y, self.t, t, + self.f_params, self.jac_params) + except SystemError as e: + # f2py issue with tuple returns, see ticket 1187. + raise ValueError( + 'Function to integrate must not return a tuple.' + ) from e + + return self._y + + def successful(self): + """Check if integration was successful.""" + try: + self._integrator + except AttributeError: + self.set_integrator('') + return self._integrator.success == 1 + + def get_return_code(self): + """Extracts the return code for the integration to enable better control + if the integration fails. + + In general, a return code > 0 implies success, while a return code < 0 + implies failure. + + Notes + ----- + This section describes possible return codes and their meaning, for available + integrators that can be selected by `set_integrator` method. + + "vode" + + =========== ======= + Return Code Message + =========== ======= + 2 Integration successful. + -1 Excess work done on this call. (Perhaps wrong MF.) + -2 Excess accuracy requested. (Tolerances too small.) + -3 Illegal input detected. (See printed message.) + -4 Repeated error test failures. (Check all input.) + -5 Repeated convergence failures. (Perhaps bad Jacobian + supplied or wrong choice of MF or tolerances.) + -6 Error weight became zero during problem. 
(Solution + component i vanished, and ATOL or ATOL(i) = 0.) + =========== ======= + + "zvode" + + =========== ======= + Return Code Message + =========== ======= + 2 Integration successful. + -1 Excess work done on this call. (Perhaps wrong MF.) + -2 Excess accuracy requested. (Tolerances too small.) + -3 Illegal input detected. (See printed message.) + -4 Repeated error test failures. (Check all input.) + -5 Repeated convergence failures. (Perhaps bad Jacobian + supplied or wrong choice of MF or tolerances.) + -6 Error weight became zero during problem. (Solution + component i vanished, and ATOL or ATOL(i) = 0.) + =========== ======= + + "dopri5" + + =========== ======= + Return Code Message + =========== ======= + 1 Integration successful. + 2 Integration successful (interrupted by solout). + -1 Input is not consistent. + -2 Larger nsteps is needed. + -3 Step size becomes too small. + -4 Problem is probably stiff (interrupted). + =========== ======= + + "dop853" + + =========== ======= + Return Code Message + =========== ======= + 1 Integration successful. + 2 Integration successful (interrupted by solout). + -1 Input is not consistent. + -2 Larger nsteps is needed. + -3 Step size becomes too small. + -4 Problem is probably stiff (interrupted). + =========== ======= + + "lsoda" + + =========== ======= + Return Code Message + =========== ======= + 2 Integration successful. + -1 Excess work done on this call (perhaps wrong Dfun type). + -2 Excess accuracy requested (tolerances too small). + -3 Illegal input detected (internal error). + -4 Repeated error test failures (internal error). + -5 Repeated convergence failures (perhaps bad Jacobian or tolerances). + -6 Error weight became zero during problem. + -7 Internal workspace insufficient to finish (internal error). 
+ =========== ======= + """ + try: + self._integrator + except AttributeError: + self.set_integrator('') + return self._integrator.istate + + def set_f_params(self, *args): + """Set extra parameters for user-supplied function f.""" + self.f_params = args + return self + + def set_jac_params(self, *args): + """Set extra parameters for user-supplied function jac.""" + self.jac_params = args + return self + + def set_solout(self, solout): + """ + Set callable to be called at every successful integration step. + + Parameters + ---------- + solout : callable + ``solout(t, y)`` is called at each internal integrator step, + t is a scalar providing the current independent position + y is the current solution ``y.shape == (n,)`` + solout should return -1 to stop integration + otherwise it should return None or 0 + + """ + if self._integrator.supports_solout: + self._integrator.set_solout(solout) + if self._y is not None: + self._integrator.reset(len(self._y), self.jac is not None) + else: + raise ValueError("selected integrator does not support solout," + " choose another one") + + +def _transform_banded_jac(bjac): + """ + Convert a real matrix of the form (for example) + + [0 0 A B] [0 0 0 B] + [0 0 C D] [0 0 A D] + [E F G H] to [0 F C H] + [I J K L] [E J G L] + [I 0 K 0] + + That is, every other column is shifted up one. + """ + # Shift every other column. + newjac = zeros((bjac.shape[0] + 1, bjac.shape[1])) + newjac[1:, ::2] = bjac[:, ::2] + newjac[:-1, 1::2] = bjac[:, 1::2] + return newjac + + +class complex_ode(ode): + """ + A wrapper of ode for complex systems. + + This functions similarly as `ode`, but re-maps a complex-valued + equation system to a real-valued one before using the integrators. + + Parameters + ---------- + f : callable ``f(t, y, *f_args)`` + Rhs of the equation. t is a scalar, ``y.shape == (n,)``. + ``f_args`` is set by calling ``set_f_params(*args)``. + jac : callable ``jac(t, y, *jac_args)`` + Jacobian of the rhs, ``jac[i,j] = d f[i] / d y[j]``. 
+ ``jac_args`` is set by calling ``set_f_params(*args)``. + + Attributes + ---------- + t : float + Current time. + y : ndarray + Current variable values. + + Examples + -------- + For usage examples, see `ode`. + + """ + + def __init__(self, f, jac=None): + self.cf = f + self.cjac = jac + if jac is None: + ode.__init__(self, self._wrap, None) + else: + ode.__init__(self, self._wrap, self._wrap_jac) + + def _wrap(self, t, y, *f_args): + f = self.cf(*((t, y[::2] + 1j * y[1::2]) + f_args)) + # self.tmp is a real-valued array containing the interleaved + # real and imaginary parts of f. + self.tmp[::2] = real(f) + self.tmp[1::2] = imag(f) + return self.tmp + + def _wrap_jac(self, t, y, *jac_args): + # jac is the complex Jacobian computed by the user-defined function. + jac = self.cjac(*((t, y[::2] + 1j * y[1::2]) + jac_args)) + + # jac_tmp is the real version of the complex Jacobian. Each complex + # entry in jac, say 2+3j, becomes a 2x2 block of the form + # [2 -3] + # [3 2] + jac_tmp = zeros((2 * jac.shape[0], 2 * jac.shape[1])) + jac_tmp[1::2, 1::2] = jac_tmp[::2, ::2] = real(jac) + jac_tmp[1::2, ::2] = imag(jac) + jac_tmp[::2, 1::2] = -jac_tmp[1::2, ::2] + + ml = getattr(self._integrator, 'ml', None) + mu = getattr(self._integrator, 'mu', None) + if ml is not None or mu is not None: + # Jacobian is banded. The user's Jacobian function has computed + # the complex Jacobian in packed format. The corresponding + # real-valued version has every other column shifted up. + jac_tmp = _transform_banded_jac(jac_tmp) + + return jac_tmp + + @property + def y(self): + return self._y[::2] + 1j * self._y[1::2] + + def set_integrator(self, name, **integrator_params): + """ + Set integrator by name. + + Parameters + ---------- + name : str + Name of the integrator + **integrator_params + Additional parameters for the integrator. 
+ """ + if name == 'zvode': + raise ValueError("zvode must be used with ode, not complex_ode") + + lband = integrator_params.get('lband') + uband = integrator_params.get('uband') + if lband is not None or uband is not None: + # The Jacobian is banded. Override the user-supplied bandwidths + # (which are for the complex Jacobian) with the bandwidths of + # the corresponding real-valued Jacobian wrapper of the complex + # Jacobian. + integrator_params['lband'] = 2 * (lband or 0) + 1 + integrator_params['uband'] = 2 * (uband or 0) + 1 + + return ode.set_integrator(self, name, **integrator_params) + + def set_initial_value(self, y, t=0.0): + """Set initial conditions y(t) = y.""" + y = asarray(y) + self.tmp = zeros(y.size * 2, 'float') + self.tmp[::2] = real(y) + self.tmp[1::2] = imag(y) + return ode.set_initial_value(self, self.tmp, t) + + def integrate(self, t, step=False, relax=False): + """Find y=y(t), set y as an initial condition, and return y. + + Parameters + ---------- + t : float + The endpoint of the integration step. + step : bool + If True, and if the integrator supports the step method, + then perform a single integration step and return. + This parameter is provided in order to expose internals of + the implementation, and should not be changed from its default + value in most cases. + relax : bool + If True and if the integrator supports the run_relax method, + then integrate until t_1 >= t and return. ``relax`` is not + referenced if ``step=True``. + This parameter is provided in order to expose internals of + the implementation, and should not be changed from its default + value in most cases. + + Returns + ------- + y : float + The integrated value at t + """ + y = ode.integrate(self, t, step, relax) + return y[::2] + 1j * y[1::2] + + def set_solout(self, solout): + """ + Set callable to be called at every successful integration step. 
+ + Parameters + ---------- + solout : callable + ``solout(t, y)`` is called at each internal integrator step, + t is a scalar providing the current independent position + y is the current solution ``y.shape == (n,)`` + solout should return -1 to stop integration + otherwise it should return None or 0 + + """ + if self._integrator.supports_solout: + self._integrator.set_solout(solout, complex=True) + else: + raise TypeError("selected integrator does not support solouta, " + "choose another one") + + +# ------------------------------------------------------------------------------ +# ODE integrators +# ------------------------------------------------------------------------------ + +def find_integrator(name): + for cl in IntegratorBase.integrator_classes: + if re.match(name, cl.__name__, re.I): + return cl + return None + + +class IntegratorConcurrencyError(RuntimeError): + """ + Failure due to concurrent usage of an integrator that can be used + only for a single problem at a time. + + """ + + def __init__(self, name): + msg = ("Integrator `%s` can be used to solve only a single problem " + "at a time. If you want to integrate multiple problems, " + "consider using a different integrator " + "(see `ode.set_integrator`)") % name + RuntimeError.__init__(self, msg) + + +class IntegratorBase: + runner = None # runner is None => integrator is not available + success = None # success==1 if integrator was called successfully + istate = None # istate > 0 means success, istate < 0 means failure + supports_run_relax = None + supports_step = None + supports_solout = False + integrator_classes = [] + scalar = float + + def acquire_new_handle(self): + # Some of the integrators have internal state (ancient + # Fortran...), and so only one instance can use them at a time. + # We keep track of this, and fail when concurrent usage is tried. 
+ self.__class__.active_global_handle += 1 + self.handle = self.__class__.active_global_handle + + def check_handle(self): + if self.handle is not self.__class__.active_global_handle: + raise IntegratorConcurrencyError(self.__class__.__name__) + + def reset(self, n, has_jac): + """Prepare integrator for call: allocate memory, set flags, etc. + n - number of equations. + has_jac - if user has supplied function for evaluating Jacobian. + """ + + def run(self, f, jac, y0, t0, t1, f_params, jac_params): + """Integrate from t=t0 to t=t1 using y0 as an initial condition. + Return 2-tuple (y1,t1) where y1 is the result and t=t1 + defines the stoppage coordinate of the result. + """ + raise NotImplementedError('all integrators must define ' + 'run(f, jac, t0, t1, y0, f_params, jac_params)') + + def step(self, f, jac, y0, t0, t1, f_params, jac_params): + """Make one integration step and return (y1,t1).""" + raise NotImplementedError('%s does not support step() method' % + self.__class__.__name__) + + def run_relax(self, f, jac, y0, t0, t1, f_params, jac_params): + """Integrate from t=t0 to t>=t1 and return (y1,t).""" + raise NotImplementedError('%s does not support run_relax() method' % + self.__class__.__name__) + + # XXX: __str__ method for getting visual state of the integrator + + +def _vode_banded_jac_wrapper(jacfunc, ml, jac_params): + """ + Wrap a banded Jacobian function with a function that pads + the Jacobian with `ml` rows of zeros. + """ + + def jac_wrapper(t, y): + jac = asarray(jacfunc(t, y, *jac_params)) + padded_jac = vstack((jac, zeros((ml, jac.shape[1])))) + return padded_jac + + return jac_wrapper + + +class vode(IntegratorBase): + runner = getattr(_vode, 'dvode', None) + + messages = {-1: 'Excess work done on this call. (Perhaps wrong MF.)', + -2: 'Excess accuracy requested. (Tolerances too small.)', + -3: 'Illegal input detected. (See printed message.)', + -4: 'Repeated error test failures. (Check all input.)', + -5: 'Repeated convergence failures. 
(Perhaps bad' + ' Jacobian supplied or wrong choice of MF or tolerances.)', + -6: 'Error weight became zero during problem. (Solution' + ' component i vanished, and ATOL or ATOL(i) = 0.)' + } + supports_run_relax = 1 + supports_step = 1 + active_global_handle = 0 + + def __init__(self, + method='adams', + with_jacobian=False, + rtol=1e-6, atol=1e-12, + lband=None, uband=None, + order=12, + nsteps=500, + max_step=0.0, # corresponds to infinite + min_step=0.0, + first_step=0.0, # determined by solver + ): + + if re.match(method, r'adams', re.I): + self.meth = 1 + elif re.match(method, r'bdf', re.I): + self.meth = 2 + else: + raise ValueError('Unknown integration method %s' % method) + self.with_jacobian = with_jacobian + self.rtol = rtol + self.atol = atol + self.mu = uband + self.ml = lband + + self.order = order + self.nsteps = nsteps + self.max_step = max_step + self.min_step = min_step + self.first_step = first_step + self.success = 1 + + self.initialized = False + + def _determine_mf_and_set_bands(self, has_jac): + """ + Determine the `MF` parameter (Method Flag) for the Fortran subroutine `dvode`. + + In the Fortran code, the legal values of `MF` are: + 10, 11, 12, 13, 14, 15, 20, 21, 22, 23, 24, 25, + -11, -12, -14, -15, -21, -22, -24, -25 + but this Python wrapper does not use negative values. + + Returns + + mf = 10*self.meth + miter + + self.meth is the linear multistep method: + self.meth == 1: method="adams" + self.meth == 2: method="bdf" + + miter is the correction iteration method: + miter == 0: Functional iteration; no Jacobian involved. + miter == 1: Chord iteration with user-supplied full Jacobian. + miter == 2: Chord iteration with internally computed full Jacobian. + miter == 3: Chord iteration with internally computed diagonal Jacobian. + miter == 4: Chord iteration with user-supplied banded Jacobian. + miter == 5: Chord iteration with internally computed banded Jacobian. 
+ + Side effects: If either self.mu or self.ml is not None and the other is None, + then the one that is None is set to 0. + """ + + jac_is_banded = self.mu is not None or self.ml is not None + if jac_is_banded: + if self.mu is None: + self.mu = 0 + if self.ml is None: + self.ml = 0 + + # has_jac is True if the user provided a Jacobian function. + if has_jac: + if jac_is_banded: + miter = 4 + else: + miter = 1 + else: + if jac_is_banded: + if self.ml == self.mu == 0: + miter = 3 # Chord iteration with internal diagonal Jacobian. + else: + miter = 5 # Chord iteration with internal banded Jacobian. + else: + # self.with_jacobian is set by the user in + # the call to ode.set_integrator. + if self.with_jacobian: + miter = 2 # Chord iteration with internal full Jacobian. + else: + miter = 0 # Functional iteration; no Jacobian involved. + + mf = 10 * self.meth + miter + return mf + + def reset(self, n, has_jac): + mf = self._determine_mf_and_set_bands(has_jac) + + if mf == 10: + lrw = 20 + 16 * n + elif mf in [11, 12]: + lrw = 22 + 16 * n + 2 * n * n + elif mf == 13: + lrw = 22 + 17 * n + elif mf in [14, 15]: + lrw = 22 + 18 * n + (3 * self.ml + 2 * self.mu) * n + elif mf == 20: + lrw = 20 + 9 * n + elif mf in [21, 22]: + lrw = 22 + 9 * n + 2 * n * n + elif mf == 23: + lrw = 22 + 10 * n + elif mf in [24, 25]: + lrw = 22 + 11 * n + (3 * self.ml + 2 * self.mu) * n + else: + raise ValueError('Unexpected mf=%s' % mf) + + if mf % 10 in [0, 3]: + liw = 30 + else: + liw = 30 + n + + rwork = zeros((lrw,), float) + rwork[4] = self.first_step + rwork[5] = self.max_step + rwork[6] = self.min_step + self.rwork = rwork + + iwork = zeros((liw,), _vode_int_dtype) + if self.ml is not None: + iwork[0] = self.ml + if self.mu is not None: + iwork[1] = self.mu + iwork[4] = self.order + iwork[5] = self.nsteps + iwork[6] = 2 # mxhnil + self.iwork = iwork + + self.call_args = [self.rtol, self.atol, 1, 1, + self.rwork, self.iwork, mf] + self.success = 1 + self.initialized = False + + def 
run(self, f, jac, y0, t0, t1, f_params, jac_params): + if self.initialized: + self.check_handle() + else: + self.initialized = True + self.acquire_new_handle() + + if self.ml is not None and self.ml > 0: + # Banded Jacobian. Wrap the user-provided function with one + # that pads the Jacobian array with the extra `self.ml` rows + # required by the f2py-generated wrapper. + jac = _vode_banded_jac_wrapper(jac, self.ml, jac_params) + + args = ((f, jac, y0, t0, t1) + tuple(self.call_args) + + (f_params, jac_params)) + y1, t, istate = self.runner(*args) + self.istate = istate + if istate < 0: + unexpected_istate_msg = f'Unexpected istate={istate:d}' + warnings.warn('{:s}: {:s}'.format(self.__class__.__name__, + self.messages.get(istate, unexpected_istate_msg)), + stacklevel=2) + self.success = 0 + else: + self.call_args[3] = 2 # upgrade istate from 1 to 2 + self.istate = 2 + return y1, t + + def step(self, *args): + itask = self.call_args[2] + self.call_args[2] = 2 + r = self.run(*args) + self.call_args[2] = itask + return r + + def run_relax(self, *args): + itask = self.call_args[2] + self.call_args[2] = 3 + r = self.run(*args) + self.call_args[2] = itask + return r + + +if vode.runner is not None: + IntegratorBase.integrator_classes.append(vode) + + +class zvode(vode): + runner = getattr(_vode, 'zvode', None) + + supports_run_relax = 1 + supports_step = 1 + scalar = complex + active_global_handle = 0 + + def reset(self, n, has_jac): + mf = self._determine_mf_and_set_bands(has_jac) + + if mf in (10,): + lzw = 15 * n + elif mf in (11, 12): + lzw = 15 * n + 2 * n ** 2 + elif mf in (-11, -12): + lzw = 15 * n + n ** 2 + elif mf in (13,): + lzw = 16 * n + elif mf in (14, 15): + lzw = 17 * n + (3 * self.ml + 2 * self.mu) * n + elif mf in (-14, -15): + lzw = 16 * n + (2 * self.ml + self.mu) * n + elif mf in (20,): + lzw = 8 * n + elif mf in (21, 22): + lzw = 8 * n + 2 * n ** 2 + elif mf in (-21, -22): + lzw = 8 * n + n ** 2 + elif mf in (23,): + lzw = 9 * n + elif mf in (24, 
25): + lzw = 10 * n + (3 * self.ml + 2 * self.mu) * n + elif mf in (-24, -25): + lzw = 9 * n + (2 * self.ml + self.mu) * n + + lrw = 20 + n + + if mf % 10 in (0, 3): + liw = 30 + else: + liw = 30 + n + + zwork = zeros((lzw,), complex) + self.zwork = zwork + + rwork = zeros((lrw,), float) + rwork[4] = self.first_step + rwork[5] = self.max_step + rwork[6] = self.min_step + self.rwork = rwork + + iwork = zeros((liw,), _vode_int_dtype) + if self.ml is not None: + iwork[0] = self.ml + if self.mu is not None: + iwork[1] = self.mu + iwork[4] = self.order + iwork[5] = self.nsteps + iwork[6] = 2 # mxhnil + self.iwork = iwork + + self.call_args = [self.rtol, self.atol, 1, 1, + self.zwork, self.rwork, self.iwork, mf] + self.success = 1 + self.initialized = False + + +if zvode.runner is not None: + IntegratorBase.integrator_classes.append(zvode) + + +class dopri5(IntegratorBase): + runner = getattr(_dop, 'dopri5', None) + name = 'dopri5' + supports_solout = True + + messages = {1: 'computation successful', + 2: 'computation successful (interrupted by solout)', + -1: 'input is not consistent', + -2: 'larger nsteps is needed', + -3: 'step size becomes too small', + -4: 'problem is probably stiff (interrupted)', + } + + def __init__(self, + rtol=1e-6, atol=1e-12, + nsteps=500, + max_step=0.0, + first_step=0.0, # determined by solver + safety=0.9, + ifactor=10.0, + dfactor=0.2, + beta=0.0, + method=None, + verbosity=-1, # no messages if negative + ): + self.rtol = rtol + self.atol = atol + self.nsteps = nsteps + self.max_step = max_step + self.first_step = first_step + self.safety = safety + self.ifactor = ifactor + self.dfactor = dfactor + self.beta = beta + self.verbosity = verbosity + self.success = 1 + self.set_solout(None) + + def set_solout(self, solout, complex=False): + self.solout = solout + self.solout_cmplx = complex + if solout is None: + self.iout = 0 + else: + self.iout = 1 + + def reset(self, n, has_jac): + work = zeros((8 * n + 21,), float) + work[1] = self.safety 
+ work[2] = self.dfactor + work[3] = self.ifactor + work[4] = self.beta + work[5] = self.max_step + work[6] = self.first_step + self.work = work + iwork = zeros((21,), _dop_int_dtype) + iwork[0] = self.nsteps + iwork[2] = self.verbosity + self.iwork = iwork + self.call_args = [self.rtol, self.atol, self._solout, + self.iout, self.work, self.iwork] + self.success = 1 + + def run(self, f, jac, y0, t0, t1, f_params, jac_params): + x, y, iwork, istate = self.runner(*((f, t0, y0, t1) + + tuple(self.call_args) + (f_params,))) + self.istate = istate + if istate < 0: + unexpected_istate_msg = f'Unexpected istate={istate:d}' + warnings.warn('{:s}: {:s}'.format(self.__class__.__name__, + self.messages.get(istate, unexpected_istate_msg)), + stacklevel=2) + self.success = 0 + return y, x + + def _solout(self, nr, xold, x, y, nd, icomp, con): + if self.solout is not None: + if self.solout_cmplx: + y = y[::2] + 1j * y[1::2] + return self.solout(x, y) + else: + return 1 + + +if dopri5.runner is not None: + IntegratorBase.integrator_classes.append(dopri5) + + +class dop853(dopri5): + runner = getattr(_dop, 'dop853', None) + name = 'dop853' + + def __init__(self, + rtol=1e-6, atol=1e-12, + nsteps=500, + max_step=0.0, + first_step=0.0, # determined by solver + safety=0.9, + ifactor=6.0, + dfactor=0.3, + beta=0.0, + method=None, + verbosity=-1, # no messages if negative + ): + super().__init__(rtol, atol, nsteps, max_step, first_step, safety, + ifactor, dfactor, beta, method, verbosity) + + def reset(self, n, has_jac): + work = zeros((11 * n + 21,), float) + work[1] = self.safety + work[2] = self.dfactor + work[3] = self.ifactor + work[4] = self.beta + work[5] = self.max_step + work[6] = self.first_step + self.work = work + iwork = zeros((21,), _dop_int_dtype) + iwork[0] = self.nsteps + iwork[2] = self.verbosity + self.iwork = iwork + self.call_args = [self.rtol, self.atol, self._solout, + self.iout, self.work, self.iwork] + self.success = 1 + + +if dop853.runner is not None: + 
IntegratorBase.integrator_classes.append(dop853) + + +class lsoda(IntegratorBase): + runner = getattr(_lsoda, 'lsoda', None) + active_global_handle = 0 + + messages = { + 2: "Integration successful.", + -1: "Excess work done on this call (perhaps wrong Dfun type).", + -2: "Excess accuracy requested (tolerances too small).", + -3: "Illegal input detected (internal error).", + -4: "Repeated error test failures (internal error).", + -5: "Repeated convergence failures (perhaps bad Jacobian or tolerances).", + -6: "Error weight became zero during problem.", + -7: "Internal workspace insufficient to finish (internal error)." + } + + def __init__(self, + with_jacobian=False, + rtol=1e-6, atol=1e-12, + lband=None, uband=None, + nsteps=500, + max_step=0.0, # corresponds to infinite + min_step=0.0, + first_step=0.0, # determined by solver + ixpr=0, + max_hnil=0, + max_order_ns=12, + max_order_s=5, + method=None + ): + + self.with_jacobian = with_jacobian + self.rtol = rtol + self.atol = atol + self.mu = uband + self.ml = lband + + self.max_order_ns = max_order_ns + self.max_order_s = max_order_s + self.nsteps = nsteps + self.max_step = max_step + self.min_step = min_step + self.first_step = first_step + self.ixpr = ixpr + self.max_hnil = max_hnil + self.success = 1 + + self.initialized = False + + def reset(self, n, has_jac): + # Calculate parameters for Fortran subroutine dvode. 
+ if has_jac: + if self.mu is None and self.ml is None: + jt = 1 + else: + if self.mu is None: + self.mu = 0 + if self.ml is None: + self.ml = 0 + jt = 4 + else: + if self.mu is None and self.ml is None: + jt = 2 + else: + if self.mu is None: + self.mu = 0 + if self.ml is None: + self.ml = 0 + jt = 5 + lrn = 20 + (self.max_order_ns + 4) * n + if jt in [1, 2]: + lrs = 22 + (self.max_order_s + 4) * n + n * n + elif jt in [4, 5]: + lrs = 22 + (self.max_order_s + 5 + 2 * self.ml + self.mu) * n + else: + raise ValueError('Unexpected jt=%s' % jt) + lrw = max(lrn, lrs) + liw = 20 + n + rwork = zeros((lrw,), float) + rwork[4] = self.first_step + rwork[5] = self.max_step + rwork[6] = self.min_step + self.rwork = rwork + iwork = zeros((liw,), _lsoda_int_dtype) + if self.ml is not None: + iwork[0] = self.ml + if self.mu is not None: + iwork[1] = self.mu + iwork[4] = self.ixpr + iwork[5] = self.nsteps + iwork[6] = self.max_hnil + iwork[7] = self.max_order_ns + iwork[8] = self.max_order_s + self.iwork = iwork + self.call_args = [self.rtol, self.atol, 1, 1, + self.rwork, self.iwork, jt] + self.success = 1 + self.initialized = False + + def run(self, f, jac, y0, t0, t1, f_params, jac_params): + if self.initialized: + self.check_handle() + else: + self.initialized = True + self.acquire_new_handle() + args = [f, y0, t0, t1] + self.call_args[:-1] + \ + [jac, self.call_args[-1], f_params, 0, jac_params] + y1, t, istate = self.runner(*args) + self.istate = istate + if istate < 0: + unexpected_istate_msg = f'Unexpected istate={istate:d}' + warnings.warn('{:s}: {:s}'.format(self.__class__.__name__, + self.messages.get(istate, unexpected_istate_msg)), + stacklevel=2) + self.success = 0 + else: + self.call_args[3] = 2 # upgrade istate from 1 to 2 + self.istate = 2 + return y1, t + + def step(self, *args): + itask = self.call_args[2] + self.call_args[2] = 2 + r = self.run(*args) + self.call_args[2] = itask + return r + + def run_relax(self, *args): + itask = self.call_args[2] + 
self.call_args[2] = 3 + r = self.run(*args) + self.call_args[2] = itask + return r + + +if lsoda.runner: + IntegratorBase.integrator_classes.append(lsoda) diff --git a/parrot/lib/python3.10/site-packages/scipy/integrate/_quadpack_py.py b/parrot/lib/python3.10/site-packages/scipy/integrate/_quadpack_py.py new file mode 100644 index 0000000000000000000000000000000000000000..af7ed047c0c523608af4c36f1ae9ef71e9e0bfd3 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/integrate/_quadpack_py.py @@ -0,0 +1,1279 @@ +# Author: Travis Oliphant 2001 +# Author: Nathan Woods 2013 (nquad &c) +import sys +import warnings +from functools import partial + +from . import _quadpack +import numpy as np + +__all__ = ["quad", "dblquad", "tplquad", "nquad", "IntegrationWarning"] + + +class IntegrationWarning(UserWarning): + """ + Warning on issues during integration. + """ + pass + + +def quad(func, a, b, args=(), full_output=0, epsabs=1.49e-8, epsrel=1.49e-8, + limit=50, points=None, weight=None, wvar=None, wopts=None, maxp1=50, + limlst=50, complex_func=False): + """ + Compute a definite integral. + + Integrate func from `a` to `b` (possibly infinite interval) using a + technique from the Fortran library QUADPACK. + + Parameters + ---------- + func : {function, scipy.LowLevelCallable} + A Python function or method to integrate. If `func` takes many + arguments, it is integrated along the axis corresponding to the + first argument. + + If the user desires improved integration performance, then `f` may + be a `scipy.LowLevelCallable` with one of the signatures:: + + double func(double x) + double func(double x, void *user_data) + double func(int n, double *xx) + double func(int n, double *xx, void *user_data) + + The ``user_data`` is the data contained in the `scipy.LowLevelCallable`. + In the call forms with ``xx``, ``n`` is the length of the ``xx`` + array which contains ``xx[0] == x`` and the rest of the items are + numbers contained in the ``args`` argument of quad. 
+ + In addition, certain ctypes call signatures are supported for + backward compatibility, but those should not be used in new code. + a : float + Lower limit of integration (use -numpy.inf for -infinity). + b : float + Upper limit of integration (use numpy.inf for +infinity). + args : tuple, optional + Extra arguments to pass to `func`. + full_output : int, optional + Non-zero to return a dictionary of integration information. + If non-zero, warning messages are also suppressed and the + message is appended to the output tuple. + complex_func : bool, optional + Indicate if the function's (`func`) return type is real + (``complex_func=False``: default) or complex (``complex_func=True``). + In both cases, the function's argument is real. + If full_output is also non-zero, the `infodict`, `message`, and + `explain` for the real and complex components are returned in + a dictionary with keys "real output" and "imag output". + + Returns + ------- + y : float + The integral of func from `a` to `b`. + abserr : float + An estimate of the absolute error in the result. + infodict : dict + A dictionary containing additional information. + message + A convergence message. + explain + Appended only with 'cos' or 'sin' weighting and infinite + integration limits, it contains an explanation of the codes in + infodict['ierlst'] + + Other Parameters + ---------------- + epsabs : float or int, optional + Absolute error tolerance. Default is 1.49e-8. `quad` tries to obtain + an accuracy of ``abs(i-result) <= max(epsabs, epsrel*abs(i))`` + where ``i`` = integral of `func` from `a` to `b`, and ``result`` is the + numerical approximation. See `epsrel` below. + epsrel : float or int, optional + Relative error tolerance. Default is 1.49e-8. + If ``epsabs <= 0``, `epsrel` must be greater than both 5e-29 + and ``50 * (machine epsilon)``. See `epsabs` above. + limit : float or int, optional + An upper bound on the number of subintervals used in the adaptive + algorithm. 
+ points : (sequence of floats,ints), optional + A sequence of break points in the bounded integration interval + where local difficulties of the integrand may occur (e.g., + singularities, discontinuities). The sequence does not have + to be sorted. Note that this option cannot be used in conjunction + with ``weight``. + weight : float or int, optional + String indicating weighting function. Full explanation for this + and the remaining arguments can be found below. + wvar : optional + Variables for use with weighting functions. + wopts : optional + Optional input for reusing Chebyshev moments. + maxp1 : float or int, optional + An upper bound on the number of Chebyshev moments. + limlst : int, optional + Upper bound on the number of cycles (>=3) for use with a sinusoidal + weighting and an infinite end-point. + + See Also + -------- + dblquad : double integral + tplquad : triple integral + nquad : n-dimensional integrals (uses `quad` recursively) + fixed_quad : fixed-order Gaussian quadrature + simpson : integrator for sampled data + romb : integrator for sampled data + scipy.special : for coefficients and roots of orthogonal polynomials + + Notes + ----- + For valid results, the integral must converge; behavior for divergent + integrals is not guaranteed. + + **Extra information for quad() inputs and outputs** + + If full_output is non-zero, then the third output argument + (infodict) is a dictionary with entries as tabulated below. For + infinite limits, the range is transformed to (0,1) and the + optional outputs are given with respect to this transformed range. + Let M be the input argument limit and let K be infodict['last']. + The entries are: + + 'neval' + The number of function evaluations. + 'last' + The number, K, of subintervals produced in the subdivision process. + 'alist' + A rank-1 array of length M, the first K elements of which are the + left end points of the subintervals in the partition of the + integration range. 
+ 'blist' + A rank-1 array of length M, the first K elements of which are the + right end points of the subintervals. + 'rlist' + A rank-1 array of length M, the first K elements of which are the + integral approximations on the subintervals. + 'elist' + A rank-1 array of length M, the first K elements of which are the + moduli of the absolute error estimates on the subintervals. + 'iord' + A rank-1 integer array of length M, the first L elements of + which are pointers to the error estimates over the subintervals + with ``L=K`` if ``K<=M/2+2`` or ``L=M+1-K`` otherwise. Let I be the + sequence ``infodict['iord']`` and let E be the sequence + ``infodict['elist']``. Then ``E[I[1]], ..., E[I[L]]`` forms a + decreasing sequence. + + If the input argument points is provided (i.e., it is not None), + the following additional outputs are placed in the output + dictionary. Assume the points sequence is of length P. + + 'pts' + A rank-1 array of length P+2 containing the integration limits + and the break points of the intervals in ascending order. + This is an array giving the subintervals over which integration + will occur. + 'level' + A rank-1 integer array of length M (=limit), containing the + subdivision levels of the subintervals, i.e., if (aa,bb) is a + subinterval of ``(pts[1], pts[2])`` where ``pts[0]`` and ``pts[2]`` + are adjacent elements of ``infodict['pts']``, then (aa,bb) has level l + if ``|bb-aa| = |pts[2]-pts[1]| * 2**(-l)``. + 'ndin' + A rank-1 integer array of length P+2. After the first integration + over the intervals (pts[1], pts[2]), the error estimates over some + of the intervals may have been increased artificially in order to + put their subdivision forward. This array has ones in slots + corresponding to the subintervals for which this happens. + + **Weighting the integrand** + + The input variables, *weight* and *wvar*, are used to weight the + integrand by a select list of functions. 
Different integration + methods are used to compute the integral with these weighting + functions, and these do not support specifying break points. The + possible values of weight and the corresponding weighting functions are. + + ========== =================================== ===================== + ``weight`` Weight function used ``wvar`` + ========== =================================== ===================== + 'cos' cos(w*x) wvar = w + 'sin' sin(w*x) wvar = w + 'alg' g(x) = ((x-a)**alpha)*((b-x)**beta) wvar = (alpha, beta) + 'alg-loga' g(x)*log(x-a) wvar = (alpha, beta) + 'alg-logb' g(x)*log(b-x) wvar = (alpha, beta) + 'alg-log' g(x)*log(x-a)*log(b-x) wvar = (alpha, beta) + 'cauchy' 1/(x-c) wvar = c + ========== =================================== ===================== + + wvar holds the parameter w, (alpha, beta), or c depending on the weight + selected. In these expressions, a and b are the integration limits. + + For the 'cos' and 'sin' weighting, additional inputs and outputs are + available. + + For finite integration limits, the integration is performed using a + Clenshaw-Curtis method which uses Chebyshev moments. For repeated + calculations, these moments are saved in the output dictionary: + + 'momcom' + The maximum level of Chebyshev moments that have been computed, + i.e., if ``M_c`` is ``infodict['momcom']`` then the moments have been + computed for intervals of length ``|b-a| * 2**(-l)``, + ``l=0,1,...,M_c``. + 'nnlog' + A rank-1 integer array of length M(=limit), containing the + subdivision levels of the subintervals, i.e., an element of this + array is equal to l if the corresponding subinterval is + ``|b-a|* 2**(-l)``. + 'chebmo' + A rank-2 array of shape (25, maxp1) containing the computed + Chebyshev moments. These can be passed on to an integration + over the same interval by passing this array as the second + element of the sequence wopts and passing infodict['momcom'] as + the first element. 
+ + If one of the integration limits is infinite, then a Fourier integral is + computed (assuming w neq 0). If full_output is 1 and a numerical error + is encountered, besides the error message attached to the output tuple, + a dictionary is also appended to the output tuple which translates the + error codes in the array ``info['ierlst']`` to English messages. The + output information dictionary contains the following entries instead of + 'last', 'alist', 'blist', 'rlist', and 'elist': + + 'lst' + The number of subintervals needed for the integration (call it ``K_f``). + 'rslst' + A rank-1 array of length M_f=limlst, whose first ``K_f`` elements + contain the integral contribution over the interval + ``(a+(k-1)c, a+kc)`` where ``c = (2*floor(|w|) + 1) * pi / |w|`` + and ``k=1,2,...,K_f``. + 'erlst' + A rank-1 array of length ``M_f`` containing the error estimate + corresponding to the interval in the same position in + ``infodict['rslist']``. + 'ierlst' + A rank-1 integer array of length ``M_f`` containing an error flag + corresponding to the interval in the same position in + ``infodict['rslist']``. See the explanation dictionary (last entry + in the output tuple) for the meaning of the codes. + + + **Details of QUADPACK level routines** + + `quad` calls routines from the FORTRAN library QUADPACK. This section + provides details on the conditions for each routine to be called and a + short description of each routine. The routine called depends on + `weight`, `points` and the integration limits `a` and `b`. 
+ + ================ ============== ========== ===================== + QUADPACK routine `weight` `points` infinite bounds + ================ ============== ========== ===================== + qagse None No No + qagie None No Yes + qagpe None Yes No + qawoe 'sin', 'cos' No No + qawfe 'sin', 'cos' No either `a` or `b` + qawse 'alg*' No No + qawce 'cauchy' No No + ================ ============== ========== ===================== + + The following provides a short description from [1]_ for each + routine. + + qagse + is an integrator based on globally adaptive interval + subdivision in connection with extrapolation, which will + eliminate the effects of integrand singularities of + several types. + qagie + handles integration over infinite intervals. The infinite range is + mapped onto a finite interval and subsequently the same strategy as + in ``QAGS`` is applied. + qagpe + serves the same purposes as QAGS, but also allows the + user to provide explicit information about the location + and type of trouble-spots i.e. the abscissae of internal + singularities, discontinuities and other difficulties of + the integrand function. + qawoe + is an integrator for the evaluation of + :math:`\\int^b_a \\cos(\\omega x)f(x)dx` or + :math:`\\int^b_a \\sin(\\omega x)f(x)dx` + over a finite interval [a,b], where :math:`\\omega` and :math:`f` + are specified by the user. The rule evaluation component is based + on the modified Clenshaw-Curtis technique + + An adaptive subdivision scheme is used in connection + with an extrapolation procedure, which is a modification + of that in ``QAGS`` and allows the algorithm to deal with + singularities in :math:`f(x)`. + qawfe + calculates the Fourier transform + :math:`\\int^\\infty_a \\cos(\\omega x)f(x)dx` or + :math:`\\int^\\infty_a \\sin(\\omega x)f(x)dx` + for user-provided :math:`\\omega` and :math:`f`. 
The procedure of + ``QAWO`` is applied on successive finite intervals, and convergence + acceleration by means of the :math:`\\varepsilon`-algorithm is applied + to the series of integral approximations. + qawse + approximate :math:`\\int^b_a w(x)f(x)dx`, with :math:`a < b` where + :math:`w(x) = (x-a)^{\\alpha}(b-x)^{\\beta}v(x)` with + :math:`\\alpha,\\beta > -1`, where :math:`v(x)` may be one of the + following functions: :math:`1`, :math:`\\log(x-a)`, :math:`\\log(b-x)`, + :math:`\\log(x-a)\\log(b-x)`. + + The user specifies :math:`\\alpha`, :math:`\\beta` and the type of the + function :math:`v`. A globally adaptive subdivision strategy is + applied, with modified Clenshaw-Curtis integration on those + subintervals which contain `a` or `b`. + qawce + compute :math:`\\int^b_a f(x) / (x-c)dx` where the integral must be + interpreted as a Cauchy principal value integral, for user specified + :math:`c` and :math:`f`. The strategy is globally adaptive. Modified + Clenshaw-Curtis integration is used on those intervals containing the + point :math:`x = c`. + + **Integration of Complex Function of a Real Variable** + + A complex valued function, :math:`f`, of a real variable can be written as + :math:`f = g + ih`. Similarly, the integral of :math:`f` can be + written as + + .. math:: + \\int_a^b f(x) dx = \\int_a^b g(x) dx + i\\int_a^b h(x) dx + + assuming that the integrals of :math:`g` and :math:`h` exist + over the interval :math:`[a,b]` [2]_. Therefore, ``quad`` integrates + complex-valued functions by integrating the real and imaginary components + separately. + + + References + ---------- + + .. [1] Piessens, Robert; de Doncker-Kapenga, Elise; + Überhuber, Christoph W.; Kahaner, David (1983). + QUADPACK: A subroutine package for automatic integration. + Springer-Verlag. + ISBN 978-3-540-12553-2. + + .. [2] McCullough, Thomas; Phillips, Keith (1973). + Foundations of Analysis in the Complex Plane. + Holt Rinehart Winston. 
+ ISBN 0-03-086370-8 + + Examples + -------- + Calculate :math:`\\int^4_0 x^2 dx` and compare with an analytic result + + >>> from scipy import integrate + >>> import numpy as np + >>> x2 = lambda x: x**2 + >>> integrate.quad(x2, 0, 4) + (21.333333333333332, 2.3684757858670003e-13) + >>> print(4**3 / 3.) # analytical result + 21.3333333333 + + Calculate :math:`\\int^\\infty_0 e^{-x} dx` + + >>> invexp = lambda x: np.exp(-x) + >>> integrate.quad(invexp, 0, np.inf) + (1.0, 5.842605999138044e-11) + + Calculate :math:`\\int^1_0 a x \\,dx` for :math:`a = 1, 3` + + >>> f = lambda x, a: a*x + >>> y, err = integrate.quad(f, 0, 1, args=(1,)) + >>> y + 0.5 + >>> y, err = integrate.quad(f, 0, 1, args=(3,)) + >>> y + 1.5 + + Calculate :math:`\\int^1_0 x^2 + y^2 dx` with ctypes, holding + y parameter as 1:: + + testlib.c => + double func(int n, double args[n]){ + return args[0]*args[0] + args[1]*args[1];} + compile to library testlib.* + + :: + + from scipy import integrate + import ctypes + lib = ctypes.CDLL('/home/.../testlib.*') #use absolute path + lib.func.restype = ctypes.c_double + lib.func.argtypes = (ctypes.c_int,ctypes.c_double) + integrate.quad(lib.func,0,1,(1)) + #(1.3333333333333333, 1.4802973661668752e-14) + print((1.0**3/3.0 + 1.0) - (0.0**3/3.0 + 0.0)) #Analytic result + # 1.3333333333333333 + + Be aware that pulse shapes and other sharp features as compared to the + size of the integration interval may not be integrated correctly using + this method. A simplified example of this limitation is integrating a + y-axis reflected step function with many zero values within the integrals + bounds. 
+ + >>> y = lambda x: 1 if x<=0 else 0 + >>> integrate.quad(y, -1, 1) + (1.0, 1.1102230246251565e-14) + >>> integrate.quad(y, -1, 100) + (1.0000000002199108, 1.0189464580163188e-08) + >>> integrate.quad(y, -1, 10000) + (0.0, 0.0) + + """ + if not isinstance(args, tuple): + args = (args,) + + # check the limits of integration: \int_a^b, expect a < b + flip, a, b = b < a, min(a, b), max(a, b) + + if complex_func: + def imfunc(x, *args): + return func(x, *args).imag + + def refunc(x, *args): + return func(x, *args).real + + re_retval = quad(refunc, a, b, args, full_output, epsabs, + epsrel, limit, points, weight, wvar, wopts, + maxp1, limlst, complex_func=False) + im_retval = quad(imfunc, a, b, args, full_output, epsabs, + epsrel, limit, points, weight, wvar, wopts, + maxp1, limlst, complex_func=False) + integral = re_retval[0] + 1j*im_retval[0] + error_estimate = re_retval[1] + 1j*im_retval[1] + retval = integral, error_estimate + if full_output: + msgexp = {} + msgexp["real"] = re_retval[2:] + msgexp["imag"] = im_retval[2:] + retval = retval + (msgexp,) + + return retval + + if weight is None: + retval = _quad(func, a, b, args, full_output, epsabs, epsrel, limit, + points) + else: + if points is not None: + msg = ("Break points cannot be specified when using weighted integrand.\n" + "Continuing, ignoring specified points.") + warnings.warn(msg, IntegrationWarning, stacklevel=2) + retval = _quad_weight(func, a, b, args, full_output, epsabs, epsrel, + limlst, limit, maxp1, weight, wvar, wopts) + + if flip: + retval = (-retval[0],) + retval[1:] + + ier = retval[-1] + if ier == 0: + return retval[:-1] + + msgs = {80: "A Python error occurred possibly while calling the function.", + 1: f"The maximum number of subdivisions ({limit}) has been achieved.\n " + f"If increasing the limit yields no improvement it is advised to " + f"analyze \n the integrand in order to determine the difficulties. 
" + f"If the position of a \n local difficulty can be determined " + f"(singularity, discontinuity) one will \n probably gain from " + f"splitting up the interval and calling the integrator \n on the " + f"subranges. Perhaps a special-purpose integrator should be used.", + 2: "The occurrence of roundoff error is detected, which prevents \n " + "the requested tolerance from being achieved. " + "The error may be \n underestimated.", + 3: "Extremely bad integrand behavior occurs at some points of the\n " + "integration interval.", + 4: "The algorithm does not converge. Roundoff error is detected\n " + "in the extrapolation table. It is assumed that the requested " + "tolerance\n cannot be achieved, and that the returned result " + "(if full_output = 1) is \n the best which can be obtained.", + 5: "The integral is probably divergent, or slowly convergent.", + 6: "The input is invalid.", + 7: "Abnormal termination of the routine. The estimates for result\n " + "and error are less reliable. It is assumed that the requested " + "accuracy\n has not been achieved.", + 'unknown': "Unknown error."} + + if weight in ['cos','sin'] and (b == np.inf or a == -np.inf): + msgs[1] = ( + "The maximum number of cycles allowed has been achieved., e.e.\n of " + "subintervals (a+(k-1)c, a+kc) where c = (2*int(abs(omega)+1))\n " + "*pi/abs(omega), for k = 1, 2, ..., lst. " + "One can allow more cycles by increasing the value of limlst. " + "Look at info['ierlst'] with full_output=1." + ) + msgs[4] = ( + "The extrapolation table constructed for convergence acceleration\n of " + "the series formed by the integral contributions over the cycles, \n does " + "not converge to within the requested accuracy. " + "Look at \n info['ierlst'] with full_output=1." + ) + msgs[7] = ( + "Bad integrand behavior occurs within one or more of the cycles.\n " + "Location and type of the difficulty involved can be determined from \n " + "the vector info['ierlist'] obtained with full_output=1." 
+ ) + explain = {1: "The maximum number of subdivisions (= limit) has been \n " + "achieved on this cycle.", + 2: "The occurrence of roundoff error is detected and prevents\n " + "the tolerance imposed on this cycle from being achieved.", + 3: "Extremely bad integrand behavior occurs at some points of\n " + "this cycle.", + 4: "The integral over this cycle does not converge (to within the " + "required accuracy) due to roundoff in the extrapolation " + "procedure invoked on this cycle. It is assumed that the result " + "on this interval is the best which can be obtained.", + 5: "The integral over this cycle is probably divergent or " + "slowly convergent."} + + try: + msg = msgs[ier] + except KeyError: + msg = msgs['unknown'] + + if ier in [1,2,3,4,5,7]: + if full_output: + if weight in ['cos', 'sin'] and (b == np.inf or a == -np.inf): + return retval[:-1] + (msg, explain) + else: + return retval[:-1] + (msg,) + else: + warnings.warn(msg, IntegrationWarning, stacklevel=2) + return retval[:-1] + + elif ier == 6: # Forensic decision tree when QUADPACK throws ier=6 + if epsabs <= 0: # Small error tolerance - applies to all methods + if epsrel < max(50 * sys.float_info.epsilon, 5e-29): + msg = ("If 'epsabs'<=0, 'epsrel' must be greater than both" + " 5e-29 and 50*(machine epsilon).") + elif weight in ['sin', 'cos'] and (abs(a) + abs(b) == np.inf): + msg = ("Sine or cosine weighted integrals with infinite domain" + " must have 'epsabs'>0.") + + elif weight is None: + if points is None: # QAGSE/QAGIE + msg = ("Invalid 'limit' argument. There must be" + " at least one subinterval") + else: # QAGPE + if not (min(a, b) <= min(points) <= max(points) <= max(a, b)): + msg = ("All break points in 'points' must lie within the" + " integration limits.") + elif len(points) >= limit: + msg = (f"Number of break points ({len(points):d}) " + f"must be less than subinterval limit ({limit:d})") + + else: + if maxp1 < 1: + msg = "Chebyshev moment limit maxp1 must be >=1." 
+ + elif weight in ('cos', 'sin') and abs(a+b) == np.inf: # QAWFE + msg = "Cycle limit limlst must be >=3." + + elif weight.startswith('alg'): # QAWSE + if min(wvar) < -1: + msg = "wvar parameters (alpha, beta) must both be >= -1." + if b < a: + msg = "Integration limits a, b must satistfy a>> import numpy as np + >>> from scipy import integrate + >>> f = lambda y, x: x*y**2 + >>> integrate.dblquad(f, 0, 2, 0, 1) + (0.6666666666666667, 7.401486830834377e-15) + + Calculate :math:`\\int^{x=\\pi/4}_{x=0} \\int^{y=\\cos(x)}_{y=\\sin(x)} 1 + \\,dy \\,dx`. + + >>> f = lambda y, x: 1 + >>> integrate.dblquad(f, 0, np.pi/4, np.sin, np.cos) + (0.41421356237309503, 1.1083280054755938e-14) + + Calculate :math:`\\int^{x=1}_{x=0} \\int^{y=2-x}_{y=x} a x y \\,dy \\,dx` + for :math:`a=1, 3`. + + >>> f = lambda y, x, a: a*x*y + >>> integrate.dblquad(f, 0, 1, lambda x: x, lambda x: 2-x, args=(1,)) + (0.33333333333333337, 5.551115123125783e-15) + >>> integrate.dblquad(f, 0, 1, lambda x: x, lambda x: 2-x, args=(3,)) + (0.9999999999999999, 1.6653345369377348e-14) + + Compute the two-dimensional Gaussian Integral, which is the integral of the + Gaussian function :math:`f(x,y) = e^{-(x^{2} + y^{2})}`, over + :math:`(-\\infty,+\\infty)`. That is, compute the integral + :math:`\\iint^{+\\infty}_{-\\infty} e^{-(x^{2} + y^{2})} \\,dy\\,dx`. + + >>> f = lambda x, y: np.exp(-(x ** 2 + y ** 2)) + >>> integrate.dblquad(f, -np.inf, np.inf, -np.inf, np.inf) + (3.141592653589777, 2.5173086737433208e-08) + + """ + + def temp_ranges(*args): + return [gfun(args[0]) if callable(gfun) else gfun, + hfun(args[0]) if callable(hfun) else hfun] + + return nquad(func, [temp_ranges, [a, b]], args=args, + opts={"epsabs": epsabs, "epsrel": epsrel}) + + +def tplquad(func, a, b, gfun, hfun, qfun, rfun, args=(), epsabs=1.49e-8, + epsrel=1.49e-8): + """ + Compute a triple (definite) integral. 
+ + Return the triple integral of ``func(z, y, x)`` from ``x = a..b``, + ``y = gfun(x)..hfun(x)``, and ``z = qfun(x,y)..rfun(x,y)``. + + Parameters + ---------- + func : function + A Python function or method of at least three variables in the + order (z, y, x). + a, b : float + The limits of integration in x: `a` < `b` + gfun : function or float + The lower boundary curve in y which is a function taking a single + floating point argument (x) and returning a floating point result + or a float indicating a constant boundary curve. + hfun : function or float + The upper boundary curve in y (same requirements as `gfun`). + qfun : function or float + The lower boundary surface in z. It must be a function that takes + two floats in the order (x, y) and returns a float or a float + indicating a constant boundary surface. + rfun : function or float + The upper boundary surface in z. (Same requirements as `qfun`.) + args : tuple, optional + Extra arguments to pass to `func`. + epsabs : float, optional + Absolute tolerance passed directly to the innermost 1-D quadrature + integration. Default is 1.49e-8. + epsrel : float, optional + Relative tolerance of the innermost 1-D integrals. Default is 1.49e-8. + + Returns + ------- + y : float + The resultant integral. + abserr : float + An estimate of the error. + + See Also + -------- + quad : Adaptive quadrature using QUADPACK + fixed_quad : Fixed-order Gaussian quadrature + dblquad : Double integrals + nquad : N-dimensional integrals + romb : Integrators for sampled data + simpson : Integrators for sampled data + scipy.special : For coefficients and roots of orthogonal polynomials + + Notes + ----- + For valid results, the integral must converge; behavior for divergent + integrals is not guaranteed. + + **Details of QUADPACK level routines** + + `quad` calls routines from the FORTRAN library QUADPACK. This section + provides details on the conditions for each routine to be called and a + short description of each routine. 
For each level of integration, ``qagse`` + is used for finite limits or ``qagie`` is used, if either limit (or both!) + are infinite. The following provides a short description from [1]_ for each + routine. + + qagse + is an integrator based on globally adaptive interval + subdivision in connection with extrapolation, which will + eliminate the effects of integrand singularities of + several types. + qagie + handles integration over infinite intervals. The infinite range is + mapped onto a finite interval and subsequently the same strategy as + in ``QAGS`` is applied. + + References + ---------- + + .. [1] Piessens, Robert; de Doncker-Kapenga, Elise; + Überhuber, Christoph W.; Kahaner, David (1983). + QUADPACK: A subroutine package for automatic integration. + Springer-Verlag. + ISBN 978-3-540-12553-2. + + Examples + -------- + Compute the triple integral of ``x * y * z``, over ``x`` ranging + from 1 to 2, ``y`` ranging from 2 to 3, ``z`` ranging from 0 to 1. + That is, :math:`\\int^{x=2}_{x=1} \\int^{y=3}_{y=2} \\int^{z=1}_{z=0} x y z + \\,dz \\,dy \\,dx`. + + >>> import numpy as np + >>> from scipy import integrate + >>> f = lambda z, y, x: x*y*z + >>> integrate.tplquad(f, 1, 2, 2, 3, 0, 1) + (1.8749999999999998, 3.3246447942574074e-14) + + Calculate :math:`\\int^{x=1}_{x=0} \\int^{y=1-2x}_{y=0} + \\int^{z=1-x-2y}_{z=0} x y z \\,dz \\,dy \\,dx`. + Note: `qfun`/`rfun` takes arguments in the order (x, y), even though ``f`` + takes arguments in the order (z, y, x). + + >>> f = lambda z, y, x: x*y*z + >>> integrate.tplquad(f, 0, 1, 0, lambda x: 1-2*x, 0, lambda x, y: 1-x-2*y) + (0.05416666666666668, 2.1774196738157757e-14) + + Calculate :math:`\\int^{x=1}_{x=0} \\int^{y=1}_{y=0} \\int^{z=1}_{z=0} + a x y z \\,dz \\,dy \\,dx` for :math:`a=1, 3`. 
+ + >>> f = lambda z, y, x, a: a*x*y*z + >>> integrate.tplquad(f, 0, 1, 0, 1, 0, 1, args=(1,)) + (0.125, 5.527033708952211e-15) + >>> integrate.tplquad(f, 0, 1, 0, 1, 0, 1, args=(3,)) + (0.375, 1.6581101126856635e-14) + + Compute the three-dimensional Gaussian Integral, which is the integral of + the Gaussian function :math:`f(x,y,z) = e^{-(x^{2} + y^{2} + z^{2})}`, over + :math:`(-\\infty,+\\infty)`. That is, compute the integral + :math:`\\iiint^{+\\infty}_{-\\infty} e^{-(x^{2} + y^{2} + z^{2})} \\,dz + \\,dy\\,dx`. + + >>> f = lambda x, y, z: np.exp(-(x ** 2 + y ** 2 + z ** 2)) + >>> integrate.tplquad(f, -np.inf, np.inf, -np.inf, np.inf, -np.inf, np.inf) + (5.568327996830833, 4.4619078828029765e-08) + + """ + # f(z, y, x) + # qfun/rfun(x, y) + # gfun/hfun(x) + # nquad will hand (y, x, t0, ...) to ranges0 + # nquad will hand (x, t0, ...) to ranges1 + # Only qfun / rfun is different API... + + def ranges0(*args): + return [qfun(args[1], args[0]) if callable(qfun) else qfun, + rfun(args[1], args[0]) if callable(rfun) else rfun] + + def ranges1(*args): + return [gfun(args[0]) if callable(gfun) else gfun, + hfun(args[0]) if callable(hfun) else hfun] + + ranges = [ranges0, ranges1, [a, b]] + return nquad(func, ranges, args=args, + opts={"epsabs": epsabs, "epsrel": epsrel}) + + +def nquad(func, ranges, args=None, opts=None, full_output=False): + r""" + Integration over multiple variables. + + Wraps `quad` to enable integration over multiple variables. + Various options allow improved integration of discontinuous functions, as + well as the use of weighted integration, and generally finer control of the + integration process. + + Parameters + ---------- + func : {callable, scipy.LowLevelCallable} + The function to be integrated. Has arguments of ``x0, ... xn``, + ``t0, ... tm``, where integration is carried out over ``x0, ... xn``, + which must be floats. Where ``t0, ... tm`` are extra arguments + passed in args. 
+ Function signature should be ``func(x0, x1, ..., xn, t0, t1, ..., tm)``. + Integration is carried out in order. That is, integration over ``x0`` + is the innermost integral, and ``xn`` is the outermost. + + If the user desires improved integration performance, then `f` may + be a `scipy.LowLevelCallable` with one of the signatures:: + + double func(int n, double *xx) + double func(int n, double *xx, void *user_data) + + where ``n`` is the number of variables and args. The ``xx`` array + contains the coordinates and extra arguments. ``user_data`` is the data + contained in the `scipy.LowLevelCallable`. + ranges : iterable object + Each element of ranges may be either a sequence of 2 numbers, or else + a callable that returns such a sequence. ``ranges[0]`` corresponds to + integration over x0, and so on. If an element of ranges is a callable, + then it will be called with all of the integration arguments available, + as well as any parametric arguments. e.g., if + ``func = f(x0, x1, x2, t0, t1)``, then ``ranges[0]`` may be defined as + either ``(a, b)`` or else as ``(a, b) = range0(x1, x2, t0, t1)``. + args : iterable object, optional + Additional arguments ``t0, ... tn``, required by ``func``, ``ranges``, + and ``opts``. + opts : iterable object or dict, optional + Options to be passed to `quad`. May be empty, a dict, or + a sequence of dicts or functions that return a dict. If empty, the + default options from scipy.integrate.quad are used. If a dict, the same + options are used for all levels of integraion. If a sequence, then each + element of the sequence corresponds to a particular integration. e.g., + ``opts[0]`` corresponds to integration over ``x0``, and so on. If a + callable, the signature must be the same as for ``ranges``. 
The + available options together with their default values are: + + - epsabs = 1.49e-08 + - epsrel = 1.49e-08 + - limit = 50 + - points = None + - weight = None + - wvar = None + - wopts = None + + For more information on these options, see `quad`. + + full_output : bool, optional + Partial implementation of ``full_output`` from scipy.integrate.quad. + The number of integrand function evaluations ``neval`` can be obtained + by setting ``full_output=True`` when calling nquad. + + Returns + ------- + result : float + The result of the integration. + abserr : float + The maximum of the estimates of the absolute error in the various + integration results. + out_dict : dict, optional + A dict containing additional information on the integration. + + See Also + -------- + quad : 1-D numerical integration + dblquad, tplquad : double and triple integrals + fixed_quad : fixed-order Gaussian quadrature + + Notes + ----- + For valid results, the integral must converge; behavior for divergent + integrals is not guaranteed. + + **Details of QUADPACK level routines** + + `nquad` calls routines from the FORTRAN library QUADPACK. This section + provides details on the conditions for each routine to be called and a + short description of each routine. The routine called depends on + `weight`, `points` and the integration limits `a` and `b`. + + ================ ============== ========== ===================== + QUADPACK routine `weight` `points` infinite bounds + ================ ============== ========== ===================== + qagse None No No + qagie None No Yes + qagpe None Yes No + qawoe 'sin', 'cos' No No + qawfe 'sin', 'cos' No either `a` or `b` + qawse 'alg*' No No + qawce 'cauchy' No No + ================ ============== ========== ===================== + + The following provides a short description from [1]_ for each + routine. 
+ + qagse + is an integrator based on globally adaptive interval + subdivision in connection with extrapolation, which will + eliminate the effects of integrand singularities of + several types. + qagie + handles integration over infinite intervals. The infinite range is + mapped onto a finite interval and subsequently the same strategy as + in ``QAGS`` is applied. + qagpe + serves the same purposes as QAGS, but also allows the + user to provide explicit information about the location + and type of trouble-spots i.e. the abscissae of internal + singularities, discontinuities and other difficulties of + the integrand function. + qawoe + is an integrator for the evaluation of + :math:`\int^b_a \cos(\omega x)f(x)dx` or + :math:`\int^b_a \sin(\omega x)f(x)dx` + over a finite interval [a,b], where :math:`\omega` and :math:`f` + are specified by the user. The rule evaluation component is based + on the modified Clenshaw-Curtis technique + + An adaptive subdivision scheme is used in connection + with an extrapolation procedure, which is a modification + of that in ``QAGS`` and allows the algorithm to deal with + singularities in :math:`f(x)`. + qawfe + calculates the Fourier transform + :math:`\int^\infty_a \cos(\omega x)f(x)dx` or + :math:`\int^\infty_a \sin(\omega x)f(x)dx` + for user-provided :math:`\omega` and :math:`f`. The procedure of + ``QAWO`` is applied on successive finite intervals, and convergence + acceleration by means of the :math:`\varepsilon`-algorithm is applied + to the series of integral approximations. + qawse + approximate :math:`\int^b_a w(x)f(x)dx`, with :math:`a < b` where + :math:`w(x) = (x-a)^{\alpha}(b-x)^{\beta}v(x)` with + :math:`\alpha,\beta > -1`, where :math:`v(x)` may be one of the + following functions: :math:`1`, :math:`\log(x-a)`, :math:`\log(b-x)`, + :math:`\log(x-a)\log(b-x)`. + + The user specifies :math:`\alpha`, :math:`\beta` and the type of the + function :math:`v`. 
A globally adaptive subdivision strategy is + applied, with modified Clenshaw-Curtis integration on those + subintervals which contain `a` or `b`. + qawce + compute :math:`\int^b_a f(x) / (x-c)dx` where the integral must be + interpreted as a Cauchy principal value integral, for user specified + :math:`c` and :math:`f`. The strategy is globally adaptive. Modified + Clenshaw-Curtis integration is used on those intervals containing the + point :math:`x = c`. + + References + ---------- + + .. [1] Piessens, Robert; de Doncker-Kapenga, Elise; + Überhuber, Christoph W.; Kahaner, David (1983). + QUADPACK: A subroutine package for automatic integration. + Springer-Verlag. + ISBN 978-3-540-12553-2. + + Examples + -------- + Calculate + + .. math:: + + \int^{1}_{-0.15} \int^{0.8}_{0.13} \int^{1}_{-1} \int^{1}_{0} + f(x_0, x_1, x_2, x_3) \,dx_0 \,dx_1 \,dx_2 \,dx_3 , + + where + + .. math:: + + f(x_0, x_1, x_2, x_3) = \begin{cases} + x_0^2+x_1 x_2-x_3^3+ \sin{x_0}+1 & (x_0-0.2 x_3-0.5-0.25 x_1 > 0) \\ + x_0^2+x_1 x_2-x_3^3+ \sin{x_0}+0 & (x_0-0.2 x_3-0.5-0.25 x_1 \leq 0) + \end{cases} . + + >>> import numpy as np + >>> from scipy import integrate + >>> func = lambda x0,x1,x2,x3 : x0**2 + x1*x2 - x3**3 + np.sin(x0) + ( + ... 1 if (x0-.2*x3-.5-.25*x1>0) else 0) + >>> def opts0(*args, **kwargs): + ... return {'points':[0.2*args[2] + 0.5 + 0.25*args[0]]} + >>> integrate.nquad(func, [[0,1], [-1,1], [.13,.8], [-.15,1]], + ... opts=[opts0,{},{},{}], full_output=True) + (1.5267454070738633, 2.9437360001402324e-14, {'neval': 388962}) + + Calculate + + .. math:: + + \int^{t_0+t_1+1}_{t_0+t_1-1} + \int^{x_2+t_0^2 t_1^3+1}_{x_2+t_0^2 t_1^3-1} + \int^{t_0 x_1+t_1 x_2+1}_{t_0 x_1+t_1 x_2-1} + f(x_0,x_1, x_2,t_0,t_1) + \,dx_0 \,dx_1 \,dx_2, + + where + + .. math:: + + f(x_0, x_1, x_2, t_0, t_1) = \begin{cases} + x_0 x_2^2 + \sin{x_1}+2 & (x_0+t_1 x_1-t_0 > 0) \\ + x_0 x_2^2 +\sin{x_1}+1 & (x_0+t_1 x_1-t_0 \leq 0) + \end{cases} + + and :math:`(t_0, t_1) = (0, 1)` . 
+ + >>> def func2(x0, x1, x2, t0, t1): + ... return x0*x2**2 + np.sin(x1) + 1 + (1 if x0+t1*x1-t0>0 else 0) + >>> def lim0(x1, x2, t0, t1): + ... return [t0*x1 + t1*x2 - 1, t0*x1 + t1*x2 + 1] + >>> def lim1(x2, t0, t1): + ... return [x2 + t0**2*t1**3 - 1, x2 + t0**2*t1**3 + 1] + >>> def lim2(t0, t1): + ... return [t0 + t1 - 1, t0 + t1 + 1] + >>> def opts0(x1, x2, t0, t1): + ... return {'points' : [t0 - t1*x1]} + >>> def opts1(x2, t0, t1): + ... return {} + >>> def opts2(t0, t1): + ... return {} + >>> integrate.nquad(func2, [lim0, lim1, lim2], args=(0,1), + ... opts=[opts0, opts1, opts2]) + (36.099919226771625, 1.8546948553373528e-07) + + """ + depth = len(ranges) + ranges = [rng if callable(rng) else _RangeFunc(rng) for rng in ranges] + if args is None: + args = () + if opts is None: + opts = [dict([])] * depth + + if isinstance(opts, dict): + opts = [_OptFunc(opts)] * depth + else: + opts = [opt if callable(opt) else _OptFunc(opt) for opt in opts] + return _NQuad(func, ranges, opts, full_output).integrate(*args) + + +class _RangeFunc: + def __init__(self, range_): + self.range_ = range_ + + def __call__(self, *args): + """Return stored value. + + *args needed because range_ can be float or func, and is called with + variable number of parameters. + """ + return self.range_ + + +class _OptFunc: + def __init__(self, opt): + self.opt = opt + + def __call__(self, *args): + """Return stored dict.""" + return self.opt + + +class _NQuad: + def __init__(self, func, ranges, opts, full_output): + self.abserr = 0 + self.func = func + self.ranges = ranges + self.opts = opts + self.maxdepth = len(ranges) + self.full_output = full_output + if self.full_output: + self.out_dict = {'neval': 0} + + def integrate(self, *args, **kwargs): + depth = kwargs.pop('depth', 0) + if kwargs: + raise ValueError('unexpected kwargs') + + # Get the integration range and options for this depth. 
+ ind = -(depth + 1) + fn_range = self.ranges[ind] + low, high = fn_range(*args) + fn_opt = self.opts[ind] + opt = dict(fn_opt(*args)) + + if 'points' in opt: + opt['points'] = [x for x in opt['points'] if low <= x <= high] + if depth + 1 == self.maxdepth: + f = self.func + else: + f = partial(self.integrate, depth=depth+1) + quad_r = quad(f, low, high, args=args, full_output=self.full_output, + **opt) + value = quad_r[0] + abserr = quad_r[1] + if self.full_output: + infodict = quad_r[2] + # The 'neval' parameter in full_output returns the total + # number of times the integrand function was evaluated. + # Therefore, only the innermost integration loop counts. + if depth + 1 == self.maxdepth: + self.out_dict['neval'] += infodict['neval'] + self.abserr = max(self.abserr, abserr) + if depth > 0: + return value + else: + # Final result of N-D integration with error + if self.full_output: + return value, self.abserr, self.out_dict + else: + return value, self.abserr diff --git a/parrot/lib/python3.10/site-packages/scipy/integrate/_quadrature.py b/parrot/lib/python3.10/site-packages/scipy/integrate/_quadrature.py new file mode 100644 index 0000000000000000000000000000000000000000..7fe4ef9424eb1acb0e488f5600b2d83f8ca99090 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/integrate/_quadrature.py @@ -0,0 +1,1684 @@ +from __future__ import annotations +from typing import TYPE_CHECKING, Callable, Any, cast +import numpy as np +import numpy.typing as npt +import math +import warnings +from collections import namedtuple + +from scipy.special import roots_legendre +from scipy.special import gammaln, logsumexp +from scipy._lib._util import _rng_spawn +from scipy._lib.deprecation import _deprecated + + +__all__ = ['fixed_quad', 'quadrature', 'romberg', 'romb', + 'trapezoid', 'simpson', + 'cumulative_trapezoid', 'newton_cotes', + 'qmc_quad', 'AccuracyWarning', 'cumulative_simpson'] + + +def trapezoid(y, x=None, dx=1.0, axis=-1): + r""" + Integrate along the given axis 
using the composite trapezoidal rule. + + If `x` is provided, the integration happens in sequence along its + elements - they are not sorted. + + Integrate `y` (`x`) along each 1d slice on the given axis, compute + :math:`\int y(x) dx`. + When `x` is specified, this integrates along the parametric curve, + computing :math:`\int_t y(t) dt = + \int_t y(t) \left.\frac{dx}{dt}\right|_{x=x(t)} dt`. + + Parameters + ---------- + y : array_like + Input array to integrate. + x : array_like, optional + The sample points corresponding to the `y` values. If `x` is None, + the sample points are assumed to be evenly spaced `dx` apart. The + default is None. + dx : scalar, optional + The spacing between sample points when `x` is None. The default is 1. + axis : int, optional + The axis along which to integrate. + + Returns + ------- + trapezoid : float or ndarray + Definite integral of `y` = n-dimensional array as approximated along + a single axis by the trapezoidal rule. If `y` is a 1-dimensional array, + then the result is a float. If `n` is greater than 1, then the result + is an `n`-1 dimensional array. + + See Also + -------- + cumulative_trapezoid, simpson, romb + + Notes + ----- + Image [2]_ illustrates trapezoidal rule -- y-axis locations of points + will be taken from `y` array, by default x-axis distances between + points will be 1.0, alternatively they can be provided with `x` array + or with `dx` scalar. Return value will be equal to combined area under + the red lines. + + References + ---------- + .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Trapezoidal_rule + + .. 
[2] Illustration image: + https://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png + + Examples + -------- + Use the trapezoidal rule on evenly spaced points: + + >>> import numpy as np + >>> from scipy import integrate + >>> integrate.trapezoid([1, 2, 3]) + 4.0 + + The spacing between sample points can be selected by either the + ``x`` or ``dx`` arguments: + + >>> integrate.trapezoid([1, 2, 3], x=[4, 6, 8]) + 8.0 + >>> integrate.trapezoid([1, 2, 3], dx=2) + 8.0 + + Using a decreasing ``x`` corresponds to integrating in reverse: + + >>> integrate.trapezoid([1, 2, 3], x=[8, 6, 4]) + -8.0 + + More generally ``x`` is used to integrate along a parametric curve. We can + estimate the integral :math:`\int_0^1 x^2 = 1/3` using: + + >>> x = np.linspace(0, 1, num=50) + >>> y = x**2 + >>> integrate.trapezoid(y, x) + 0.33340274885464394 + + Or estimate the area of a circle, noting we repeat the sample which closes + the curve: + + >>> theta = np.linspace(0, 2 * np.pi, num=1000, endpoint=True) + >>> integrate.trapezoid(np.cos(theta), x=np.sin(theta)) + 3.141571941375841 + + ``trapezoid`` can be applied along a specified axis to do multiple + computations in one call: + + >>> a = np.arange(6).reshape(2, 3) + >>> a + array([[0, 1, 2], + [3, 4, 5]]) + >>> integrate.trapezoid(a, axis=0) + array([1.5, 2.5, 3.5]) + >>> integrate.trapezoid(a, axis=1) + array([2., 8.]) + """ + y = np.asanyarray(y) + if x is None: + d = dx + else: + x = np.asanyarray(x) + if x.ndim == 1: + d = np.diff(x) + # reshape to correct shape + shape = [1]*y.ndim + shape[axis] = d.shape[0] + d = d.reshape(shape) + else: + d = np.diff(x, axis=axis) + nd = y.ndim + slice1 = [slice(None)]*nd + slice2 = [slice(None)]*nd + slice1[axis] = slice(1, None) + slice2[axis] = slice(None, -1) + try: + ret = (d * (y[tuple(slice1)] + y[tuple(slice2)]) / 2.0).sum(axis) + except ValueError: + # Operations didn't work, cast to ndarray + d = np.asarray(d) + y = np.asarray(y) + ret = np.add.reduce(d * 
(y[tuple(slice1)]+y[tuple(slice2)])/2.0, axis) + return ret + + +class AccuracyWarning(Warning): + pass + + +if TYPE_CHECKING: + # workaround for mypy function attributes see: + # https://github.com/python/mypy/issues/2087#issuecomment-462726600 + from typing import Protocol + + class CacheAttributes(Protocol): + cache: dict[int, tuple[Any, Any]] +else: + CacheAttributes = Callable + + +def cache_decorator(func: Callable) -> CacheAttributes: + return cast(CacheAttributes, func) + + +@cache_decorator +def _cached_roots_legendre(n): + """ + Cache roots_legendre results to speed up calls of the fixed_quad + function. + """ + if n in _cached_roots_legendre.cache: + return _cached_roots_legendre.cache[n] + + _cached_roots_legendre.cache[n] = roots_legendre(n) + return _cached_roots_legendre.cache[n] + + +_cached_roots_legendre.cache = dict() + + +def fixed_quad(func, a, b, args=(), n=5): + """ + Compute a definite integral using fixed-order Gaussian quadrature. + + Integrate `func` from `a` to `b` using Gaussian quadrature of + order `n`. + + Parameters + ---------- + func : callable + A Python function or method to integrate (must accept vector inputs). + If integrating a vector-valued function, the returned array must have + shape ``(..., len(x))``. + a : float + Lower limit of integration. + b : float + Upper limit of integration. + args : tuple, optional + Extra arguments to pass to function, if any. + n : int, optional + Order of quadrature integration. Default is 5. 
+ + Returns + ------- + val : float + Gaussian quadrature approximation to the integral + none : None + Statically returned value of None + + See Also + -------- + quad : adaptive quadrature using QUADPACK + dblquad : double integrals + tplquad : triple integrals + romb : integrators for sampled data + simpson : integrators for sampled data + cumulative_trapezoid : cumulative integration for sampled data + + Examples + -------- + >>> from scipy import integrate + >>> import numpy as np + >>> f = lambda x: x**8 + >>> integrate.fixed_quad(f, 0.0, 1.0, n=4) + (0.1110884353741496, None) + >>> integrate.fixed_quad(f, 0.0, 1.0, n=5) + (0.11111111111111102, None) + >>> print(1/9.0) # analytical result + 0.1111111111111111 + + >>> integrate.fixed_quad(np.cos, 0.0, np.pi/2, n=4) + (0.9999999771971152, None) + >>> integrate.fixed_quad(np.cos, 0.0, np.pi/2, n=5) + (1.000000000039565, None) + >>> np.sin(np.pi/2)-np.sin(0) # analytical result + 1.0 + + """ + x, w = _cached_roots_legendre(n) + x = np.real(x) + if np.isinf(a) or np.isinf(b): + raise ValueError("Gaussian quadrature is only available for " + "finite limits.") + y = (b-a)*(x+1)/2.0 + a + return (b-a)/2.0 * np.sum(w*func(y, *args), axis=-1), None + + +def vectorize1(func, args=(), vec_func=False): + """Vectorize the call to a function. + + This is an internal utility function used by `romberg` and + `quadrature` to create a vectorized version of a function. + + If `vec_func` is True, the function `func` is assumed to take vector + arguments. + + Parameters + ---------- + func : callable + User defined function. + args : tuple, optional + Extra arguments for the function. + vec_func : bool, optional + True if the function func takes vector arguments. + + Returns + ------- + vfunc : callable + A function that will take a vector argument and return the + result. 
+ + """ + if vec_func: + def vfunc(x): + return func(x, *args) + else: + def vfunc(x): + if np.isscalar(x): + return func(x, *args) + x = np.asarray(x) + # call with first point to get output type + y0 = func(x[0], *args) + n = len(x) + dtype = getattr(y0, 'dtype', type(y0)) + output = np.empty((n,), dtype=dtype) + output[0] = y0 + for i in range(1, n): + output[i] = func(x[i], *args) + return output + return vfunc + + +@_deprecated("`scipy.integrate.quadrature` is deprecated as of SciPy 1.12.0" + "and will be removed in SciPy 1.15.0. Please use" + "`scipy.integrate.quad` instead.") +def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50, + vec_func=True, miniter=1): + """ + Compute a definite integral using fixed-tolerance Gaussian quadrature. + + .. deprecated:: 1.12.0 + + This function is deprecated as of SciPy 1.12.0 and will be removed + in SciPy 1.15.0. Please use `scipy.integrate.quad` instead. + + Integrate `func` from `a` to `b` using Gaussian quadrature + with absolute tolerance `tol`. + + Parameters + ---------- + func : function + A Python function or method to integrate. + a : float + Lower limit of integration. + b : float + Upper limit of integration. + args : tuple, optional + Extra arguments to pass to function. + tol, rtol : float, optional + Iteration stops when error between last two iterates is less than + `tol` OR the relative change is less than `rtol`. + maxiter : int, optional + Maximum order of Gaussian quadrature. + vec_func : bool, optional + True or False if func handles arrays as arguments (is + a "vector" function). Default is True. + miniter : int, optional + Minimum order of Gaussian quadrature. + + Returns + ------- + val : float + Gaussian quadrature approximation (within tolerance) to integral. + err : float + Difference between last two estimates of the integral. 
+ + See Also + -------- + fixed_quad : fixed-order Gaussian quadrature + quad : adaptive quadrature using QUADPACK + dblquad : double integrals + tplquad : triple integrals + romb : integrator for sampled data + simpson : integrator for sampled data + cumulative_trapezoid : cumulative integration for sampled data + + Examples + -------- + >>> from scipy import integrate + >>> import numpy as np + >>> f = lambda x: x**8 + >>> integrate.quadrature(f, 0.0, 1.0) + (0.11111111111111106, 4.163336342344337e-17) + >>> print(1/9.0) # analytical result + 0.1111111111111111 + + >>> integrate.quadrature(np.cos, 0.0, np.pi/2) + (0.9999999999999536, 3.9611425250996035e-11) + >>> np.sin(np.pi/2)-np.sin(0) # analytical result + 1.0 + + """ + if not isinstance(args, tuple): + args = (args,) + vfunc = vectorize1(func, args, vec_func=vec_func) + val = np.inf + err = np.inf + maxiter = max(miniter+1, maxiter) + for n in range(miniter, maxiter+1): + newval = fixed_quad(vfunc, a, b, (), n)[0] + err = abs(newval-val) + val = newval + + if err < tol or err < rtol*abs(val): + break + else: + warnings.warn( + "maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err), + AccuracyWarning, stacklevel=2 + ) + return val, err + + +def tupleset(t, i, value): + l = list(t) + l[i] = value + return tuple(l) + + +def cumulative_trapezoid(y, x=None, dx=1.0, axis=-1, initial=None): + """ + Cumulatively integrate y(x) using the composite trapezoidal rule. + + Parameters + ---------- + y : array_like + Values to integrate. + x : array_like, optional + The coordinate to integrate along. If None (default), use spacing `dx` + between consecutive elements in `y`. + dx : float, optional + Spacing between elements of `y`. Only used if `x` is None. + axis : int, optional + Specifies the axis to cumulate. Default is -1 (last axis). + initial : scalar, optional + If given, insert this value at the beginning of the returned result. + 0 or None are the only values accepted. 
Default is None, which means + `res` has one element less than `y` along the axis of integration. + + .. deprecated:: 1.12.0 + The option for non-zero inputs for `initial` will be deprecated in + SciPy 1.15.0. After this time, a ValueError will be raised if + `initial` is not None or 0. + + Returns + ------- + res : ndarray + The result of cumulative integration of `y` along `axis`. + If `initial` is None, the shape is such that the axis of integration + has one less value than `y`. If `initial` is given, the shape is equal + to that of `y`. + + See Also + -------- + numpy.cumsum, numpy.cumprod + cumulative_simpson : cumulative integration using Simpson's 1/3 rule + quad : adaptive quadrature using QUADPACK + fixed_quad : fixed-order Gaussian quadrature + dblquad : double integrals + tplquad : triple integrals + romb : integrators for sampled data + + Examples + -------- + >>> from scipy import integrate + >>> import numpy as np + >>> import matplotlib.pyplot as plt + + >>> x = np.linspace(-2, 2, num=20) + >>> y = x + >>> y_int = integrate.cumulative_trapezoid(y, x, initial=0) + >>> plt.plot(x, y_int, 'ro', x, y[0] + 0.5 * x**2, 'b-') + >>> plt.show() + + """ + y = np.asarray(y) + if y.shape[axis] == 0: + raise ValueError("At least one point is required along `axis`.") + if x is None: + d = dx + else: + x = np.asarray(x) + if x.ndim == 1: + d = np.diff(x) + # reshape to correct shape + shape = [1] * y.ndim + shape[axis] = -1 + d = d.reshape(shape) + elif len(x.shape) != len(y.shape): + raise ValueError("If given, shape of x must be 1-D or the " + "same as y.") + else: + d = np.diff(x, axis=axis) + + if d.shape[axis] != y.shape[axis] - 1: + raise ValueError("If given, length of x along axis must be the " + "same as y.") + + nd = len(y.shape) + slice1 = tupleset((slice(None),)*nd, axis, slice(1, None)) + slice2 = tupleset((slice(None),)*nd, axis, slice(None, -1)) + res = np.cumsum(d * (y[slice1] + y[slice2]) / 2.0, axis=axis) + + if initial is not None: + if initial 
!= 0: + warnings.warn( + "The option for values for `initial` other than None or 0 is " + "deprecated as of SciPy 1.12.0 and will raise a value error in" + " SciPy 1.15.0.", + DeprecationWarning, stacklevel=2 + ) + if not np.isscalar(initial): + raise ValueError("`initial` parameter should be a scalar.") + + shape = list(res.shape) + shape[axis] = 1 + res = np.concatenate([np.full(shape, initial, dtype=res.dtype), res], + axis=axis) + + return res + + +def _basic_simpson(y, start, stop, x, dx, axis): + nd = len(y.shape) + if start is None: + start = 0 + step = 2 + slice_all = (slice(None),)*nd + slice0 = tupleset(slice_all, axis, slice(start, stop, step)) + slice1 = tupleset(slice_all, axis, slice(start+1, stop+1, step)) + slice2 = tupleset(slice_all, axis, slice(start+2, stop+2, step)) + + if x is None: # Even-spaced Simpson's rule. + result = np.sum(y[slice0] + 4.0*y[slice1] + y[slice2], axis=axis) + result *= dx / 3.0 + else: + # Account for possibly different spacings. + # Simpson's rule changes a bit. + h = np.diff(x, axis=axis) + sl0 = tupleset(slice_all, axis, slice(start, stop, step)) + sl1 = tupleset(slice_all, axis, slice(start+1, stop+1, step)) + h0 = h[sl0].astype(float, copy=False) + h1 = h[sl1].astype(float, copy=False) + hsum = h0 + h1 + hprod = h0 * h1 + h0divh1 = np.true_divide(h0, h1, out=np.zeros_like(h0), where=h1 != 0) + tmp = hsum/6.0 * (y[slice0] * + (2.0 - np.true_divide(1.0, h0divh1, + out=np.zeros_like(h0divh1), + where=h0divh1 != 0)) + + y[slice1] * (hsum * + np.true_divide(hsum, hprod, + out=np.zeros_like(hsum), + where=hprod != 0)) + + y[slice2] * (2.0 - h0divh1)) + result = np.sum(tmp, axis=axis) + return result + + +def simpson(y, *, x=None, dx=1.0, axis=-1): + """ + Integrate y(x) using samples along the given axis and the composite + Simpson's rule. If x is None, spacing of dx is assumed. 
def simpson(y, *, x=None, dx=1.0, axis=-1):
    """
    Integrate y(x) using samples along the given axis and the composite
    Simpson's rule. If x is None, spacing of dx is assumed.

    If there are an even number of samples, N, then there are an odd
    number of intervals (N-1), but Simpson's rule requires an even number
    of intervals. In this case, the integral over the last interval is
    computed separately from a parabola through the last three samples
    (Cartwright's correction, see [1]_ and the code comments below).

    Parameters
    ----------
    y : array_like
        Array to be integrated.
    x : array_like, optional
        If given, the points at which `y` is sampled.
    dx : float, optional
        Spacing of integration points along axis of `x`. Only used when
        `x` is None. Default is 1.
    axis : int, optional
        Axis along which to integrate. Default is the last axis.

    Returns
    -------
    float
        The estimated integral computed with the composite Simpson's rule.

    See Also
    --------
    quad : adaptive quadrature using QUADPACK
    fixed_quad : fixed-order Gaussian quadrature
    dblquad : double integrals
    tplquad : triple integrals
    romb : integrators for sampled data
    cumulative_trapezoid : cumulative integration for sampled data
    cumulative_simpson : cumulative integration using Simpson's 1/3 rule

    Notes
    -----
    For an odd number of samples that are equally spaced the result is
    exact if the function is a polynomial of order 3 or less. If
    the samples are not equally spaced, then the result is exact only
    if the function is a polynomial of order 2 or less.

    References
    ----------
    .. [1] Cartwright, Kenneth V. Simpson's Rule Cumulative Integration with
           MS Excel and Irregularly-spaced Data. Journal of Mathematical
           Sciences and Mathematics Education. 12 (2): 1-9

    Examples
    --------
    >>> from scipy import integrate
    >>> import numpy as np
    >>> x = np.arange(0, 10)
    >>> y = np.arange(0, 10)

    >>> integrate.simpson(y, x=x)
    40.5

    >>> y = np.power(x, 3)
    >>> integrate.simpson(y, x=x)
    1640.5
    >>> integrate.quad(lambda x: x**3, 0, 9)[0]
    1640.25

    """
    y = np.asarray(y)
    nd = len(y.shape)
    N = y.shape[axis]
    last_dx = dx
    returnshape = 0
    if x is not None:
        x = np.asarray(x)
        if len(x.shape) == 1:
            # Reshape 1-D `x` so it broadcasts against `y` along `axis`;
            # restored to its original shape before returning.
            shapex = [1] * nd
            shapex[axis] = x.shape[0]
            saveshape = x.shape
            returnshape = 1
            x = x.reshape(tuple(shapex))
        elif len(x.shape) != len(y.shape):
            raise ValueError("If given, shape of x must be 1-D or the "
                             "same as y.")
        if x.shape[axis] != N:
            raise ValueError("If given, length of x along axis must be the "
                             "same as y.")

    if N % 2 == 0:
        # Even sample count -> odd interval count; handle the trailing
        # interval separately so plain Simpson applies to the rest.
        val = 0.0
        result = 0.0
        slice_all = (slice(None),) * nd

        if N == 2:
            # need at least 3 points in integration axis to form parabolic
            # segment. If there are two points then any of 'avg', 'first',
            # 'last' should give the same result.
            slice1 = tupleset(slice_all, axis, -1)
            slice2 = tupleset(slice_all, axis, -2)
            if x is not None:
                last_dx = x[slice1] - x[slice2]
            # Single interval: fall back to the trapezoidal rule.
            val += 0.5 * last_dx * (y[slice1] + y[slice2])
        else:
            # use Simpson's rule on first intervals
            result = _basic_simpson(y, 0, N-3, x, dx, axis)

            slice1 = tupleset(slice_all, axis, -1)
            slice2 = tupleset(slice_all, axis, -2)
            slice3 = tupleset(slice_all, axis, -3)

            h = np.asarray([dx, dx], dtype=np.float64)
            if x is not None:
                # grab the last two spacings from the appropriate axis
                hm2 = tupleset(slice_all, axis, slice(-2, -1, 1))
                hm1 = tupleset(slice_all, axis, slice(-1, None, 1))

                diffs = np.float64(np.diff(x, axis=axis))
                h = [np.squeeze(diffs[hm2], axis=axis),
                     np.squeeze(diffs[hm1], axis=axis)]

            # This is the correction for the last interval according to
            # Cartwright.
            # However, I used the equations given at
            # https://en.wikipedia.org/wiki/Simpson%27s_rule#Composite_Simpson's_rule_for_irregularly_spaced_data
            # A footnote on Wikipedia says:
            # Cartwright 2017, Equation 8. The equation in Cartwright is
            # calculating the first interval whereas the equations in the
            # Wikipedia article are adjusting for the last integral. If the
            # proper algebraic substitutions are made, the equation results in
            # the values shown.
            num = 2 * h[1] ** 2 + 3 * h[0] * h[1]
            den = 6 * (h[1] + h[0])
            alpha = np.true_divide(
                num,
                den,
                out=np.zeros_like(den),
                where=den != 0
            )

            num = h[1] ** 2 + 3.0 * h[0] * h[1]
            den = 6 * h[0]
            beta = np.true_divide(
                num,
                den,
                out=np.zeros_like(den),
                where=den != 0
            )

            num = 1 * h[1] ** 3
            den = 6 * h[0] * (h[0] + h[1])
            eta = np.true_divide(
                num,
                den,
                out=np.zeros_like(den),
                where=den != 0
            )

            result += alpha*y[slice1] + beta*y[slice2] - eta*y[slice3]

        result += val
    else:
        # Odd sample count: plain composite Simpson covers everything.
        result = _basic_simpson(y, 0, N-2, x, dx, axis)
    if returnshape:
        x = x.reshape(saveshape)
    return result
+ """ + sub_integrals_h1 = integration_func(y, dx) + sub_integrals_h2 = integration_func(y[..., ::-1], dx[..., ::-1])[..., ::-1] + + shape = list(sub_integrals_h1.shape) + shape[-1] += 1 + sub_integrals = np.empty(shape) + sub_integrals[..., :-1:2] = sub_integrals_h1[..., ::2] + sub_integrals[..., 1::2] = sub_integrals_h2[..., ::2] + # Integral over last subinterval can only be calculated from + # formula for h2 + sub_integrals[..., -1] = sub_integrals_h2[..., -1] + res = np.cumsum(sub_integrals, axis=-1) + return res + + +def _cumulative_simpson_equal_intervals(y: np.ndarray, dx: np.ndarray) -> np.ndarray: + """Calculate the Simpson integrals for all h1 intervals assuming equal interval + widths. The function can also be used to calculate the integral for all + h2 intervals by reversing the inputs, `y` and `dx`. + """ + d = dx[..., :-1] + f1 = y[..., :-2] + f2 = y[..., 1:-1] + f3 = y[..., 2:] + + # Calculate integral over the subintervals (eqn (10) of Reference [2]) + return d / 3 * (5 * f1 / 4 + 2 * f2 - f3 / 4) + + +def _cumulative_simpson_unequal_intervals(y: np.ndarray, dx: np.ndarray) -> np.ndarray: + """Calculate the Simpson integrals for all h1 intervals assuming unequal interval + widths. The function can also be used to calculate the integral for all + h2 intervals by reversing the inputs, `y` and `dx`. 
+ """ + x21 = dx[..., :-1] + x32 = dx[..., 1:] + f1 = y[..., :-2] + f2 = y[..., 1:-1] + f3 = y[..., 2:] + + x31 = x21 + x32 + x21_x31 = x21/x31 + x21_x32 = x21/x32 + x21x21_x31x32 = x21_x31 * x21_x32 + + # Calculate integral over the subintervals (eqn (8) of Reference [2]) + coeff1 = 3 - x21_x31 + coeff2 = 3 + x21x21_x31x32 + x21_x31 + coeff3 = -x21x21_x31x32 + + return x21/6 * (coeff1*f1 + coeff2*f2 + coeff3*f3) + + +def _ensure_float_array(arr: npt.ArrayLike) -> np.ndarray: + arr = np.asarray(arr) + if np.issubdtype(arr.dtype, np.integer): + arr = arr.astype(float, copy=False) + return arr + + +def cumulative_simpson(y, *, x=None, dx=1.0, axis=-1, initial=None): + r""" + Cumulatively integrate y(x) using the composite Simpson's 1/3 rule. + The integral of the samples at every point is calculated by assuming a + quadratic relationship between each point and the two adjacent points. + + Parameters + ---------- + y : array_like + Values to integrate. Requires at least one point along `axis`. If two or fewer + points are provided along `axis`, Simpson's integration is not possible and the + result is calculated with `cumulative_trapezoid`. + x : array_like, optional + The coordinate to integrate along. Must have the same shape as `y` or + must be 1D with the same length as `y` along `axis`. `x` must also be + strictly increasing along `axis`. + If `x` is None (default), integration is performed using spacing `dx` + between consecutive elements in `y`. + dx : scalar or array_like, optional + Spacing between elements of `y`. Only used if `x` is None. Can either + be a float, or an array with the same shape as `y`, but of length one along + `axis`. Default is 1.0. + axis : int, optional + Specifies the axis to integrate along. Default is -1 (last axis). + initial : scalar or array_like, optional + If given, insert this value at the beginning of the returned result, + and add it to the rest of the result. 
def cumulative_simpson(y, *, x=None, dx=1.0, axis=-1, initial=None):
    r"""
    Cumulatively integrate y(x) using the composite Simpson's 1/3 rule.
    The integral of the samples at every point is calculated by assuming a
    quadratic relationship between each point and the two adjacent points.

    Parameters
    ----------
    y : array_like
        Values to integrate. Requires at least one point along `axis`. If two or fewer
        points are provided along `axis`, Simpson's integration is not possible and the
        result is calculated with `cumulative_trapezoid`.
    x : array_like, optional
        The coordinate to integrate along. Must have the same shape as `y` or
        must be 1D with the same length as `y` along `axis`. `x` must also be
        strictly increasing along `axis`.
        If `x` is None (default), integration is performed using spacing `dx`
        between consecutive elements in `y`.
    dx : scalar or array_like, optional
        Spacing between elements of `y`. Only used if `x` is None. Can either
        be a float, or an array with the same shape as `y`, but of length one along
        `axis`. Default is 1.0.
    axis : int, optional
        Specifies the axis to integrate along. Default is -1 (last axis).
    initial : scalar or array_like, optional
        If given, insert this value at the beginning of the returned result,
        and add it to the rest of the result. Default is None, which means no
        value at ``x[0]`` is returned and `res` has one element less than `y`
        along the axis of integration. Can either be a float, or an array with
        the same shape as `y`, but of length one along `axis`.

    Returns
    -------
    res : ndarray
        The result of cumulative integration of `y` along `axis`.
        If `initial` is None, the shape is such that the axis of integration
        has one less value than `y`. If `initial` is given, the shape is equal
        to that of `y`.

    See Also
    --------
    numpy.cumsum
    cumulative_trapezoid : cumulative integration using the composite
        trapezoidal rule
    simpson : integrator for sampled data using the Composite Simpson's Rule

    Notes
    -----

    .. versionadded:: 1.12.0

    The composite Simpson's 1/3 method can be used to approximate the definite
    integral of a sampled input function :math:`y(x)` [1]_. The method assumes
    a quadratic relationship over the interval containing any three consecutive
    sampled points.

    Consider three consecutive points:
    :math:`(x_1, y_1), (x_2, y_2), (x_3, y_3)`.

    Assuming a quadratic relationship over the three points, the integral over
    the subinterval between :math:`x_1` and :math:`x_2` is given by formula
    (8) of [2]_:

    .. math::
        \int_{x_1}^{x_2} y(x) dx\ &= \frac{x_2-x_1}{6}\left[\
        \left\{3-\frac{x_2-x_1}{x_3-x_1}\right\} y_1 + \
        \left\{3 + \frac{(x_2-x_1)^2}{(x_3-x_2)(x_3-x_1)} + \
        \frac{x_2-x_1}{x_3-x_1}\right\} y_2\\
        - \frac{(x_2-x_1)^2}{(x_3-x_2)(x_3-x_1)} y_3\right]

    The integral between :math:`x_2` and :math:`x_3` is given by swapping
    appearances of :math:`x_1` and :math:`x_3`. The integral is estimated
    separately for each subinterval and then cumulatively summed to obtain
    the final result.

    For samples that are equally spaced, the result is exact if the function
    is a polynomial of order three or less [1]_ and the number of subintervals
    is even. Otherwise, the integral is exact for polynomials of order two or
    less.

    References
    ----------
    .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Simpson's_rule
    .. [2] Cartwright, Kenneth V. Simpson's Rule Cumulative Integration with
           MS Excel and Irregularly-spaced Data. Journal of Mathematical
           Sciences and Mathematics Education. 12 (2): 1-9

    Examples
    --------
    >>> from scipy import integrate
    >>> import numpy as np
    >>> import matplotlib.pyplot as plt
    >>> x = np.linspace(-2, 2, num=20)
    >>> y = x**2
    >>> y_int = integrate.cumulative_simpson(y, x=x, initial=0)
    >>> fig, ax = plt.subplots()
    >>> ax.plot(x, y_int, 'ro', x, x**3/3 - (x[0])**3/3, 'b-')
    >>> ax.grid()
    >>> plt.show()

    The output of `cumulative_simpson` is similar to that of iteratively
    calling `simpson` with successively higher upper limits of integration, but
    not identical.

    >>> def cumulative_simpson_reference(y, x):
    ...     return np.asarray([integrate.simpson(y[:i], x=x[:i])
    ...                        for i in range(2, len(y) + 1)])
    >>>
    >>> rng = np.random.default_rng(354673834679465)
    >>> x, y = rng.random(size=(2, 10))
    >>> x.sort()
    >>>
    >>> res = integrate.cumulative_simpson(y, x=x)
    >>> ref = cumulative_simpson_reference(y, x)
    >>> equal = np.abs(res - ref) < 1e-15
    >>> equal  # not equal when `simpson` has even number of subintervals
    array([False,  True, False,  True, False,  True, False,  True,  True])

    This is expected: because `cumulative_simpson` has access to more
    information than `simpson`, it can typically produce more accurate
    estimates of the underlying integral over subintervals.

    """
    y = _ensure_float_array(y)

    # validate `axis` and standardize to work along the last axis
    original_y = y
    original_shape = y.shape
    try:
        y = np.swapaxes(y, axis, -1)
    except IndexError as e:
        message = f"`axis={axis}` is not valid for `y` with `y.ndim={y.ndim}`."
        raise ValueError(message) from e
    if y.shape[-1] < 3:
        # Too few points for a parabola: fall back to the trapezoidal rule,
        # computed on the untouched input, then re-orient to the working axis.
        res = cumulative_trapezoid(original_y, x, dx=dx, axis=axis, initial=None)
        res = np.swapaxes(res, axis, -1)

    elif x is not None:
        # Unequally spaced path: spacings come from `x`.
        x = _ensure_float_array(x)
        message = ("If given, shape of `x` must be the same as `y` or 1-D with "
                   "the same length as `y` along `axis`.")
        if not (x.shape == original_shape
                or (x.ndim == 1 and len(x) == original_shape[axis])):
            raise ValueError(message)

        x = np.broadcast_to(x, y.shape) if x.ndim == 1 else np.swapaxes(x, axis, -1)
        dx = np.diff(x, axis=-1)
        if np.any(dx <= 0):
            raise ValueError("Input x must be strictly increasing.")
        res = _cumulatively_sum_simpson_integrals(
            y, dx, _cumulative_simpson_unequal_intervals
        )

    else:
        # Equally spaced path: spacings come from scalar or length-1 `dx`.
        dx = _ensure_float_array(dx)
        final_dx_shape = tupleset(original_shape, axis, original_shape[axis] - 1)
        alt_input_dx_shape = tupleset(original_shape, axis, 1)
        message = ("If provided, `dx` must either be a scalar or have the same "
                   "shape as `y` but with only 1 point along `axis`.")
        if not (dx.ndim == 0 or dx.shape == alt_input_dx_shape):
            raise ValueError(message)
        dx = np.broadcast_to(dx, final_dx_shape)
        dx = np.swapaxes(dx, axis, -1)
        res = _cumulatively_sum_simpson_integrals(
            y, dx, _cumulative_simpson_equal_intervals
        )

    if initial is not None:
        initial = _ensure_float_array(initial)
        alt_initial_input_shape = tupleset(original_shape, axis, 1)
        message = ("If provided, `initial` must either be a scalar or have the "
                   "same shape as `y` but with only 1 point along `axis`.")
        if not (initial.ndim == 0 or initial.shape == alt_initial_input_shape):
            raise ValueError(message)
        initial = np.broadcast_to(initial, alt_initial_input_shape)
        initial = np.swapaxes(initial, axis, -1)

        # Offset every cumulative value by `initial`, then prepend it so the
        # output matches the shape of `y` along the integration axis.
        res += initial
        res = np.concatenate((initial, res), axis=-1)

    # Undo the axis standardization performed at the top.
    res = np.swapaxes(res, -1, axis)
    return res
def romb(y, dx=1.0, axis=-1, show=False):
    """
    Romberg integration using samples of a function.

    Parameters
    ----------
    y : array_like
        A vector of ``2**k + 1`` equally-spaced samples of a function.
    dx : float, optional
        The sample spacing. Default is 1.
    axis : int, optional
        The axis along which to integrate. Default is -1 (last axis).
    show : bool, optional
        When `y` is a single 1-D array, then if this argument is True
        print the table showing Richardson extrapolation from the
        samples. Default is False.

    Returns
    -------
    romb : ndarray
        The integrated result for `axis`.

    See Also
    --------
    quad : adaptive quadrature using QUADPACK
    fixed_quad : fixed-order Gaussian quadrature
    dblquad : double integrals
    tplquad : triple integrals
    simpson : integrators for sampled data
    cumulative_trapezoid : cumulative integration for sampled data

    Examples
    --------
    >>> from scipy import integrate
    >>> import numpy as np
    >>> x = np.arange(10, 14.25, 0.25)
    >>> y = np.arange(3, 12)

    >>> integrate.romb(y)
    56.0

    >>> y = np.sin(np.power(x, 2.5))
    >>> integrate.romb(y)
    -0.742561336672229

    >>> integrate.romb(y, show=True)
    Richardson Extrapolation Table for Romberg Integration
    ======================================================
    -0.81576
     4.63862  6.45674
    -1.10581 -3.02062 -3.65245
    -2.57379 -3.06311 -3.06595 -3.05664
    -1.34093 -0.92997 -0.78776 -0.75160 -0.74256
    ======================================================
    -0.742561336672229  # may vary

    """
    y = np.asarray(y)
    nd = len(y.shape)
    Nsamps = y.shape[axis]
    Ninterv = Nsamps-1
    n = 1
    k = 0
    # Find k with 2**k == Ninterv; anything else is rejected below.
    while n < Ninterv:
        n <<= 1
        k += 1
    if n != Ninterv:
        raise ValueError("Number of samples must be one plus a "
                         "non-negative power of 2.")

    # R[(i, j)] is the Romberg tableau: row i uses step h / 2**i, column j
    # applies the j-th Richardson extrapolation.
    R = {}
    slice_all = (slice(None),) * nd
    slice0 = tupleset(slice_all, axis, 0)
    slicem1 = tupleset(slice_all, axis, -1)
    h = Ninterv * np.asarray(dx, dtype=float)
    R[(0, 0)] = (y[slice0] + y[slicem1])/2.0*h
    slice_R = slice_all
    start = stop = step = Ninterv
    for i in range(1, k+1):
        start >>= 1
        # Select only the NEW sample points introduced at this refinement.
        slice_R = tupleset(slice_R, axis, slice(start, stop, step))
        step >>= 1
        R[(i, 0)] = 0.5*(R[(i-1, 0)] + h*y[slice_R].sum(axis=axis))
        for j in range(1, i+1):
            prev = R[(i, j-1)]
            # Richardson extrapolation: error shrinks by 4**j per level.
            R[(i, j)] = prev + (prev-R[(i-1, j-1)]) / ((1 << (2*j))-1)
        h /= 2.0

    if show:
        if not np.isscalar(R[(0, 0)]):
            print("*** Printing table only supported for integrals" +
                  " of a single data set.")
        else:
            # Undocumented convenience: `show` may be a (precision, width)
            # sequence; a plain truthy bool falls back to the defaults via
            # the except clauses below.
            try:
                precis = show[0]
            except (TypeError, IndexError):
                precis = 5
            try:
                width = show[1]
            except (TypeError, IndexError):
                width = 8
            formstr = "%%%d.%df" % (width, precis)

            title = "Richardson Extrapolation Table for Romberg Integration"
            print(title, "=" * len(title), sep="\n", end="\n")
            for i in range(k+1):
                for j in range(i+1):
                    print(formstr % R[(i, j)], end=" ")
                print()
            print("=" * len(title))

    # Most-extrapolated corner of the tableau is the final estimate.
    return R[(k, k)]
+ """ + if numtraps <= 0: + raise ValueError("numtraps must be > 0 in difftrap().") + elif numtraps == 1: + return 0.5*(function(interval[0])+function(interval[1])) + else: + numtosum = numtraps/2 + h = float(interval[1]-interval[0])/numtosum + lox = interval[0] + 0.5 * h + points = lox + h * np.arange(numtosum) + s = np.sum(function(points), axis=0) + return s + + +def _romberg_diff(b, c, k): + """ + Compute the differences for the Romberg quadrature corrections. + See Forman Acton's "Real Computing Made Real," p 143. + """ + tmp = 4.0**k + return (tmp * c - b)/(tmp - 1.0) + + +def _printresmat(function, interval, resmat): + # Print the Romberg result matrix. + i = j = 0 + print('Romberg integration of', repr(function), end=' ') + print('from', interval) + print('') + print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results')) + for i in range(len(resmat)): + print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ') + for j in range(i+1): + print('%9f' % (resmat[i][j]), end=' ') + print('') + print('') + print('The final result is', resmat[i][j], end=' ') + print('after', 2**(len(resmat)-1)+1, 'function evaluations.') + + +@_deprecated("`scipy.integrate.romberg` is deprecated as of SciPy 1.12.0" + "and will be removed in SciPy 1.15.0. Please use" + "`scipy.integrate.quad` instead.") +def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False, + divmax=10, vec_func=False): + """ + Romberg integration of a callable function or method. + + .. deprecated:: 1.12.0 + + This function is deprecated as of SciPy 1.12.0 and will be removed + in SciPy 1.15.0. Please use `scipy.integrate.quad` instead. + + Returns the integral of `function` (a function of one variable) + over the interval (`a`, `b`). + + If `show` is 1, the triangular array of the intermediate results + will be printed. If `vec_func` is True (default is False), then + `function` is assumed to support vector arguments. 
@_deprecated("`scipy.integrate.romberg` is deprecated as of SciPy 1.12.0 "
             "and will be removed in SciPy 1.15.0. Please use "
             "`scipy.integrate.quad` instead.")
def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False,
            divmax=10, vec_func=False):
    """
    Romberg integration of a callable function or method.

    .. deprecated:: 1.12.0

        This function is deprecated as of SciPy 1.12.0 and will be removed
        in SciPy 1.15.0. Please use `scipy.integrate.quad` instead.

    Returns the integral of `function` (a function of one variable)
    over the interval (`a`, `b`).

    If `show` is 1, the triangular array of the intermediate results
    will be printed. If `vec_func` is True (default is False), then
    `function` is assumed to support vector arguments.

    Parameters
    ----------
    function : callable
        Function to be integrated.
    a : float
        Lower limit of integration.
    b : float
        Upper limit of integration.

    Returns
    -------
    results : float
        Result of the integration.

    Other Parameters
    ----------------
    args : tuple, optional
        Extra arguments to pass to function. Each element of `args` will
        be passed as a single argument to `func`. Default is to pass no
        extra arguments.
    tol, rtol : float, optional
        The desired absolute and relative tolerances. Defaults are 1.48e-8.
    show : bool, optional
        Whether to print the results. Default is False.
    divmax : int, optional
        Maximum order of extrapolation. Default is 10.
    vec_func : bool, optional
        Whether `func` handles arrays as arguments (i.e., whether it is a
        "vector" function). Default is False.

    See Also
    --------
    fixed_quad : Fixed-order Gaussian quadrature.
    quad : Adaptive quadrature using QUADPACK.
    dblquad : Double integrals.
    tplquad : Triple integrals.
    romb : Integrators for sampled data.
    simpson : Integrators for sampled data.
    cumulative_trapezoid : Cumulative integration for sampled data.

    References
    ----------
    .. [1] 'Romberg's method' https://en.wikipedia.org/wiki/Romberg%27s_method

    Examples
    --------
    Integrate a gaussian from 0 to 1 and compare to the error function.

    >>> from scipy import integrate
    >>> from scipy.special import erf
    >>> import numpy as np
    >>> gaussian = lambda x: 1/np.sqrt(np.pi) * np.exp(-x**2)
    >>> result = integrate.romberg(gaussian, 0, 1, show=True)
    Romberg integration of from [0, 1]

    ::

       Steps  StepSize  Results
           1  1.000000  0.385872
           2  0.500000  0.412631  0.421551
           4  0.250000  0.419184  0.421368  0.421356
           8  0.125000  0.420810  0.421352  0.421350  0.421350
          16  0.062500  0.421215  0.421350  0.421350  0.421350  0.421350
          32  0.031250  0.421317  0.421350  0.421350  0.421350  0.421350  0.421350

    The final result is 0.421350396475 after 33 function evaluations.

    >>> print("%g %g" % (2*result, erf(1)))
    0.842701 0.842701

    """
    if np.isinf(a) or np.isinf(b):
        raise ValueError("Romberg integration only available "
                         "for finite limits.")
    vfunc = vectorize1(function, args, vec_func=vec_func)
    n = 1
    interval = [a, b]
    intrange = b - a
    # Level 0: plain trapezoid on the endpoints.
    ordsum = _difftrap(vfunc, interval, n)
    result = intrange * ordsum
    resmat = [[result]]
    err = np.inf
    last_row = resmat[0]
    for i in range(1, divmax+1):
        n *= 2
        # Accumulate only the newly introduced ordinates for this level.
        ordsum += _difftrap(vfunc, interval, n)
        row = [intrange * ordsum / n]
        # Richardson-extrapolate against the previous row.
        for k in range(i):
            row.append(_romberg_diff(last_row[k], row[k], k+1))
        result = row[i]
        lastresult = last_row[i-1]
        if show:
            resmat.append(row)
        err = abs(result - lastresult)
        # Converged when either the absolute or relative tolerance is met.
        if err < tol or err < rtol * abs(result):
            break
        last_row = row
    else:
        # Loop exhausted divmax levels without converging.
        warnings.warn(
            "divmax (%d) exceeded. Latest difference = %e" % (divmax, err),
            AccuracyWarning, stacklevel=2)

    if show:
        _printresmat(vfunc, interval, resmat)
    return result
# Coefficients for Newton-Cotes quadrature
#
# These are the points being used
#  to construct the local interpolating polynomial
#  a are the weights for Newton-Cotes integration
#  B is the error coefficient.
#  error in these coefficients grows as N gets larger.
#  or as samples are closer and closer together

# You can use maxima to find these rational coefficients
#  for equally spaced data using the commands
#  a(i,N) := (integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N)
#             / ((N-i)! * i!) * (-1)^(N-i));
#  Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N));
#  Bo(N) := N^(N+1)/(N+1)! * (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N));
#  B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N));
#
# pre-computed for equally-spaced weights
#
# num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N]
#
#  a = num_a*array(int_a)/den_a
#  B = num_B*1.0 / den_B
#
# integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*)
#   where k = N // 2
#
_builtincoeffs = {
    1: (1,2,[1,1],-1,12),
    2: (1,3,[1,4,1],-1,90),
    3: (3,8,[1,3,3,1],-3,80),
    4: (2,45,[7,32,12,32,7],-8,945),
    5: (5,288,[19,75,50,50,75,19],-275,12096),
    6: (1,140,[41,216,27,272,27,216,41],-9,1400),
    7: (7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400),
    8: (4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989],
        -2368,467775),
    9: (9,89600,[2857,15741,1080,19344,5778,5778,19344,1080,
                 15741,2857], -4671, 394240),
    10: (5,299376,[16067,106300,-48525,272400,-260550,427368,
                   -260550,272400,-48525,106300,16067],
         -673175, 163459296),
    11: (11,87091200,[2171465,13486539,-3237113, 25226685,-9595542,
                      15493566,15493566,-9595542,25226685,-3237113,
                      13486539,2171465], -2224234463, 237758976000),
    12: (1, 5255250, [1364651,9903168,-7587864,35725120,-51491295,
                      87516288,-87797136,87516288,-51491295,35725120,
                      -7587864,9903168,1364651], -3012, 875875),
    13: (13, 402361344000,[8181904909, 56280729661, -31268252574,
                           156074417954,-151659573325,206683437987,
                           -43111992612,-43111992612,206683437987,
                           -151659573325,156074417954,-31268252574,
                           56280729661,8181904909], -2639651053,
         344881152000),
    14: (7, 2501928000, [90241897,710986864,-770720657,3501442784,
                         -6625093363,12630121616,-16802270373,19534438464,
                         -16802270373,12630121616,-6625093363,3501442784,
                         -770720657,710986864,90241897], -3740727473,
         1275983280000)
    }


def newton_cotes(rn, equal=0):
    r"""
    Return weights and error coefficient for Newton-Cotes integration.

    Suppose we have (N+1) samples of f at the positions
    x_0, x_1, ..., x_N. Then an N-point Newton-Cotes formula for the
    integral between x_0 and x_N is:

    :math:`\int_{x_0}^{x_N} f(x)dx = \Delta x \sum_{i=0}^{N} a_i f(x_i)
    + B_N (\Delta x)^{N+2} f^{N+1} (\xi)`

    where :math:`\xi \in [x_0,x_N]`
    and :math:`\Delta x = \frac{x_N-x_0}{N}` is the average samples spacing.

    If the samples are equally-spaced and N is even, then the error
    term is :math:`B_N (\Delta x)^{N+3} f^{N+2}(\xi)`.

    Parameters
    ----------
    rn : int
        The integer order for equally-spaced data or the relative positions of
        the samples with the first sample at 0 and the last at N, where N+1 is
        the length of `rn`. N is the order of the Newton-Cotes integration.
    equal : int, optional
        Set to 1 to enforce equally spaced data.

    Returns
    -------
    an : ndarray
        1-D array of weights to apply to the function at the provided sample
        positions.
    B : float
        Error coefficient.

    Notes
    -----
    Normally, the Newton-Cotes rules are used on smaller integration
    regions and a composite rule is used to return the total integral.

    Examples
    --------
    Compute the integral of sin(x) in [0, :math:`\pi`]:

    >>> from scipy.integrate import newton_cotes
    >>> import numpy as np
    >>> def f(x):
    ...     return np.sin(x)
    >>> a = 0
    >>> b = np.pi
    >>> exact = 2
    >>> for N in [2, 4, 6, 8, 10]:
    ...     x = np.linspace(a, b, N + 1)
    ...     an, B = newton_cotes(N, 1)
    ...     dx = (b - a) / N
    ...     quad = dx * np.sum(an * f(x))
    ...     error = abs(quad - exact)
    ...     print('{:2d}  {:10.9f}  {:.5e}'.format(N, quad, error))
    ...
     2  2.094395102  9.43951e-02
     4  1.998570732  1.42927e-03
     6  2.000017814  1.78136e-05
     8  1.999999835  1.64725e-07
    10  2.000000001  1.14677e-09

    """
    try:
        N = len(rn)-1
        if equal:
            rn = np.arange(N+1)
        elif np.all(np.diff(rn) == 1):
            equal = 1
    except Exception:
        # `rn` is a scalar: treat it as the order with unit spacing.
        N = rn
        rn = np.arange(N+1)
        equal = 1

    if equal and N in _builtincoeffs:
        na, da, vi, nb, db = _builtincoeffs[N]
        an = na * np.array(vi, dtype=float) / da
        return an, float(nb)/db

    # Accept any array-like `rn` (e.g. a plain list) on the general,
    # unequally-spaced path; previously a list crashed at `rn / float(N)`.
    rn = np.asarray(rn)
    if (rn[0] != 0) or (rn[-1] != N):
        raise ValueError("The sample positions must start at 0"
                         " and end at N")
    yi = rn / float(N)
    ti = 2 * yi - 1
    nvec = np.arange(N+1)
    # Vandermonde system on [-1, 1]: solve for weights reproducing the
    # moments of the monomials (odd moments vanish by symmetry).
    C = ti ** nvec[:, np.newaxis]
    Cinv = np.linalg.inv(C)
    # improve precision of result
    for i in range(2):
        Cinv = 2*Cinv - Cinv.dot(C).dot(Cinv)
    vec = 2.0 / (nvec[::2]+1)
    ai = Cinv[:, ::2].dot(vec) * (N / 2.)

    if (N % 2 == 0) and equal:
        BN = N/(N+3.)
        power = N+2
    else:
        BN = N/(N+2.)
        power = N+1

    BN = BN - np.dot(yi**power, ai)
    p1 = power+1
    # N**power / power! computed in log space to avoid overflow.
    fac = power*math.log(N) - gammaln(p1)
    fac = math.exp(fac)
    return ai, BN*fac
def _qmc_quad_iv(func, a, b, n_points, n_estimates, qrng, log):
    """Validate and standardize the inputs of `qmc_quad`.

    Returns the tuple ``(vfunc, a, b, n_points, n_estimates, qrng, rng,
    log, stats)`` with `a`/`b` broadcast to 1-D arrays, integer counts
    coerced, a (possibly wrapped) vectorized integrand, and a default
    Halton engine constructed when `qrng` is None.
    """

    # lazy import to avoid issues with partially-initialized submodule;
    # cache the module on the public function object after the first call.
    # (Previously this tested `hasattr(qmc_quad, 'qmc')` while storing
    # `qmc_quad.stats`, so the cache never hit and the else branch was dead.)
    if not hasattr(qmc_quad, 'stats'):
        from scipy import stats
        qmc_quad.stats = stats
    else:
        stats = qmc_quad.stats

    if not callable(func):
        message = "`func` must be callable."
        raise TypeError(message)

    # a, b will be modified, so copy. Oh well if it's copied twice.
    a = np.atleast_1d(a).copy()
    b = np.atleast_1d(b).copy()
    a, b = np.broadcast_arrays(a, b)
    dim = a.shape[0]

    # Probe a scalar call at the centroid to make sure the integrand is
    # evaluable inside the integration volume at all.
    try:
        func((a + b) / 2)
    except Exception as e:
        message = ("`func` must evaluate the integrand at points within "
                   "the integration range; e.g. `func( (a + b) / 2)` "
                   "must return the integrand at the centroid of the "
                   "integration volume.")
        raise ValueError(message) from e

    # Probe a vectorized call; fall back to apply_along_axis if it fails.
    try:
        func(np.array([a, b]).T)
        vfunc = func
    except Exception as e:
        message = ("Exception encountered when attempting vectorized call to "
                   f"`func`: {e}. For better performance, `func` should "
                   "accept two-dimensional array `x` with shape `(len(a), "
                   "n_points)` and return an array of the integrand value at "
                   "each of the `n_points`.")
        warnings.warn(message, stacklevel=3)

        def vfunc(x):
            return np.apply_along_axis(func, axis=-1, arr=x)

    n_points_int = np.int64(n_points)
    if n_points != n_points_int:
        message = "`n_points` must be an integer."
        raise TypeError(message)

    n_estimates_int = np.int64(n_estimates)
    if n_estimates != n_estimates_int:
        message = "`n_estimates` must be an integer."
        raise TypeError(message)

    if qrng is None:
        qrng = stats.qmc.Halton(dim)
    elif not isinstance(qrng, stats.qmc.QMCEngine):
        message = "`qrng` must be an instance of scipy.stats.qmc.QMCEngine."
        raise TypeError(message)

    if qrng.d != a.shape[0]:
        message = ("`qrng` must be initialized with dimensionality equal to "
                   "the number of variables in `a`, i.e., "
                   "`qrng.random().shape[-1]` must equal `a.shape[0]`.")
        raise ValueError(message)

    rng_seed = getattr(qrng, 'rng_seed', None)
    rng = stats._qmc.check_random_state(rng_seed)

    if log not in {True, False}:
        message = "`log` must be boolean (`True` or `False`)."
        raise TypeError(message)

    return (vfunc, a, b, n_points_int, n_estimates_int, qrng, rng, log, stats)


QMCQuadResult = namedtuple('QMCQuadResult', ['integral', 'standard_error'])
Must accept a single argument ``x``, an array which + specifies the point(s) at which to evaluate the scalar-valued + integrand, and return the value(s) of the integrand. + For efficiency, the function should be vectorized to accept an array of + shape ``(d, n_points)``, where ``d`` is the number of variables (i.e. + the dimensionality of the function domain) and `n_points` is the number + of quadrature points, and return an array of shape ``(n_points,)``, + the integrand at each quadrature point. + a, b : array-like + One-dimensional arrays specifying the lower and upper integration + limits, respectively, of each of the ``d`` variables. + n_estimates, n_points : int, optional + `n_estimates` (default: 8) statistically independent QMC samples, each + of `n_points` (default: 1024) points, will be generated by `qrng`. + The total number of points at which the integrand `func` will be + evaluated is ``n_points * n_estimates``. See Notes for details. + qrng : `~scipy.stats.qmc.QMCEngine`, optional + An instance of the QMCEngine from which to sample QMC points. + The QMCEngine must be initialized to a number of dimensions ``d`` + corresponding with the number of variables ``x1, ..., xd`` passed to + `func`. + The provided QMCEngine is used to produce the first integral estimate. + If `n_estimates` is greater than one, additional QMCEngines are + spawned from the first (with scrambling enabled, if it is an option.) + If a QMCEngine is not provided, the default `scipy.stats.qmc.Halton` + will be initialized with the number of dimensions determine from + the length of `a`. + log : boolean, default: False + When set to True, `func` returns the log of the integrand, and + the result object contains the log of the integral. + + Returns + ------- + result : object + A result object with attributes: + + integral : float + The estimate of the integral. + standard_error : + The error estimate. See Notes for interpretation. 
+ + Notes + ----- + Values of the integrand at each of the `n_points` points of a QMC sample + are used to produce an estimate of the integral. This estimate is drawn + from a population of possible estimates of the integral, the value of + which we obtain depends on the particular points at which the integral + was evaluated. We perform this process `n_estimates` times, each time + evaluating the integrand at different scrambled QMC points, effectively + drawing i.i.d. random samples from the population of integral estimates. + The sample mean :math:`m` of these integral estimates is an + unbiased estimator of the true value of the integral, and the standard + error of the mean :math:`s` of these estimates may be used to generate + confidence intervals using the t distribution with ``n_estimates - 1`` + degrees of freedom. Perhaps counter-intuitively, increasing `n_points` + while keeping the total number of function evaluation points + ``n_points * n_estimates`` fixed tends to reduce the actual error, whereas + increasing `n_estimates` tends to decrease the error estimate. + + Examples + -------- + QMC quadrature is particularly useful for computing integrals in higher + dimensions. An example integrand is the probability density function + of a multivariate normal distribution. + + >>> import numpy as np + >>> from scipy import stats + >>> dim = 8 + >>> mean = np.zeros(dim) + >>> cov = np.eye(dim) + >>> def func(x): + ... # `multivariate_normal` expects the _last_ axis to correspond with + ... # the dimensionality of the space, so `x` must be transposed + ... 
return stats.multivariate_normal.pdf(x.T, mean, cov) + + To compute the integral over the unit hypercube: + + >>> from scipy.integrate import qmc_quad + >>> a = np.zeros(dim) + >>> b = np.ones(dim) + >>> rng = np.random.default_rng() + >>> qrng = stats.qmc.Halton(d=dim, seed=rng) + >>> n_estimates = 8 + >>> res = qmc_quad(func, a, b, n_estimates=n_estimates, qrng=qrng) + >>> res.integral, res.standard_error + (0.00018429555666024108, 1.0389431116001344e-07) + + A two-sided, 99% confidence interval for the integral may be estimated + as: + + >>> t = stats.t(df=n_estimates-1, loc=res.integral, + ... scale=res.standard_error) + >>> t.interval(0.99) + (0.0001839319802536469, 0.00018465913306683527) + + Indeed, the value reported by `scipy.stats.multivariate_normal` is + within this range. + + >>> stats.multivariate_normal.cdf(b, mean, cov, lower_limit=a) + 0.00018430867675187443 + + """ + args = _qmc_quad_iv(func, a, b, n_points, n_estimates, qrng, log) + func, a, b, n_points, n_estimates, qrng, rng, log, stats = args + + def sum_product(integrands, dA, log=False): + if log: + return logsumexp(integrands) + np.log(dA) + else: + return np.sum(integrands * dA) + + def mean(estimates, log=False): + if log: + return logsumexp(estimates) - np.log(n_estimates) + else: + return np.mean(estimates) + + def std(estimates, m=None, ddof=0, log=False): + m = m or mean(estimates, log) + if log: + estimates, m = np.broadcast_arrays(estimates, m) + temp = np.vstack((estimates, m + np.pi * 1j)) + diff = logsumexp(temp, axis=0) + return np.real(0.5 * (logsumexp(2 * diff) + - np.log(n_estimates - ddof))) + else: + return np.std(estimates, ddof=ddof) + + def sem(estimates, m=None, s=None, log=False): + m = m or mean(estimates, log) + s = s or std(estimates, m, ddof=1, log=log) + if log: + return s - 0.5*np.log(n_estimates) + else: + return s / np.sqrt(n_estimates) + + # The sign of the integral depends on the order of the limits. 
Fix this by + # ensuring that lower bounds are indeed lower and setting sign of resulting + # integral manually + if np.any(a == b): + message = ("A lower limit was equal to an upper limit, so the value " + "of the integral is zero by definition.") + warnings.warn(message, stacklevel=2) + return QMCQuadResult(-np.inf if log else 0, 0) + + i_swap = b < a + sign = (-1)**(i_swap.sum(axis=-1)) # odd # of swaps -> negative + a[i_swap], b[i_swap] = b[i_swap], a[i_swap] + + A = np.prod(b - a) + dA = A / n_points + + estimates = np.zeros(n_estimates) + rngs = _rng_spawn(qrng.rng, n_estimates) + for i in range(n_estimates): + # Generate integral estimate + sample = qrng.random(n_points) + # The rationale for transposing is that this allows users to easily + # unpack `x` into separate variables, if desired. This is consistent + # with the `xx` array passed into the `scipy.integrate.nquad` `func`. + x = stats.qmc.scale(sample, a, b).T # (n_dim, n_points) + integrands = func(x) + estimates[i] = sum_product(integrands, dA, log) + + # Get a new, independently-scrambled QRNG for next time + qrng = type(qrng)(seed=rngs[i], **qrng._init_quad) + + integral = mean(estimates, log) + standard_error = sem(estimates, m=integral, log=log) + integral = integral + np.pi*1j if (log and sign < 0) else integral*sign + return QMCQuadResult(integral, standard_error) diff --git a/parrot/lib/python3.10/site-packages/scipy/integrate/_tanhsinh.py b/parrot/lib/python3.10/site-packages/scipy/integrate/_tanhsinh.py new file mode 100644 index 0000000000000000000000000000000000000000..28f17cc65bc561f389640805969de3eb7c36a784 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/integrate/_tanhsinh.py @@ -0,0 +1,1231 @@ +# mypy: disable-error-code="attr-defined" +import numpy as np +from scipy import special +import scipy._lib._elementwise_iterative_method as eim +from scipy._lib._util import _RichResult + +# todo: +# figure out warning situation +# address 
https://github.com/scipy/scipy/pull/18650#discussion_r1233032521 +# without `minweight`, we are also suppressing infinities within the interval. +# Is that OK? If so, we can probably get rid of `status=3`. +# Add heuristic to stop when improvement is too slow / antithrashing +# support singularities? interval subdivision? this feature will be added +# eventually, but do we adjust the interface now? +# When doing log-integration, should the tolerances control the error of the +# log-integral or the error of the integral? The trouble is that `log` +# inherently looses some precision so it may not be possible to refine +# the integral further. Example: 7th moment of stats.f(15, 20) +# respect function evaluation limit? +# make public? + + +def _tanhsinh(f, a, b, *, args=(), log=False, maxfun=None, maxlevel=None, + minlevel=2, atol=None, rtol=None, preserve_shape=False, + callback=None): + """Evaluate a convergent integral numerically using tanh-sinh quadrature. + + In practice, tanh-sinh quadrature achieves quadratic convergence for + many integrands: the number of accurate *digits* scales roughly linearly + with the number of function evaluations [1]_. + + Either or both of the limits of integration may be infinite, and + singularities at the endpoints are acceptable. Divergent integrals and + integrands with non-finite derivatives or singularities within an interval + are out of scope, but the latter may be evaluated be calling `_tanhsinh` on + each sub-interval separately. + + Parameters + ---------- + f : callable + The function to be integrated. The signature must be:: + func(x: ndarray, *fargs) -> ndarray + where each element of ``x`` is a finite real and ``fargs`` is a tuple, + which may contain an arbitrary number of arrays that are broadcastable + with `x`. ``func`` must be an elementwise-scalar function; see + documentation of parameter `preserve_shape` for details. 
+ If ``func`` returns a value with complex dtype when evaluated at + either endpoint, subsequent arguments ``x`` will have complex dtype + (but zero imaginary part). + a, b : array_like + Real lower and upper limits of integration. Must be broadcastable. + Elements may be infinite. + args : tuple, optional + Additional positional arguments to be passed to `func`. Must be arrays + broadcastable with `a` and `b`. If the callable to be integrated + requires arguments that are not broadcastable with `a` and `b`, wrap + that callable with `f`. See Examples. + log : bool, default: False + Setting to True indicates that `f` returns the log of the integrand + and that `atol` and `rtol` are expressed as the logs of the absolute + and relative errors. In this case, the result object will contain the + log of the integral and error. This is useful for integrands for which + numerical underflow or overflow would lead to inaccuracies. + When ``log=True``, the integrand (the exponential of `f`) must be real, + but it may be negative, in which case the log of the integrand is a + complex number with an imaginary part that is an odd multiple of π. + maxlevel : int, default: 10 + The maximum refinement level of the algorithm. + + At the zeroth level, `f` is called once, performing 16 function + evaluations. At each subsequent level, `f` is called once more, + approximately doubling the number of function evaluations that have + been performed. Accordingly, for many integrands, each successive level + will double the number of accurate digits in the result (up to the + limits of floating point precision). + + The algorithm will terminate after completing level `maxlevel` or after + another termination condition is satisfied, whichever comes first. + minlevel : int, default: 2 + The level at which to begin iteration (default: 2). 
This does not + change the total number of function evaluations or the abscissae at + which the function is evaluated; it changes only the *number of times* + `f` is called. If ``minlevel=k``, then the integrand is evaluated at + all abscissae from levels ``0`` through ``k`` in a single call. + Note that if `minlevel` exceeds `maxlevel`, the provided `minlevel` is + ignored, and `minlevel` is set equal to `maxlevel`. + atol, rtol : float, optional + Absolute termination tolerance (default: 0) and relative termination + tolerance (default: ``eps**0.75``, where ``eps`` is the precision of + the result dtype), respectively. The error estimate is as + described in [1]_ Section 5. While not theoretically rigorous or + conservative, it is said to work well in practice. Must be non-negative + and finite if `log` is False, and must be expressed as the log of a + non-negative and finite number if `log` is True. + preserve_shape : bool, default: False + In the following, "arguments of `f`" refers to the array ``x`` and + any arrays within ``fargs``. Let ``shape`` be the broadcasted shape + of `a`, `b`, and all elements of `args` (which is conceptually + distinct from ``fargs`` passed into `f`). + + - When ``preserve_shape=False`` (default), `f` must accept arguments + of *any* broadcastable shapes. + + - When ``preserve_shape=True``, `f` must accept arguments of shape + ``shape`` *or* ``shape + (n,)``, where ``(n,)`` is the number of + abscissae at which the function is being evaluated. + + In either case, for each scalar element ``xi`` within `x`, the array + returned by `f` must include the scalar ``f(xi)`` at the same index. + Consequently, the shape of the output is always the shape of the input + ``x``. + + See Examples. + + callback : callable, optional + An optional user-supplied function to be called before the first + iteration and after each iteration. 
+ Called as ``callback(res)``, where ``res`` is a ``_RichResult`` + similar to that returned by `_differentiate` (but containing the + current iterate's values of all variables). If `callback` raises a + ``StopIteration``, the algorithm will terminate immediately and + `_tanhsinh` will return a result object. + + Returns + ------- + res : _RichResult + An instance of `scipy._lib._util._RichResult` with the following + attributes. (The descriptions are written as though the values will be + scalars; however, if `func` returns an array, the outputs will be + arrays of the same shape.) + success : bool + ``True`` when the algorithm terminated successfully (status ``0``). + status : int + An integer representing the exit status of the algorithm. + ``0`` : The algorithm converged to the specified tolerances. + ``-1`` : (unused) + ``-2`` : The maximum number of iterations was reached. + ``-3`` : A non-finite value was encountered. + ``-4`` : Iteration was terminated by `callback`. + ``1`` : The algorithm is proceeding normally (in `callback` only). + integral : float + An estimate of the integral + error : float + An estimate of the error. Only available if level two or higher + has been completed; otherwise NaN. + maxlevel : int + The maximum refinement level used. + nfev : int + The number of points at which `func` was evaluated. + + See Also + -------- + quad, quadrature + + Notes + ----- + Implements the algorithm as described in [1]_ with minor adaptations for + finite-precision arithmetic, including some described by [2]_ and [3]_. The + tanh-sinh scheme was originally introduced in [4]_. + + Due to floating-point error in the abscissae, the function may be evaluated + at the endpoints of the interval during iterations. The values returned by + the function at the endpoints will be ignored. + + References + ---------- + [1] Bailey, David H., Karthik Jeyabalan, and Xiaoye S. Li. "A comparison of + three high-precision quadrature schemes." 
Experimental Mathematics 14.3 + (2005): 317-329. + [2] Vanherck, Joren, Bart Sorée, and Wim Magnus. "Tanh-sinh quadrature for + single and multiple integration using floating-point arithmetic." + arXiv preprint arXiv:2007.15057 (2020). + [3] van Engelen, Robert A. "Improving the Double Exponential Quadrature + Tanh-Sinh, Sinh-Sinh and Exp-Sinh Formulas." + https://www.genivia.com/files/qthsh.pdf + [4] Takahasi, Hidetosi, and Masatake Mori. "Double exponential formulas for + numerical integration." Publications of the Research Institute for + Mathematical Sciences 9.3 (1974): 721-741. + + Example + ------- + Evaluate the Gaussian integral: + + >>> import numpy as np + >>> from scipy.integrate._tanhsinh import _tanhsinh + >>> def f(x): + ... return np.exp(-x**2) + >>> res = _tanhsinh(f, -np.inf, np.inf) + >>> res.integral # true value is np.sqrt(np.pi), 1.7724538509055159 + 1.7724538509055159 + >>> res.error # actual error is 0 + 4.0007963937534104e-16 + + The value of the Gaussian function (bell curve) is nearly zero for + arguments sufficiently far from zero, so the value of the integral + over a finite interval is nearly the same. + + >>> _tanhsinh(f, -20, 20).integral + 1.772453850905518 + + However, with unfavorable integration limits, the integration scheme + may not be able to find the important region. + + >>> _tanhsinh(f, -np.inf, 1000).integral + 4.500490856620352 + + In such cases, or when there are singularities within the interval, + break the integral into parts with endpoints at the important points. + + >>> _tanhsinh(f, -np.inf, 0).integral + _tanhsinh(f, 0, 1000).integral + 1.772453850905404 + + For integration involving very large or very small magnitudes, use + log-integration. (For illustrative purposes, the following example shows a + case in which both regular and log-integration work, but for more extreme + limits of integration, log-integration would avoid the underflow + experienced when evaluating the integral normally.) 
+ + >>> res = _tanhsinh(f, 20, 30, rtol=1e-10) + >>> res.integral, res.error + 4.7819613911309014e-176, 4.670364401645202e-187 + >>> def log_f(x): + ... return -x**2 + >>> np.exp(res.integral), np.exp(res.error) + 4.7819613911306924e-176, 4.670364401645093e-187 + + The limits of integration and elements of `args` may be broadcastable + arrays, and integration is performed elementwise. + + >>> from scipy import stats + >>> dist = stats.gausshyper(13.8, 3.12, 2.51, 5.18) + >>> a, b = dist.support() + >>> x = np.linspace(a, b, 100) + >>> res = _tanhsinh(dist.pdf, a, x) + >>> ref = dist.cdf(x) + >>> np.allclose(res.integral, ref) + + By default, `preserve_shape` is False, and therefore the callable + `f` may be called with arrays of any broadcastable shapes. + For example: + + >>> shapes = [] + >>> def f(x, c): + ... shape = np.broadcast_shapes(x.shape, c.shape) + ... shapes.append(shape) + ... return np.sin(c*x) + >>> + >>> c = [1, 10, 30, 100] + >>> res = _tanhsinh(f, 0, 1, args=(c,), minlevel=1) + >>> shapes + [(4,), (4, 66), (3, 64), (2, 128), (1, 256)] + + To understand where these shapes are coming from - and to better + understand how `_tanhsinh` computes accurate results - note that + higher values of ``c`` correspond with higher frequency sinusoids. + The higher frequency sinusoids make the integrand more complicated, + so more function evaluations are required to achieve the target + accuracy: + + >>> res.nfev + array([ 67, 131, 259, 515]) + + The initial ``shape``, ``(4,)``, corresponds with evaluating the + integrand at a single abscissa and all four frequencies; this is used + for input validation and to determine the size and dtype of the arrays + that store results. The next shape corresponds with evaluating the + integrand at an initial grid of abscissae and all four frequencies. + Successive calls to the function double the total number of abscissae at + which the function has been evaluated. 
However, in later function + evaluations, the integrand is evaluated at fewer frequencies because + the corresponding integral has already converged to the required + tolerance. This saves function evaluations to improve performance, but + it requires the function to accept arguments of any shape. + + "Vector-valued" integrands, such as those written for use with + `scipy.integrate.quad_vec`, are unlikely to satisfy this requirement. + For example, consider + + >>> def f(x): + ... return [x, np.sin(10*x), np.cos(30*x), x*np.sin(100*x)**2] + + This integrand is not compatible with `_tanhsinh` as written; for instance, + the shape of the output will not be the same as the shape of ``x``. Such a + function *could* be converted to a compatible form with the introduction of + additional parameters, but this would be inconvenient. In such cases, + a simpler solution would be to use `preserve_shape`. + + >>> shapes = [] + >>> def f(x): + ... shapes.append(x.shape) + ... x0, x1, x2, x3 = x + ... return [x0, np.sin(10*x1), np.cos(30*x2), x3*np.sin(100*x3)] + >>> + >>> a = np.zeros(4) + >>> res = _tanhsinh(f, a, 1, preserve_shape=True) + >>> shapes + [(4,), (4, 66), (4, 64), (4, 128), (4, 256)] + + Here, the broadcasted shape of `a` and `b` is ``(4,)``. With + ``preserve_shape=True``, the function may be called with argument + ``x`` of shape ``(4,)`` or ``(4, n)``, and this is what we observe. + + """ + (f, a, b, log, maxfun, maxlevel, minlevel, + atol, rtol, args, preserve_shape, callback) = _tanhsinh_iv( + f, a, b, log, maxfun, maxlevel, minlevel, atol, + rtol, args, preserve_shape, callback) + + # Initialization + # `eim._initialize` does several important jobs, including + # ensuring that limits, each of the `args`, and the output of `f` + # broadcast correctly and are of consistent types. To save a function + # evaluation, I pass the midpoint of the integration interval. 
This comes + # at a cost of some gymnastics to ensure that the midpoint has the right + # shape and dtype. Did you know that 0d and >0d arrays follow different + # type promotion rules? + with np.errstate(over='ignore', invalid='ignore', divide='ignore'): + c = ((a.ravel() + b.ravel())/2).reshape(a.shape) + inf_a, inf_b = np.isinf(a), np.isinf(b) + c[inf_a] = b[inf_a] - 1 # takes care of infinite a + c[inf_b] = a[inf_b] + 1 # takes care of infinite b + c[inf_a & inf_b] = 0 # takes care of infinite a and b + temp = eim._initialize(f, (c,), args, complex_ok=True, + preserve_shape=preserve_shape) + f, xs, fs, args, shape, dtype, xp = temp + a = np.broadcast_to(a, shape).astype(dtype).ravel() + b = np.broadcast_to(b, shape).astype(dtype).ravel() + + # Transform improper integrals + a, b, a0, negative, abinf, ainf, binf = _transform_integrals(a, b) + + # Define variables we'll need + nit, nfev = 0, 1 # one function evaluation performed above + zero = -np.inf if log else 0 + pi = dtype.type(np.pi) + maxiter = maxlevel - minlevel + 1 + eps = np.finfo(dtype).eps + if rtol is None: + rtol = 0.75*np.log(eps) if log else eps**0.75 + + Sn = np.full(shape, zero, dtype=dtype).ravel() # latest integral estimate + Sn[np.isnan(a) | np.isnan(b) | np.isnan(fs[0])] = np.nan + Sk = np.empty_like(Sn).reshape(-1, 1)[:, 0:0] # all integral estimates + aerr = np.full(shape, np.nan, dtype=dtype).ravel() # absolute error + status = np.full(shape, eim._EINPROGRESS, dtype=int).ravel() + h0 = np.real(_get_base_step(dtype=dtype)) # base step + + # For term `d4` of error estimate ([1] Section 5), we need to keep the + # most extreme abscissae and corresponding `fj`s, `wj`s in Euler-Maclaurin + # sum. Here, we initialize these variables. 
+ xr0 = np.full(shape, -np.inf, dtype=dtype).ravel() + fr0 = np.full(shape, np.nan, dtype=dtype).ravel() + wr0 = np.zeros(shape, dtype=dtype).ravel() + xl0 = np.full(shape, np.inf, dtype=dtype).ravel() + fl0 = np.full(shape, np.nan, dtype=dtype).ravel() + wl0 = np.zeros(shape, dtype=dtype).ravel() + d4 = np.zeros(shape, dtype=dtype).ravel() + + work = _RichResult( + Sn=Sn, Sk=Sk, aerr=aerr, h=h0, log=log, dtype=dtype, pi=pi, eps=eps, + a=a.reshape(-1, 1), b=b.reshape(-1, 1), # integration limits + n=minlevel, nit=nit, nfev=nfev, status=status, # iter/eval counts + xr0=xr0, fr0=fr0, wr0=wr0, xl0=xl0, fl0=fl0, wl0=wl0, d4=d4, # err est + ainf=ainf, binf=binf, abinf=abinf, a0=a0.reshape(-1, 1)) # transforms + # Constant scalars don't need to be put in `work` unless they need to be + # passed outside `tanhsinh`. Examples: atol, rtol, h0, minlevel. + + # Correspondence between terms in the `work` object and the result + res_work_pairs = [('status', 'status'), ('integral', 'Sn'), + ('error', 'aerr'), ('nit', 'nit'), ('nfev', 'nfev')] + + def pre_func_eval(work): + # Determine abscissae at which to evaluate `f` + work.h = h0 / 2**work.n + xjc, wj = _get_pairs(work.n, h0, dtype=work.dtype, + inclusive=(work.n == minlevel)) + work.xj, work.wj = _transform_to_limits(xjc, wj, work.a, work.b) + + # Perform abscissae substitutions for infinite limits of integration + xj = work.xj.copy() + xj[work.abinf] = xj[work.abinf] / (1 - xj[work.abinf]**2) + xj[work.binf] = 1/xj[work.binf] - 1 + work.a0[work.binf] + xj[work.ainf] *= -1 + return xj + + def post_func_eval(x, fj, work): + # Weight integrand as required by substitutions for infinite limits + if work.log: + fj[work.abinf] += (np.log(1 + work.xj[work.abinf] ** 2) + - 2*np.log(1 - work.xj[work.abinf] ** 2)) + fj[work.binf] -= 2 * np.log(work.xj[work.binf]) + else: + fj[work.abinf] *= ((1 + work.xj[work.abinf]**2) / + (1 - work.xj[work.abinf]**2)**2) + fj[work.binf] *= work.xj[work.binf]**-2. 
+ + # Estimate integral with Euler-Maclaurin Sum + fjwj, Sn = _euler_maclaurin_sum(fj, work) + if work.Sk.shape[-1]: + Snm1 = work.Sk[:, -1] + Sn = (special.logsumexp([Snm1 - np.log(2), Sn], axis=0) if log + else Snm1 / 2 + Sn) + + work.fjwj = fjwj + work.Sn = Sn + + def check_termination(work): + """Terminate due to convergence or encountering non-finite values""" + stop = np.zeros(work.Sn.shape, dtype=bool) + + # Terminate before first iteration if integration limits are equal + if work.nit == 0: + i = (work.a == work.b).ravel() # ravel singleton dimension + zero = -np.inf if log else 0 + work.Sn[i] = zero + work.aerr[i] = zero + work.status[i] = eim._ECONVERGED + stop[i] = True + else: + # Terminate if convergence criterion is met + work.rerr, work.aerr = _estimate_error(work) + i = ((work.rerr < rtol) | (work.rerr + np.real(work.Sn) < atol) if log + else (work.rerr < rtol) | (work.rerr * abs(work.Sn) < atol)) + work.status[i] = eim._ECONVERGED + stop[i] = True + + # Terminate if integral estimate becomes invalid + if log: + i = (np.isposinf(np.real(work.Sn)) | np.isnan(work.Sn)) & ~stop + else: + i = ~np.isfinite(work.Sn) & ~stop + work.status[i] = eim._EVALUEERR + stop[i] = True + + return stop + + def post_termination_check(work): + work.n += 1 + work.Sk = np.concatenate((work.Sk, work.Sn[:, np.newaxis]), axis=-1) + return + + def customize_result(res, shape): + # If the integration limits were such that b < a, we reversed them + # to perform the calculation, and the final result needs to be negated. + if log and np.any(negative): + pi = res['integral'].dtype.type(np.pi) + j = np.complex64(1j) # minimum complex type + res['integral'] = res['integral'] + negative*pi*j + else: + res['integral'][negative] *= -1 + + # For this algorithm, it seems more appropriate to report the maximum + # level rather than the number of iterations in which it was performed. 
+ res['maxlevel'] = minlevel + res['nit'] - 1 + res['maxlevel'][res['nit'] == 0] = -1 + del res['nit'] + return shape + + # Suppress all warnings initially, since there are many places in the code + # for which this is expected behavior. + with np.errstate(over='ignore', invalid='ignore', divide='ignore'): + res = eim._loop(work, callback, shape, maxiter, f, args, dtype, pre_func_eval, + post_func_eval, check_termination, post_termination_check, + customize_result, res_work_pairs, xp, preserve_shape) + return res + + +def _get_base_step(dtype=np.float64): + # Compute the base step length for the provided dtype. Theoretically, the + # Euler-Maclaurin sum is infinite, but it gets cut off when either the + # weights underflow or the abscissae cannot be distinguished from the + # limits of integration. The latter happens to occur first for float32 and + # float64, and it occurs when `xjc` (the abscissa complement) + # in `_compute_pair` underflows. We can solve for the argument `tmax` at + # which it will underflow using [2] Eq. 13. + fmin = 4*np.finfo(dtype).tiny # stay a little away from the limit + tmax = np.arcsinh(np.log(2/fmin - 1) / np.pi) + + # Based on this, we can choose a base step size `h` for level 0. + # The number of function evaluations will be `2 + m*2^(k+1)`, where `k` is + # the level and `m` is an integer we get to choose. I choose + # m = _N_BASE_STEPS = `8` somewhat arbitrarily, but a rationale is that a + # power of 2 makes floating point arithmetic more predictable. It also + # results in a base step size close to `1`, which is what [1] uses (and I + # used here until I found [2] and these ideas settled). + h0 = tmax / _N_BASE_STEPS + return h0.astype(dtype) + + +_N_BASE_STEPS = 8 + + +def _compute_pair(k, h0): + # Compute the abscissa-weight pairs for each level k. See [1] page 9. + + # For now, we compute and store in 64-bit precision. 
If higher-precision + # data types become better supported, it would be good to compute these + # using the highest precision available. Or, once there is an Array API- + # compatible arbitrary precision array, we can compute at the required + # precision. + + # "....each level k of abscissa-weight pairs uses h = 2 **-k" + # We adapt to floating point arithmetic using ideas of [2]. + h = h0 / 2**k + max = _N_BASE_STEPS * 2**k + + # For iterations after the first, "....the integrand function needs to be + # evaluated only at the odd-indexed abscissas at each level." + j = np.arange(max+1) if k == 0 else np.arange(1, max+1, 2) + jh = j * h + + # "In this case... the weights wj = u1/cosh(u2)^2, where..." + pi_2 = np.pi / 2 + u1 = pi_2*np.cosh(jh) + u2 = pi_2*np.sinh(jh) + # Denominators get big here. Overflow then underflow doesn't need warning. + # with np.errstate(under='ignore', over='ignore'): + wj = u1 / np.cosh(u2)**2 + # "We actually store 1-xj = 1/(...)." + xjc = 1 / (np.exp(u2) * np.cosh(u2)) # complement of xj = np.tanh(u2) + + # When level k == 0, the zeroth xj corresponds with xj = 0. To simplify + # code, the function will be evaluated there twice; each gets half weight. + wj[0] = wj[0] / 2 if k == 0 else wj[0] + + return xjc, wj # store at full precision + + +def _pair_cache(k, h0): + # Cache the abscissa-weight pairs up to a specified level. + # Abscissae and weights of consecutive levels are concatenated. + # `index` records the indices that correspond with each level: + # `xjc[index[k]:index[k+1]` extracts the level `k` abscissae. 
+ if h0 != _pair_cache.h0: + _pair_cache.xjc = np.empty(0) + _pair_cache.wj = np.empty(0) + _pair_cache.indices = [0] + + xjcs = [_pair_cache.xjc] + wjs = [_pair_cache.wj] + + for i in range(len(_pair_cache.indices)-1, k + 1): + xjc, wj = _compute_pair(i, h0) + xjcs.append(xjc) + wjs.append(wj) + _pair_cache.indices.append(_pair_cache.indices[-1] + len(xjc)) + + _pair_cache.xjc = np.concatenate(xjcs) + _pair_cache.wj = np.concatenate(wjs) + _pair_cache.h0 = h0 + +_pair_cache.xjc = np.empty(0) +_pair_cache.wj = np.empty(0) +_pair_cache.indices = [0] +_pair_cache.h0 = None + + +def _get_pairs(k, h0, inclusive=False, dtype=np.float64): + # Retrieve the specified abscissa-weight pairs from the cache + # If `inclusive`, return all up to and including the specified level + if len(_pair_cache.indices) <= k+2 or h0 != _pair_cache.h0: + _pair_cache(k, h0) + + xjc = _pair_cache.xjc + wj = _pair_cache.wj + indices = _pair_cache.indices + + start = 0 if inclusive else indices[k] + end = indices[k+1] + + return xjc[start:end].astype(dtype), wj[start:end].astype(dtype) + + +def _transform_to_limits(xjc, wj, a, b): + # Transform integral according to user-specified limits. This is just + # math that follows from the fact that the standard limits are (-1, 1). + # Note: If we had stored xj instead of xjc, we would have + # xj = alpha * xj + beta, where beta = (a + b)/2 + alpha = (b - a) / 2 + xj = np.concatenate((-alpha * xjc + b, alpha * xjc + a), axis=-1) + wj = wj*alpha # arguments get broadcasted, so we can't use *= + wj = np.concatenate((wj, wj), axis=-1) + + # Points at the boundaries can be generated due to finite precision + # arithmetic, but these function values aren't supposed to be included in + # the Euler-Maclaurin sum. Ideally we wouldn't evaluate the function at + # these points; however, we can't easily filter out points since this + # function is vectorized. Instead, zero the weights. 
+ invalid = (xj <= a) | (xj >= b) + wj[invalid] = 0 + return xj, wj + + +def _euler_maclaurin_sum(fj, work): + # Perform the Euler-Maclaurin Sum, [1] Section 4 + + # The error estimate needs to know the magnitude of the last term + # omitted from the Euler-Maclaurin sum. This is a bit involved because + # it may have been computed at a previous level. I sure hope it's worth + # all the trouble. + xr0, fr0, wr0 = work.xr0, work.fr0, work.wr0 + xl0, fl0, wl0 = work.xl0, work.fl0, work.wl0 + + # It is much more convenient to work with the transposes of our work + # variables here. + xj, fj, wj = work.xj.T, fj.T, work.wj.T + n_x, n_active = xj.shape # number of abscissae, number of active elements + + # We'll work with the left and right sides separately + xr, xl = xj.reshape(2, n_x // 2, n_active).copy() # this gets modified + fr, fl = fj.reshape(2, n_x // 2, n_active) + wr, wl = wj.reshape(2, n_x // 2, n_active) + + invalid_r = ~np.isfinite(fr) | (wr == 0) + invalid_l = ~np.isfinite(fl) | (wl == 0) + + # integer index of the maximum abscissa at this level + xr[invalid_r] = -np.inf + ir = np.argmax(xr, axis=0, keepdims=True) + # abscissa, function value, and weight at this index + xr_max = np.take_along_axis(xr, ir, axis=0)[0] + fr_max = np.take_along_axis(fr, ir, axis=0)[0] + wr_max = np.take_along_axis(wr, ir, axis=0)[0] + # boolean indices at which maximum abscissa at this level exceeds + # the incumbent maximum abscissa (from all previous levels) + j = xr_max > xr0 + # Update record of the incumbent abscissa, function value, and weight + xr0[j] = xr_max[j] + fr0[j] = fr_max[j] + wr0[j] = wr_max[j] + + # integer index of the minimum abscissa at this level + xl[invalid_l] = np.inf + il = np.argmin(xl, axis=0, keepdims=True) + # abscissa, function value, and weight at this index + xl_min = np.take_along_axis(xl, il, axis=0)[0] + fl_min = np.take_along_axis(fl, il, axis=0)[0] + wl_min = np.take_along_axis(wl, il, axis=0)[0] + # boolean indices at which minimum 
abscissa at this level is less than + # the incumbent minimum abscissa (from all previous levels) + j = xl_min < xl0 + # Update record of the incumbent abscissa, function value, and weight + xl0[j] = xl_min[j] + fl0[j] = fl_min[j] + wl0[j] = wl_min[j] + fj = fj.T + + # Compute the error estimate `d4` - the magnitude of the leftmost or + # rightmost term, whichever is greater. + flwl0 = fl0 + np.log(wl0) if work.log else fl0 * wl0 # leftmost term + frwr0 = fr0 + np.log(wr0) if work.log else fr0 * wr0 # rightmost term + magnitude = np.real if work.log else np.abs + work.d4 = np.maximum(magnitude(flwl0), magnitude(frwr0)) + + # There are two approaches to dealing with function values that are + # numerically infinite due to approaching a singularity - zero them, or + # replace them with the function value at the nearest non-infinite point. + # [3] pg. 22 suggests the latter, so let's do that given that we have the + # information. + fr0b = np.broadcast_to(fr0[np.newaxis, :], fr.shape) + fl0b = np.broadcast_to(fl0[np.newaxis, :], fl.shape) + fr[invalid_r] = fr0b[invalid_r] + fl[invalid_l] = fl0b[invalid_l] + + # When wj is zero, log emits a warning + # with np.errstate(divide='ignore'): + fjwj = fj + np.log(work.wj) if work.log else fj * work.wj + + # update integral estimate + Sn = (special.logsumexp(fjwj + np.log(work.h), axis=-1) if work.log + else np.sum(fjwj, axis=-1) * work.h) + + work.xr0, work.fr0, work.wr0 = xr0, fr0, wr0 + work.xl0, work.fl0, work.wl0 = xl0, fl0, wl0 + + return fjwj, Sn + + +def _estimate_error(work): + # Estimate the error according to [1] Section 5 + + if work.n == 0 or work.nit == 0: + # The paper says to use "one" as the error before it can be calculated. + # NaN seems to be more appropriate. 
+ nan = np.full_like(work.Sn, np.nan) + return nan, nan + + indices = _pair_cache.indices + + n_active = len(work.Sn) # number of active elements + axis_kwargs = dict(axis=-1, keepdims=True) + + # With a jump start (starting at level higher than 0), we haven't + # explicitly calculated the integral estimate at lower levels. But we have + # all the function value-weight products, so we can compute the + # lower-level estimates. + if work.Sk.shape[-1] == 0: + h = 2 * work.h # step size at this level + n_x = indices[work.n] # number of abscissa up to this level + # The right and left fjwj terms from all levels are concatenated along + # the last axis. Get out only the terms up to this level. + fjwj_rl = work.fjwj.reshape(n_active, 2, -1) + fjwj = fjwj_rl[:, :, :n_x].reshape(n_active, 2*n_x) + # Compute the Euler-Maclaurin sum at this level + Snm1 = (special.logsumexp(fjwj, **axis_kwargs) + np.log(h) if work.log + else np.sum(fjwj, **axis_kwargs) * h) + work.Sk = np.concatenate((Snm1, work.Sk), axis=-1) + + if work.n == 1: + nan = np.full_like(work.Sn, np.nan) + return nan, nan + + # The paper says not to calculate the error for n<=2, but it's not clear + # about whether it starts at level 0 or level 1. We start at level 0, so + # why not compute the error beginning in level 2? + if work.Sk.shape[-1] < 2: + h = 4 * work.h # step size at this level + n_x = indices[work.n-1] # number of abscissa up to this level + # The right and left fjwj terms from all levels are concatenated along + # the last axis. Get out only the terms up to this level. 
+ fjwj_rl = work.fjwj.reshape(len(work.Sn), 2, -1) + fjwj = fjwj_rl[..., :n_x].reshape(n_active, 2*n_x) + # Compute the Euler-Maclaurin sum at this level + Snm2 = (special.logsumexp(fjwj, **axis_kwargs) + np.log(h) if work.log + else np.sum(fjwj, **axis_kwargs) * h) + work.Sk = np.concatenate((Snm2, work.Sk), axis=-1) + + Snm2 = work.Sk[..., -2] + Snm1 = work.Sk[..., -1] + + e1 = work.eps + + if work.log: + log_e1 = np.log(e1) + # Currently, only real integrals are supported in log-scale. All + # complex values have imaginary part in increments of pi*j, which just + # carries sign information of the original integral, so use of + # `np.real` here is equivalent to absolute value in real scale. + d1 = np.real(special.logsumexp([work.Sn, Snm1 + work.pi*1j], axis=0)) + d2 = np.real(special.logsumexp([work.Sn, Snm2 + work.pi*1j], axis=0)) + d3 = log_e1 + np.max(np.real(work.fjwj), axis=-1) + d4 = work.d4 + aerr = np.max([d1 ** 2 / d2, 2 * d1, d3, d4], axis=0) + rerr = np.maximum(log_e1, aerr - np.real(work.Sn)) + else: + # Note: explicit computation of log10 of each of these is unnecessary. + d1 = np.abs(work.Sn - Snm1) + d2 = np.abs(work.Sn - Snm2) + d3 = e1 * np.max(np.abs(work.fjwj), axis=-1) + d4 = work.d4 + # If `d1` is 0, no need to warn. This does the right thing. 
+ # with np.errstate(divide='ignore'): + aerr = np.max([d1**(np.log(d1)/np.log(d2)), d1**2, d3, d4], axis=0) + rerr = np.maximum(e1, aerr/np.abs(work.Sn)) + return rerr, aerr.reshape(work.Sn.shape) + + +def _transform_integrals(a, b): + # Transform integrals to a form with finite a < b + # For b < a, we reverse the limits and will multiply the final result by -1 + # For infinite limit on the right, we use the substitution x = 1/t - 1 + a + # For infinite limit on the left, we substitute x = -x and treat as above + # For infinite limits, we substitute x = t / (1-t**2) + + negative = b < a + a[negative], b[negative] = b[negative], a[negative] + + abinf = np.isinf(a) & np.isinf(b) + a[abinf], b[abinf] = -1, 1 + + ainf = np.isinf(a) + a[ainf], b[ainf] = -b[ainf], -a[ainf] + + binf = np.isinf(b) + a0 = a.copy() + a[binf], b[binf] = 0, 1 + + return a, b, a0, negative, abinf, ainf, binf + + +def _tanhsinh_iv(f, a, b, log, maxfun, maxlevel, minlevel, + atol, rtol, args, preserve_shape, callback): + # Input validation and standardization + + message = '`f` must be callable.' + if not callable(f): + raise ValueError(message) + + message = 'All elements of `a` and `b` must be real numbers.' + a, b = np.broadcast_arrays(a, b) + if np.any(np.iscomplex(a)) or np.any(np.iscomplex(b)): + raise ValueError(message) + + message = '`log` must be True or False.' + if log not in {True, False}: + raise ValueError(message) + log = bool(log) + + if atol is None: + atol = -np.inf if log else 0 + + rtol_temp = rtol if rtol is not None else 0. + + params = np.asarray([atol, rtol_temp, 0.]) + message = "`atol` and `rtol` must be real numbers." + if not np.issubdtype(params.dtype, np.floating): + raise ValueError(message) + + if log: + message = '`atol` and `rtol` may not be positive infinity.' + if np.any(np.isposinf(params)): + raise ValueError(message) + else: + message = '`atol` and `rtol` must be non-negative and finite.' 
+ if np.any(params < 0) or np.any(np.isinf(params)): + raise ValueError(message) + atol = params[0] + rtol = rtol if rtol is None else params[1] + + BIGINT = float(2**62) + if maxfun is None and maxlevel is None: + maxlevel = 10 + + maxfun = BIGINT if maxfun is None else maxfun + maxlevel = BIGINT if maxlevel is None else maxlevel + + message = '`maxfun`, `maxlevel`, and `minlevel` must be integers.' + params = np.asarray([maxfun, maxlevel, minlevel]) + if not (np.issubdtype(params.dtype, np.number) + and np.all(np.isreal(params)) + and np.all(params.astype(np.int64) == params)): + raise ValueError(message) + message = '`maxfun`, `maxlevel`, and `minlevel` must be non-negative.' + if np.any(params < 0): + raise ValueError(message) + maxfun, maxlevel, minlevel = params.astype(np.int64) + minlevel = min(minlevel, maxlevel) + + if not np.iterable(args): + args = (args,) + + message = '`preserve_shape` must be True or False.' + if preserve_shape not in {True, False}: + raise ValueError(message) + + if callback is not None and not callable(callback): + raise ValueError('`callback` must be callable.') + + return (f, a, b, log, maxfun, maxlevel, minlevel, + atol, rtol, args, preserve_shape, callback) + + +def _logsumexp(x, axis=0): + # logsumexp raises with empty array + x = np.asarray(x) + shape = list(x.shape) + if shape[axis] == 0: + shape.pop(axis) + return np.full(shape, fill_value=-np.inf, dtype=x.dtype) + else: + return special.logsumexp(x, axis=axis) + + +def _nsum_iv(f, a, b, step, args, log, maxterms, atol, rtol): + # Input validation and standardization + + message = '`f` must be callable.' + if not callable(f): + raise ValueError(message) + + message = 'All elements of `a`, `b`, and `step` must be real numbers.' 
+ a, b, step = np.broadcast_arrays(a, b, step) + dtype = np.result_type(a.dtype, b.dtype, step.dtype) + if not np.issubdtype(dtype, np.number) or np.issubdtype(dtype, np.complexfloating): + raise ValueError(message) + + valid_a = np.isfinite(a) + valid_b = b >= a # NaNs will be False + valid_step = np.isfinite(step) & (step > 0) + valid_abstep = valid_a & valid_b & valid_step + + message = '`log` must be True or False.' + if log not in {True, False}: + raise ValueError(message) + + if atol is None: + atol = -np.inf if log else 0 + + rtol_temp = rtol if rtol is not None else 0. + + params = np.asarray([atol, rtol_temp, 0.]) + message = "`atol` and `rtol` must be real numbers." + if not np.issubdtype(params.dtype, np.floating): + raise ValueError(message) + + if log: + message = '`atol`, `rtol` may not be positive infinity or NaN.' + if np.any(np.isposinf(params) | np.isnan(params)): + raise ValueError(message) + else: + message = '`atol`, and `rtol` must be non-negative and finite.' + if np.any((params < 0) | (~np.isfinite(params))): + raise ValueError(message) + atol = params[0] + rtol = rtol if rtol is None else params[1] + + maxterms_int = int(maxterms) + if maxterms_int != maxterms or maxterms < 0: + message = "`maxterms` must be a non-negative integer." + raise ValueError(message) + + if not np.iterable(args): + args = (args,) + + return f, a, b, step, valid_abstep, args, log, maxterms_int, atol, rtol + + +def _nsum(f, a, b, step=1, args=(), log=False, maxterms=int(2**20), atol=None, + rtol=None): + r"""Evaluate a convergent sum. + + For finite `b`, this evaluates:: + + f(a + np.arange(n)*step).sum() + + where ``n = int((b - a) / step) + 1``. If `f` is smooth, positive, and + monotone decreasing, `b` may be infinite, in which case the infinite sum + is approximated using integration. + + Parameters + ---------- + f : callable + The function that evaluates terms to be summed. 
The signature must be:: + + f(x: ndarray, *args) -> ndarray + + where each element of ``x`` is a finite real and ``args`` is a tuple, + which may contain an arbitrary number of arrays that are broadcastable + with `x`. `f` must represent a smooth, positive, and monotone decreasing + function of `x`; `_nsum` performs no checks to verify that these conditions + are met and may return erroneous results if they are violated. + a, b : array_like + Real lower and upper limits of summed terms. Must be broadcastable. + Each element of `a` must be finite and less than the corresponding + element in `b`, but elements of `b` may be infinite. + step : array_like + Finite, positive, real step between summed terms. Must be broadcastable + with `a` and `b`. + args : tuple, optional + Additional positional arguments to be passed to `f`. Must be arrays + broadcastable with `a`, `b`, and `step`. If the callable to be summed + requires arguments that are not broadcastable with `a`, `b`, and `step`, + wrap that callable with `f`. See Examples. + log : bool, default: False + Setting to True indicates that `f` returns the log of the terms + and that `atol` and `rtol` are expressed as the logs of the absolute + and relative errors. In this case, the result object will contain the + log of the sum and error. This is useful for summands for which + numerical underflow or overflow would lead to inaccuracies. + maxterms : int, default: 2**32 + The maximum number of terms to evaluate when summing directly. + Additional function evaluations may be performed for input + validation and integral evaluation. + atol, rtol : float, optional + Absolute termination tolerance (default: 0) and relative termination + tolerance (default: ``eps**0.5``, where ``eps`` is the precision of + the result dtype), respectively. Must be non-negative + and finite if `log` is False, and must be expressed as the log of a + non-negative and finite number if `log` is True. 
+ + Returns + ------- + res : _RichResult + An instance of `scipy._lib._util._RichResult` with the following + attributes. (The descriptions are written as though the values will be + scalars; however, if `func` returns an array, the outputs will be + + arrays of the same shape.) + success : bool + ``True`` when the algorithm terminated successfully (status ``0``). + status : int + An integer representing the exit status of the algorithm. + ``0`` : The algorithm converged to the specified tolerances. + ``-1`` : Element(s) of `a`, `b`, or `step` are invalid + ``-2`` : Numerical integration reached its iteration limit; the sum may be divergent. + ``-3`` : A non-finite value was encountered. + sum : float + An estimate of the sum. + error : float + An estimate of the absolute error, assuming all terms are non-negative. + nfev : int + The number of points at which `func` was evaluated. + + See Also + -------- + tanhsinh + + Notes + ----- + The method implemented for infinite summation is related to the integral + test for convergence of an infinite series: assuming `step` size 1 for + simplicity of exposition, the sum of a monotone decreasing function is bounded by + + .. math:: + + \int_u^\infty f(x) dx \leq \sum_{k=u}^\infty f(k) \leq \int_u^\infty f(x) dx + f(u) + + Let :math:`a` represent `a`, :math:`n` represent `maxterms`, :math:`\epsilon_a` + represent `atol`, and :math:`\epsilon_r` represent `rtol`. + The implementation first evaluates the integral :math:`S_l=\int_a^\infty f(x) dx` + as a lower bound of the infinite sum. Then, it seeks a value :math:`c > a` such + that :math:`f(c) < \epsilon_a + S_l \epsilon_r`, if it exists; otherwise, + let :math:`c = a + n`. Then the infinite sum is approximated as + + .. math:: + + \sum_{k=a}^{c-1} f(k) + \int_c^\infty f(x) dx + f(c)/2 + + and the reported error is :math:`f(c)/2` plus the error estimate of + numerical integration. 
The approach described above is generalized for non-unit + `step` and finite `b` that is too large for direct evaluation of the sum, + i.e. ``b - a + 1 > maxterms``. + + References + ---------- + [1] Wikipedia. "Integral test for convergence." + https://en.wikipedia.org/wiki/Integral_test_for_convergence + + Examples + -------- + Compute the infinite sum of the reciprocals of squared integers. + + >>> import numpy as np + >>> from scipy.integrate._tanhsinh import _nsum + >>> res = _nsum(lambda k: 1/k**2, 1, np.inf, maxterms=1e3) + >>> ref = np.pi**2/6 # true value + >>> res.error # estimated error + 4.990014980029223e-07 + >>> (res.sum - ref)/ref # true error + -1.0101760641302586e-10 + >>> res.nfev # number of points at which callable was evaluated + 1142 + + Compute the infinite sums of the reciprocals of integers raised to powers ``p``. + + >>> from scipy import special + >>> p = np.arange(2, 10) + >>> res = _nsum(lambda k, p: 1/k**p, 1, np.inf, maxterms=1e3, args=(p,)) + >>> ref = special.zeta(p, 1) + >>> np.allclose(res.sum, ref) + True + + """ # noqa: E501 + # Potential future work: + # - more careful testing of when `b` is slightly less than `a` plus an + # integer multiple of step (needed before this is public) + # - improve error estimate of `_direct` sum + # - add other methods for convergence acceleration (Richardson, epsilon) + # - support infinite lower limit? + # - support negative monotone increasing functions? + # - b < a / negative step? + # - complex-valued function? + # - check for violations of monotonicity? 
+ + # Function-specific input validation / standardization + tmp = _nsum_iv(f, a, b, step, args, log, maxterms, atol, rtol) + f, a, b, step, valid_abstep, args, log, maxterms, atol, rtol = tmp + + # Additional elementwise algorithm input validation / standardization + tmp = eim._initialize(f, (a,), args, complex_ok=False) + f, xs, fs, args, shape, dtype, xp = tmp + + # Finish preparing `a`, `b`, and `step` arrays + a = xs[0] + b = np.broadcast_to(b, shape).ravel().astype(dtype) + step = np.broadcast_to(step, shape).ravel().astype(dtype) + valid_abstep = np.broadcast_to(valid_abstep, shape).ravel() + nterms = np.floor((b - a) / step) + b = a + nterms*step + + # Define constants + eps = np.finfo(dtype).eps + zero = np.asarray(-np.inf if log else 0, dtype=dtype)[()] + if rtol is None: + rtol = 0.5*np.log(eps) if log else eps**0.5 + constants = (dtype, log, eps, zero, rtol, atol, maxterms) + + # Prepare result arrays + S = np.empty_like(a) + E = np.empty_like(a) + status = np.zeros(len(a), dtype=int) + nfev = np.ones(len(a), dtype=int) # one function evaluation above + + # Branch for direct sum evaluation / integral approximation / invalid input + i1 = (nterms + 1 <= maxterms) & valid_abstep + i2 = (nterms + 1 > maxterms) & valid_abstep + i3 = ~valid_abstep + + if np.any(i1): + args_direct = [arg[i1] for arg in args] + tmp = _direct(f, a[i1], b[i1], step[i1], args_direct, constants) + S[i1], E[i1] = tmp[:-1] + nfev[i1] += tmp[-1] + status[i1] = -3 * (~np.isfinite(S[i1])) + + if np.any(i2): + args_indirect = [arg[i2] for arg in args] + tmp = _integral_bound(f, a[i2], b[i2], step[i2], args_indirect, constants) + S[i2], E[i2], status[i2] = tmp[:-1] + nfev[i2] += tmp[-1] + + if np.any(i3): + S[i3], E[i3] = np.nan, np.nan + status[i3] = -1 + + # Return results + S, E = S.reshape(shape)[()], E.reshape(shape)[()] + status, nfev = status.reshape(shape)[()], nfev.reshape(shape)[()] + return _RichResult(sum=S, error=E, status=status, success=status == 0, + nfev=nfev) + + +def 
_direct(f, a, b, step, args, constants, inclusive=True): + # Directly evaluate the sum. + + # When used in the context of distributions, `args` would contain the + # distribution parameters. We have broadcasted for simplicity, but we could + # reduce function evaluations when distribution parameters are the same but + # sum limits differ. Roughly: + # - compute the function at all points between min(a) and max(b), + # - compute the cumulative sum, + # - take the difference between elements of the cumulative sum + # corresponding with b and a. + # This is left to future enhancement + + dtype, log, eps, zero, _, _, _ = constants + + # To allow computation in a single vectorized call, find the maximum number + # of points (over all slices) at which the function needs to be evaluated. + # Note: if `inclusive` is `True`, then we want `1` more term in the sum. + # I didn't think it was great style to use `True` as `1` in Python, so I + # explicitly converted it to an `int` before using it. + inclusive_adjustment = int(inclusive) + steps = np.round((b - a) / step) + inclusive_adjustment + # Equivalently, steps = np.round((b - a) / step) + inclusive + max_steps = int(np.max(steps)) + + # In each slice, the function will be evaluated at the same number of points, + # but excessive points (those beyond the right sum limit `b`) are replaced + # with NaN to (potentially) reduce the time of these unnecessary calculations. + # Use a new last axis for these calculations for consistency with other + # elementwise algorithms. + a2, b2, step2 = a[:, np.newaxis], b[:, np.newaxis], step[:, np.newaxis] + args2 = [arg[:, np.newaxis] for arg in args] + ks = a2 + np.arange(max_steps, dtype=dtype) * step2 + i_nan = ks >= (b2 + inclusive_adjustment*step2/2) + ks[i_nan] = np.nan + fs = f(ks, *args2) + + # The function evaluated at NaN is NaN, and NaNs are zeroed in the sum. + # In some cases it may be faster to loop over slices than to vectorize + # like this. 
This is an optimization that can be added later. + fs[i_nan] = zero + nfev = max_steps - i_nan.sum(axis=-1) + S = _logsumexp(fs, axis=-1) if log else np.sum(fs, axis=-1) + # Rough, non-conservative error estimate. See gh-19667 for improvement ideas. + E = np.real(S) + np.log(eps) if log else eps * abs(S) + return S, E, nfev + + +def _integral_bound(f, a, b, step, args, constants): + # Estimate the sum with integral approximation + dtype, log, _, _, rtol, atol, maxterms = constants + log2 = np.log(2, dtype=dtype) + + # Get a lower bound on the sum and compute effective absolute tolerance + lb = _tanhsinh(f, a, b, args=args, atol=atol, rtol=rtol, log=log) + tol = np.broadcast_to(atol, lb.integral.shape) + tol = _logsumexp((tol, rtol + lb.integral)) if log else tol + rtol*lb.integral + i_skip = lb.status < 0 # avoid unnecessary f_evals if integral is divergent + tol[i_skip] = np.nan + status = lb.status + + # As in `_direct`, we'll need a temporary new axis for points + # at which to evaluate the function. Append axis at the end for + # consistency with other elementwise algorithms. 
+ a2 = a[..., np.newaxis] + step2 = step[..., np.newaxis] + args2 = [arg[..., np.newaxis] for arg in args] + + # Find the location of a term that is less than the tolerance (if possible) + log2maxterms = np.floor(np.log2(maxterms)) if maxterms else 0 + n_steps = np.concatenate([2**np.arange(0, log2maxterms), [maxterms]], dtype=dtype) + nfev = len(n_steps) + ks = a2 + n_steps * step2 + fks = f(ks, *args2) + nt = np.minimum(np.sum(fks > tol[:, np.newaxis], axis=-1), n_steps.shape[-1]-1) + n_steps = n_steps[nt] + + # Directly evaluate the sum up to this term + k = a + n_steps * step + left, left_error, left_nfev = _direct(f, a, k, step, args, + constants, inclusive=False) + i_skip |= np.isposinf(left) # if sum is not finite, no sense in continuing + status[np.isposinf(left)] = -3 + k[i_skip] = np.nan + + # Use integration to estimate the remaining sum + # Possible optimization for future work: if there were no terms less than + # the tolerance, there is no need to compute the integral to better accuracy. + # Something like: + # atol = np.maximum(atol, np.minimum(fk/2 - fb/2)) + # rtol = np.maximum(rtol, np.minimum((fk/2 - fb/2)/left)) + # where `fk`/`fb` are currently calculated below. 
+ right = _tanhsinh(f, k, b, args=args, atol=atol, rtol=rtol, log=log) + + # Calculate the full estimate and error from the pieces + fk = fks[np.arange(len(fks)), nt] + fb = f(b, *args) + nfev += 1 + if log: + log_step = np.log(step) + S_terms = (left, right.integral - log_step, fk - log2, fb - log2) + S = _logsumexp(S_terms, axis=0) + E_terms = (left_error, right.error - log_step, fk-log2, fb-log2+np.pi*1j) + E = _logsumexp(E_terms, axis=0).real + else: + S = left + right.integral/step + fk/2 + fb/2 + E = left_error + right.error/step + fk/2 - fb/2 + status[~i_skip] = right.status[~i_skip] + return S, E, status, left_nfev + right.nfev + nfev + lb.nfev diff --git a/parrot/lib/python3.10/site-packages/scipy/integrate/dop.py b/parrot/lib/python3.10/site-packages/scipy/integrate/dop.py new file mode 100644 index 0000000000000000000000000000000000000000..bf67a9a35b7d2959c2617aadc5638b577a45b9b5 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/integrate/dop.py @@ -0,0 +1,15 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__: list[str] = [] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="integrate", module="dop", + private_modules=["_dop"], all=__all__, + attribute=name) diff --git a/parrot/lib/python3.10/site-packages/scipy/integrate/lsoda.py b/parrot/lib/python3.10/site-packages/scipy/integrate/lsoda.py new file mode 100644 index 0000000000000000000000000000000000000000..1bc1f1da3c4f0aefad9da73b6405b957ce9335b4 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/integrate/lsoda.py @@ -0,0 +1,15 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = ['lsoda'] # noqa: F822 + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="integrate", module="lsoda", + private_modules=["_lsoda"], all=__all__, + attribute=name) diff --git a/parrot/lib/python3.10/site-packages/scipy/integrate/quadpack.py b/parrot/lib/python3.10/site-packages/scipy/integrate/quadpack.py new file mode 100644 index 0000000000000000000000000000000000000000..144584988095c8855da8c34253c045f1a3940572 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/integrate/quadpack.py @@ -0,0 +1,23 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.integrate` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + "quad", + "dblquad", + "tplquad", + "nquad", + "IntegrationWarning", +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="integrate", module="quadpack", + private_modules=["_quadpack_py"], all=__all__, + attribute=name) diff --git a/parrot/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..bf88a75bd1fa358d9c7ba2849e5bef224c32bf5b Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test__quad_vec.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test__quad_vec.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f7061bed02768ef3f975d89b58fa260524608c42 Binary files /dev/null and 
b/parrot/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test__quad_vec.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_odeint_jac.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_odeint_jac.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..43fac5fff43e5561b80fa0c33e2cdb4a676edfcf Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_odeint_jac.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadrature.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadrature.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..68a56f86531b77224da5e1fabac4ed8e7950be4b Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/integrate/tests/__pycache__/test_quadrature.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/integrate/tests/test__quad_vec.py b/parrot/lib/python3.10/site-packages/scipy/integrate/tests/test__quad_vec.py new file mode 100644 index 0000000000000000000000000000000000000000..c88650ca1010b3543f4577dbb6d24cdbba36f18e --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/integrate/tests/test__quad_vec.py @@ -0,0 +1,215 @@ +import pytest + +import numpy as np +from numpy.testing import assert_allclose + +from scipy.integrate import quad_vec + +from multiprocessing.dummy import Pool + + +quadrature_params = pytest.mark.parametrize( + 'quadrature', [None, "gk15", "gk21", "trapezoid"]) + + +@quadrature_params +def test_quad_vec_simple(quadrature): + n = np.arange(10) + def f(x): + return x ** n + for epsabs in [0.1, 1e-3, 1e-6]: + if quadrature == 'trapezoid' and epsabs < 1e-4: + # slow: skip + continue + + kwargs = dict(epsabs=epsabs, quadrature=quadrature) + + exact = 
2**(n+1)/(n + 1) + + res, err = quad_vec(f, 0, 2, norm='max', **kwargs) + assert_allclose(res, exact, rtol=0, atol=epsabs) + + res, err = quad_vec(f, 0, 2, norm='2', **kwargs) + assert np.linalg.norm(res - exact) < epsabs + + res, err = quad_vec(f, 0, 2, norm='max', points=(0.5, 1.0), **kwargs) + assert_allclose(res, exact, rtol=0, atol=epsabs) + + res, err, *rest = quad_vec(f, 0, 2, norm='max', + epsrel=1e-8, + full_output=True, + limit=10000, + **kwargs) + assert_allclose(res, exact, rtol=0, atol=epsabs) + + +@quadrature_params +def test_quad_vec_simple_inf(quadrature): + def f(x): + return 1 / (1 + np.float64(x) ** 2) + + for epsabs in [0.1, 1e-3, 1e-6]: + if quadrature == 'trapezoid' and epsabs < 1e-4: + # slow: skip + continue + + kwargs = dict(norm='max', epsabs=epsabs, quadrature=quadrature) + + res, err = quad_vec(f, 0, np.inf, **kwargs) + assert_allclose(res, np.pi/2, rtol=0, atol=max(epsabs, err)) + + res, err = quad_vec(f, 0, -np.inf, **kwargs) + assert_allclose(res, -np.pi/2, rtol=0, atol=max(epsabs, err)) + + res, err = quad_vec(f, -np.inf, 0, **kwargs) + assert_allclose(res, np.pi/2, rtol=0, atol=max(epsabs, err)) + + res, err = quad_vec(f, np.inf, 0, **kwargs) + assert_allclose(res, -np.pi/2, rtol=0, atol=max(epsabs, err)) + + res, err = quad_vec(f, -np.inf, np.inf, **kwargs) + assert_allclose(res, np.pi, rtol=0, atol=max(epsabs, err)) + + res, err = quad_vec(f, np.inf, -np.inf, **kwargs) + assert_allclose(res, -np.pi, rtol=0, atol=max(epsabs, err)) + + res, err = quad_vec(f, np.inf, np.inf, **kwargs) + assert_allclose(res, 0, rtol=0, atol=max(epsabs, err)) + + res, err = quad_vec(f, -np.inf, -np.inf, **kwargs) + assert_allclose(res, 0, rtol=0, atol=max(epsabs, err)) + + res, err = quad_vec(f, 0, np.inf, points=(1.0, 2.0), **kwargs) + assert_allclose(res, np.pi/2, rtol=0, atol=max(epsabs, err)) + + def f(x): + return np.sin(x + 2) / (1 + x ** 2) + exact = np.pi / np.e * np.sin(2) + epsabs = 1e-5 + + res, err, info = quad_vec(f, -np.inf, np.inf, 
limit=1000, norm='max', epsabs=epsabs, + quadrature=quadrature, full_output=True) + assert info.status == 1 + assert_allclose(res, exact, rtol=0, atol=max(epsabs, 1.5 * err)) + + +def test_quad_vec_args(): + def f(x, a): + return x * (x + a) * np.arange(3) + a = 2 + exact = np.array([0, 4/3, 8/3]) + + res, err = quad_vec(f, 0, 1, args=(a,)) + assert_allclose(res, exact, rtol=0, atol=1e-4) + + +def _lorenzian(x): + return 1 / (1 + x**2) + + +@pytest.mark.fail_slow(5) +def test_quad_vec_pool(): + f = _lorenzian + res, err = quad_vec(f, -np.inf, np.inf, norm='max', epsabs=1e-4, workers=4) + assert_allclose(res, np.pi, rtol=0, atol=1e-4) + + with Pool(10) as pool: + def f(x): + return 1 / (1 + x ** 2) + res, _ = quad_vec(f, -np.inf, np.inf, norm='max', epsabs=1e-4, workers=pool.map) + assert_allclose(res, np.pi, rtol=0, atol=1e-4) + + +def _func_with_args(x, a): + return x * (x + a) * np.arange(3) + + +@pytest.mark.fail_slow(5) +@pytest.mark.parametrize('extra_args', [2, (2,)]) +@pytest.mark.parametrize('workers', [1, 10]) +def test_quad_vec_pool_args(extra_args, workers): + f = _func_with_args + exact = np.array([0, 4/3, 8/3]) + + res, err = quad_vec(f, 0, 1, args=extra_args, workers=workers) + assert_allclose(res, exact, rtol=0, atol=1e-4) + + with Pool(workers) as pool: + res, err = quad_vec(f, 0, 1, args=extra_args, workers=pool.map) + assert_allclose(res, exact, rtol=0, atol=1e-4) + + +@quadrature_params +def test_num_eval(quadrature): + def f(x): + count[0] += 1 + return x**5 + + count = [0] + res = quad_vec(f, 0, 1, norm='max', full_output=True, quadrature=quadrature) + assert res[2].neval == count[0] + + +def test_info(): + def f(x): + return np.ones((3, 2, 1)) + + res, err, info = quad_vec(f, 0, 1, norm='max', full_output=True) + + assert info.success is True + assert info.status == 0 + assert info.message == 'Target precision reached.' 
+ assert info.neval > 0 + assert info.intervals.shape[1] == 2 + assert info.integrals.shape == (info.intervals.shape[0], 3, 2, 1) + assert info.errors.shape == (info.intervals.shape[0],) + + +def test_nan_inf(): + def f_nan(x): + return np.nan + + def f_inf(x): + return np.inf if x < 0.1 else 1/x + + res, err, info = quad_vec(f_nan, 0, 1, full_output=True) + assert info.status == 3 + + res, err, info = quad_vec(f_inf, 0, 1, full_output=True) + assert info.status == 3 + + +@pytest.mark.parametrize('a,b', [(0, 1), (0, np.inf), (np.inf, 0), + (-np.inf, np.inf), (np.inf, -np.inf)]) +def test_points(a, b): + # Check that initial interval splitting is done according to + # `points`, by checking that consecutive sets of 15 point (for + # gk15) function evaluations lie between `points` + + points = (0, 0.25, 0.5, 0.75, 1.0) + points += tuple(-x for x in points) + + quadrature_points = 15 + interval_sets = [] + count = 0 + + def f(x): + nonlocal count + + if count % quadrature_points == 0: + interval_sets.append(set()) + + count += 1 + interval_sets[-1].add(float(x)) + return 0.0 + + quad_vec(f, a, b, points=points, quadrature='gk15', limit=0) + + # Check that all point sets lie in a single `points` interval + for p in interval_sets: + j = np.searchsorted(sorted(points), tuple(p)) + assert np.all(j == j[0]) + +def test_trapz_deprecation(): + with pytest.deprecated_call(match="`quadrature='trapz'`"): + quad_vec(lambda x: x, 0, 1, quadrature="trapz") diff --git a/parrot/lib/python3.10/site-packages/scipy/integrate/tests/test_banded_ode_solvers.py b/parrot/lib/python3.10/site-packages/scipy/integrate/tests/test_banded_ode_solvers.py new file mode 100644 index 0000000000000000000000000000000000000000..f34d45d94fd754bc8d2c90609ac308f6d3e4706b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/integrate/tests/test_banded_ode_solvers.py @@ -0,0 +1,218 @@ +import itertools +import numpy as np +from numpy.testing import assert_allclose +from scipy.integrate import ode + + 
+def _band_count(a): + """Returns ml and mu, the lower and upper band sizes of a.""" + nrows, ncols = a.shape + ml = 0 + for k in range(-nrows+1, 0): + if np.diag(a, k).any(): + ml = -k + break + mu = 0 + for k in range(nrows-1, 0, -1): + if np.diag(a, k).any(): + mu = k + break + return ml, mu + + +def _linear_func(t, y, a): + """Linear system dy/dt = a * y""" + return a.dot(y) + + +def _linear_jac(t, y, a): + """Jacobian of a * y is a.""" + return a + + +def _linear_banded_jac(t, y, a): + """Banded Jacobian.""" + ml, mu = _band_count(a) + bjac = [np.r_[[0] * k, np.diag(a, k)] for k in range(mu, 0, -1)] + bjac.append(np.diag(a)) + for k in range(-1, -ml-1, -1): + bjac.append(np.r_[np.diag(a, k), [0] * (-k)]) + return bjac + + +def _solve_linear_sys(a, y0, tend=1, dt=0.1, + solver=None, method='bdf', use_jac=True, + with_jacobian=False, banded=False): + """Use scipy.integrate.ode to solve a linear system of ODEs. + + a : square ndarray + Matrix of the linear system to be solved. + y0 : ndarray + Initial condition + tend : float + Stop time. + dt : float + Step size of the output. + solver : str + If not None, this must be "vode", "lsoda" or "zvode". + method : str + Either "bdf" or "adams". + use_jac : bool + Determines if the jacobian function is passed to ode(). + with_jacobian : bool + Passed to ode.set_integrator(). + banded : bool + Determines whether a banded or full jacobian is used. + If `banded` is True, `lband` and `uband` are determined by the + values in `a`. 
+ """ + if banded: + lband, uband = _band_count(a) + else: + lband = None + uband = None + + if use_jac: + if banded: + r = ode(_linear_func, _linear_banded_jac) + else: + r = ode(_linear_func, _linear_jac) + else: + r = ode(_linear_func) + + if solver is None: + if np.iscomplexobj(a): + solver = "zvode" + else: + solver = "vode" + + r.set_integrator(solver, + with_jacobian=with_jacobian, + method=method, + lband=lband, uband=uband, + rtol=1e-9, atol=1e-10, + ) + t0 = 0 + r.set_initial_value(y0, t0) + r.set_f_params(a) + r.set_jac_params(a) + + t = [t0] + y = [y0] + while r.successful() and r.t < tend: + r.integrate(r.t + dt) + t.append(r.t) + y.append(r.y) + + t = np.array(t) + y = np.array(y) + return t, y + + +def _analytical_solution(a, y0, t): + """ + Analytical solution to the linear differential equations dy/dt = a*y. + + The solution is only valid if `a` is diagonalizable. + + Returns a 2-D array with shape (len(t), len(y0)). + """ + lam, v = np.linalg.eig(a) + c = np.linalg.solve(v, y0) + e = c * np.exp(lam * t.reshape(-1, 1)) + sol = e.dot(v.T) + return sol + + +def test_banded_ode_solvers(): + # Test the "lsoda", "vode" and "zvode" solvers of the `ode` class + # with a system that has a banded Jacobian matrix. 
+ + t_exact = np.linspace(0, 1.0, 5) + + # --- Real arrays for testing the "lsoda" and "vode" solvers --- + + # lband = 2, uband = 1: + a_real = np.array([[-0.6, 0.1, 0.0, 0.0, 0.0], + [0.2, -0.5, 0.9, 0.0, 0.0], + [0.1, 0.1, -0.4, 0.1, 0.0], + [0.0, 0.3, -0.1, -0.9, -0.3], + [0.0, 0.0, 0.1, 0.1, -0.7]]) + + # lband = 0, uband = 1: + a_real_upper = np.triu(a_real) + + # lband = 2, uband = 0: + a_real_lower = np.tril(a_real) + + # lband = 0, uband = 0: + a_real_diag = np.triu(a_real_lower) + + real_matrices = [a_real, a_real_upper, a_real_lower, a_real_diag] + real_solutions = [] + + for a in real_matrices: + y0 = np.arange(1, a.shape[0] + 1) + y_exact = _analytical_solution(a, y0, t_exact) + real_solutions.append((y0, t_exact, y_exact)) + + def check_real(idx, solver, meth, use_jac, with_jac, banded): + a = real_matrices[idx] + y0, t_exact, y_exact = real_solutions[idx] + t, y = _solve_linear_sys(a, y0, + tend=t_exact[-1], + dt=t_exact[1] - t_exact[0], + solver=solver, + method=meth, + use_jac=use_jac, + with_jacobian=with_jac, + banded=banded) + assert_allclose(t, t_exact) + assert_allclose(y, y_exact) + + for idx in range(len(real_matrices)): + p = [['vode', 'lsoda'], # solver + ['bdf', 'adams'], # method + [False, True], # use_jac + [False, True], # with_jacobian + [False, True]] # banded + for solver, meth, use_jac, with_jac, banded in itertools.product(*p): + check_real(idx, solver, meth, use_jac, with_jac, banded) + + # --- Complex arrays for testing the "zvode" solver --- + + # complex, lband = 2, uband = 1: + a_complex = a_real - 0.5j * a_real + + # complex, lband = 0, uband = 0: + a_complex_diag = np.diag(np.diag(a_complex)) + + complex_matrices = [a_complex, a_complex_diag] + complex_solutions = [] + + for a in complex_matrices: + y0 = np.arange(1, a.shape[0] + 1) + 1j + y_exact = _analytical_solution(a, y0, t_exact) + complex_solutions.append((y0, t_exact, y_exact)) + + def check_complex(idx, solver, meth, use_jac, with_jac, banded): + a = 
complex_matrices[idx] + y0, t_exact, y_exact = complex_solutions[idx] + t, y = _solve_linear_sys(a, y0, + tend=t_exact[-1], + dt=t_exact[1] - t_exact[0], + solver=solver, + method=meth, + use_jac=use_jac, + with_jacobian=with_jac, + banded=banded) + assert_allclose(t, t_exact) + assert_allclose(y, y_exact) + + for idx in range(len(complex_matrices)): + p = [['bdf', 'adams'], # method + [False, True], # use_jac + [False, True], # with_jacobian + [False, True]] # banded + for meth, use_jac, with_jac, banded in itertools.product(*p): + check_complex(idx, "zvode", meth, use_jac, with_jac, banded) diff --git a/parrot/lib/python3.10/site-packages/scipy/integrate/tests/test_odeint_jac.py b/parrot/lib/python3.10/site-packages/scipy/integrate/tests/test_odeint_jac.py new file mode 100644 index 0000000000000000000000000000000000000000..7d28ccc93f4444f3f2e0b71da01c573d4f903dbc --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/integrate/tests/test_odeint_jac.py @@ -0,0 +1,74 @@ +import numpy as np +from numpy.testing import assert_equal, assert_allclose +from scipy.integrate import odeint +import scipy.integrate._test_odeint_banded as banded5x5 + + +def rhs(y, t): + dydt = np.zeros_like(y) + banded5x5.banded5x5(t, y, dydt) + return dydt + + +def jac(y, t): + n = len(y) + jac = np.zeros((n, n), order='F') + banded5x5.banded5x5_jac(t, y, 1, 1, jac) + return jac + + +def bjac(y, t): + n = len(y) + bjac = np.zeros((4, n), order='F') + banded5x5.banded5x5_bjac(t, y, 1, 1, bjac) + return bjac + + +JACTYPE_FULL = 1 +JACTYPE_BANDED = 4 + + +def check_odeint(jactype): + if jactype == JACTYPE_FULL: + ml = None + mu = None + jacobian = jac + elif jactype == JACTYPE_BANDED: + ml = 2 + mu = 1 + jacobian = bjac + else: + raise ValueError(f"invalid jactype: {jactype!r}") + + y0 = np.arange(1.0, 6.0) + # These tolerances must match the tolerances used in banded5x5.f. 
+ rtol = 1e-11 + atol = 1e-13 + dt = 0.125 + nsteps = 64 + t = dt * np.arange(nsteps+1) + + sol, info = odeint(rhs, y0, t, + Dfun=jacobian, ml=ml, mu=mu, + atol=atol, rtol=rtol, full_output=True) + yfinal = sol[-1] + odeint_nst = info['nst'][-1] + odeint_nfe = info['nfe'][-1] + odeint_nje = info['nje'][-1] + + y1 = y0.copy() + # Pure Fortran solution. y1 is modified in-place. + nst, nfe, nje = banded5x5.banded5x5_solve(y1, nsteps, dt, jactype) + + # It is likely that yfinal and y1 are *exactly* the same, but + # we'll be cautious and use assert_allclose. + assert_allclose(yfinal, y1, rtol=1e-12) + assert_equal((odeint_nst, odeint_nfe, odeint_nje), (nst, nfe, nje)) + + +def test_odeint_full_jac(): + check_odeint(JACTYPE_FULL) + + +def test_odeint_banded_jac(): + check_odeint(JACTYPE_BANDED) diff --git a/parrot/lib/python3.10/site-packages/scipy/integrate/tests/test_tanhsinh.py b/parrot/lib/python3.10/site-packages/scipy/integrate/tests/test_tanhsinh.py new file mode 100644 index 0000000000000000000000000000000000000000..084385cf9b4a0ddbb145aa99e9b22c381216ca33 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/integrate/tests/test_tanhsinh.py @@ -0,0 +1,947 @@ +# mypy: disable-error-code="attr-defined" +import os +import pytest + +import numpy as np +from numpy.testing import assert_allclose, assert_equal + +import scipy._lib._elementwise_iterative_method as eim +from scipy import special, stats +from scipy.integrate import quad_vec +from scipy.integrate._tanhsinh import _tanhsinh, _pair_cache, _nsum +from scipy.stats._discrete_distns import _gen_harmonic_gt1 + +class TestTanhSinh: + + # Test problems from [1] Section 6 + def f1(self, t): + return t * np.log(1 + t) + + f1.ref = 0.25 + f1.b = 1 + + def f2(self, t): + return t ** 2 * np.arctan(t) + + f2.ref = (np.pi - 2 + 2 * np.log(2)) / 12 + f2.b = 1 + + def f3(self, t): + return np.exp(t) * np.cos(t) + + f3.ref = (np.exp(np.pi / 2) - 1) / 2 + f3.b = np.pi / 2 + + def f4(self, t): + a = np.sqrt(2 + t ** 
2) + return np.arctan(a) / ((1 + t ** 2) * a) + + f4.ref = 5 * np.pi ** 2 / 96 + f4.b = 1 + + def f5(self, t): + return np.sqrt(t) * np.log(t) + + f5.ref = -4 / 9 + f5.b = 1 + + def f6(self, t): + return np.sqrt(1 - t ** 2) + + f6.ref = np.pi / 4 + f6.b = 1 + + def f7(self, t): + return np.sqrt(t) / np.sqrt(1 - t ** 2) + + f7.ref = 2 * np.sqrt(np.pi) * special.gamma(3 / 4) / special.gamma(1 / 4) + f7.b = 1 + + def f8(self, t): + return np.log(t) ** 2 + + f8.ref = 2 + f8.b = 1 + + def f9(self, t): + return np.log(np.cos(t)) + + f9.ref = -np.pi * np.log(2) / 2 + f9.b = np.pi / 2 + + def f10(self, t): + return np.sqrt(np.tan(t)) + + f10.ref = np.pi * np.sqrt(2) / 2 + f10.b = np.pi / 2 + + def f11(self, t): + return 1 / (1 + t ** 2) + + f11.ref = np.pi / 2 + f11.b = np.inf + + def f12(self, t): + return np.exp(-t) / np.sqrt(t) + + f12.ref = np.sqrt(np.pi) + f12.b = np.inf + + def f13(self, t): + return np.exp(-t ** 2 / 2) + + f13.ref = np.sqrt(np.pi / 2) + f13.b = np.inf + + def f14(self, t): + return np.exp(-t) * np.cos(t) + + f14.ref = 0.5 + f14.b = np.inf + + def f15(self, t): + return np.sin(t) / t + + f15.ref = np.pi / 2 + f15.b = np.inf + + def error(self, res, ref, log=False): + err = abs(res - ref) + + if not log: + return err + + with np.errstate(divide='ignore'): + return np.log10(err) + + def test_input_validation(self): + f = self.f1 + + message = '`f` must be callable.' + with pytest.raises(ValueError, match=message): + _tanhsinh(42, 0, f.b) + + message = '...must be True or False.' + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, log=2) + + message = '...must be real numbers.' + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 1+1j, f.b) + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, atol='ekki') + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, rtol=pytest) + + message = '...must be non-negative and finite.' 
+ with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, rtol=-1) + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, atol=np.inf) + + message = '...may not be positive infinity.' + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, rtol=np.inf, log=True) + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, atol=np.inf, log=True) + + message = '...must be integers.' + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, maxlevel=object()) + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, maxfun=1+1j) + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, minlevel="migratory coconut") + + message = '...must be non-negative.' + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, maxlevel=-1) + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, maxfun=-1) + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, minlevel=-1) + + message = '...must be True or False.' + with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, preserve_shape=2) + + message = '...must be callable.' 
+ with pytest.raises(ValueError, match=message): + _tanhsinh(f, 0, f.b, callback='elderberry') + + @pytest.mark.parametrize("limits, ref", [ + [(0, np.inf), 0.5], # b infinite + [(-np.inf, 0), 0.5], # a infinite + [(-np.inf, np.inf), 1], # a and b infinite + [(np.inf, -np.inf), -1], # flipped limits + [(1, -1), stats.norm.cdf(-1) - stats.norm.cdf(1)], # flipped limits + ]) + def test_integral_transforms(self, limits, ref): + # Check that the integral transforms are behaving for both normal and + # log integration + dist = stats.norm() + + res = _tanhsinh(dist.pdf, *limits) + assert_allclose(res.integral, ref) + + logres = _tanhsinh(dist.logpdf, *limits, log=True) + assert_allclose(np.exp(logres.integral), ref) + # Transformation should not make the result complex unnecessarily + assert (np.issubdtype(logres.integral.dtype, np.floating) if ref > 0 + else np.issubdtype(logres.integral.dtype, np.complexfloating)) + + assert_allclose(np.exp(logres.error), res.error, atol=1e-16) + + # 15 skipped intentionally; it's very difficult numerically + @pytest.mark.parametrize('f_number', range(1, 15)) + def test_basic(self, f_number): + f = getattr(self, f"f{f_number}") + rtol = 2e-8 + res = _tanhsinh(f, 0, f.b, rtol=rtol) + assert_allclose(res.integral, f.ref, rtol=rtol) + if f_number not in {14}: # mildly underestimates error here + true_error = abs(self.error(res.integral, f.ref)/res.integral) + assert true_error < res.error + + if f_number in {7, 10, 12}: # succeeds, but doesn't know it + return + + assert res.success + assert res.status == 0 + + @pytest.mark.parametrize('ref', (0.5, [0.4, 0.6])) + @pytest.mark.parametrize('case', stats._distr_params.distcont) + def test_accuracy(self, ref, case): + distname, params = case + if distname in {'dgamma', 'dweibull', 'laplace', 'kstwo'}: + # should split up interval at first-derivative discontinuity + pytest.skip('tanh-sinh is not great for non-smooth integrands') + if (distname in {'studentized_range', 'levy_stable'} + and not 
int(os.getenv('SCIPY_XSLOW', 0))): + pytest.skip('This case passes, but it is too slow.') + dist = getattr(stats, distname)(*params) + x = dist.interval(ref) + res = _tanhsinh(dist.pdf, *x) + assert_allclose(res.integral, ref) + + @pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)]) + def test_vectorization(self, shape): + # Test for correct functionality, output shapes, and dtypes for various + # input shapes. + rng = np.random.default_rng(82456839535679456794) + a = rng.random(shape) + b = rng.random(shape) + p = rng.random(shape) + n = np.prod(shape) + + def f(x, p): + f.ncall += 1 + f.feval += 1 if (x.size == n or x.ndim <=1) else x.shape[-1] + return x**p + f.ncall = 0 + f.feval = 0 + + @np.vectorize + def _tanhsinh_single(a, b, p): + return _tanhsinh(lambda x: x**p, a, b) + + res = _tanhsinh(f, a, b, args=(p,)) + refs = _tanhsinh_single(a, b, p).ravel() + + attrs = ['integral', 'error', 'success', 'status', 'nfev', 'maxlevel'] + for attr in attrs: + ref_attr = [getattr(ref, attr) for ref in refs] + res_attr = getattr(res, attr) + assert_allclose(res_attr.ravel(), ref_attr, rtol=1e-15) + assert_equal(res_attr.shape, shape) + + assert np.issubdtype(res.success.dtype, np.bool_) + assert np.issubdtype(res.status.dtype, np.integer) + assert np.issubdtype(res.nfev.dtype, np.integer) + assert np.issubdtype(res.maxlevel.dtype, np.integer) + assert_equal(np.max(res.nfev), f.feval) + # maxlevel = 2 -> 3 function calls (2 initialization, 1 work) + assert np.max(res.maxlevel) >= 2 + assert_equal(np.max(res.maxlevel), f.ncall) + + def test_flags(self): + # Test cases that should produce different status flags; show that all + # can be produced simultaneously. 
+ def f(xs, js): + f.nit += 1 + funcs = [lambda x: np.exp(-x**2), # converges + lambda x: np.exp(x), # reaches maxiter due to order=2 + lambda x: np.full_like(x, np.nan)[()]] # stops due to NaN + res = [funcs[j](x) for x, j in zip(xs, js.ravel())] + return res + f.nit = 0 + + args = (np.arange(3, dtype=np.int64),) + res = _tanhsinh(f, [np.inf]*3, [-np.inf]*3, maxlevel=5, args=args) + ref_flags = np.array([0, -2, -3]) + assert_equal(res.status, ref_flags) + + def test_flags_preserve_shape(self): + # Same test as above but using `preserve_shape` option to simplify. + def f(x): + return [np.exp(-x[0]**2), # converges + np.exp(x[1]), # reaches maxiter due to order=2 + np.full_like(x[2], np.nan)[()]] # stops due to NaN + + res = _tanhsinh(f, [np.inf]*3, [-np.inf]*3, maxlevel=5, preserve_shape=True) + ref_flags = np.array([0, -2, -3]) + assert_equal(res.status, ref_flags) + + def test_preserve_shape(self): + # Test `preserve_shape` option + def f(x): + return np.asarray([[x, np.sin(10 * x)], + [np.cos(30 * x), x * np.sin(100 * x)]]) + + ref = quad_vec(f, 0, 1) + res = _tanhsinh(f, 0, 1, preserve_shape=True) + assert_allclose(res.integral, ref[0]) + + def test_convergence(self): + # demonstrate that number of accurate digits doubles each iteration + f = self.f1 + last_logerr = 0 + for i in range(4): + res = _tanhsinh(f, 0, f.b, minlevel=0, maxlevel=i) + logerr = self.error(res.integral, f.ref, log=True) + assert (logerr < last_logerr * 2 or logerr < -15.5) + last_logerr = logerr + + def test_options_and_result_attributes(self): + # demonstrate that options are behaving as advertised and status + # messages are as intended + def f(x): + f.calls += 1 + f.feval += np.size(x) + return self.f2(x) + f.ref = self.f2.ref + f.b = self.f2.b + default_rtol = 1e-12 + default_atol = f.ref * default_rtol # effective default absolute tol + + # Test default options + f.feval, f.calls = 0, 0 + ref = _tanhsinh(f, 0, f.b) + assert self.error(ref.integral, f.ref) < ref.error < default_atol + 
assert ref.nfev == f.feval + ref.calls = f.calls # reference number of function calls + assert ref.success + assert ref.status == 0 + + # Test `maxlevel` equal to required max level + # We should get all the same results + f.feval, f.calls = 0, 0 + maxlevel = ref.maxlevel + res = _tanhsinh(f, 0, f.b, maxlevel=maxlevel) + res.calls = f.calls + assert res == ref + + # Now reduce the maximum level. We won't meet tolerances. + f.feval, f.calls = 0, 0 + maxlevel -= 1 + assert maxlevel >= 2 # can't compare errors otherwise + res = _tanhsinh(f, 0, f.b, maxlevel=maxlevel) + assert self.error(res.integral, f.ref) < res.error > default_atol + assert res.nfev == f.feval < ref.nfev + assert f.calls == ref.calls - 1 + assert not res.success + assert res.status == eim._ECONVERR + + # `maxfun` is currently not enforced + + # # Test `maxfun` equal to required number of function evaluations + # # We should get all the same results + # f.feval, f.calls = 0, 0 + # maxfun = ref.nfev + # res = _tanhsinh(f, 0, f.b, maxfun = maxfun) + # assert res == ref + # + # # Now reduce `maxfun`. We won't meet tolerances. 
+ # f.feval, f.calls = 0, 0 + # maxfun -= 1 + # res = _tanhsinh(f, 0, f.b, maxfun=maxfun) + # assert self.error(res.integral, f.ref) < res.error > default_atol + # assert res.nfev == f.feval < ref.nfev + # assert f.calls == ref.calls - 1 + # assert not res.success + # assert res.status == 2 + + # Take this result to be the new reference + ref = res + ref.calls = f.calls + + # Test `atol` + f.feval, f.calls = 0, 0 + # With this tolerance, we should get the exact same result as ref + atol = np.nextafter(ref.error, np.inf) + res = _tanhsinh(f, 0, f.b, rtol=0, atol=atol) + assert res.integral == ref.integral + assert res.error == ref.error + assert res.nfev == f.feval == ref.nfev + assert f.calls == ref.calls + # Except the result is considered to be successful + assert res.success + assert res.status == 0 + + f.feval, f.calls = 0, 0 + # With a tighter tolerance, we should get a more accurate result + atol = np.nextafter(ref.error, -np.inf) + res = _tanhsinh(f, 0, f.b, rtol=0, atol=atol) + assert self.error(res.integral, f.ref) < res.error < atol + assert res.nfev == f.feval > ref.nfev + assert f.calls > ref.calls + assert res.success + assert res.status == 0 + + # Test `rtol` + f.feval, f.calls = 0, 0 + # With this tolerance, we should get the exact same result as ref + rtol = np.nextafter(ref.error/ref.integral, np.inf) + res = _tanhsinh(f, 0, f.b, rtol=rtol) + assert res.integral == ref.integral + assert res.error == ref.error + assert res.nfev == f.feval == ref.nfev + assert f.calls == ref.calls + # Except the result is considered to be successful + assert res.success + assert res.status == 0 + + f.feval, f.calls = 0, 0 + # With a tighter tolerance, we should get a more accurate result + rtol = np.nextafter(ref.error/ref.integral, -np.inf) + res = _tanhsinh(f, 0, f.b, rtol=rtol) + assert self.error(res.integral, f.ref)/f.ref < res.error/res.integral < rtol + assert res.nfev == f.feval > ref.nfev + assert f.calls > ref.calls + assert res.success + assert res.status 
== 0 + + @pytest.mark.parametrize('rtol', [1e-4, 1e-14]) + def test_log(self, rtol): + # Test equivalence of log-integration and regular integration + dist = stats.norm() + + test_tols = dict(atol=1e-18, rtol=1e-15) + + # Positive integrand (real log-integrand) + res = _tanhsinh(dist.logpdf, -1, 2, log=True, rtol=np.log(rtol)) + ref = _tanhsinh(dist.pdf, -1, 2, rtol=rtol) + assert_allclose(np.exp(res.integral), ref.integral, **test_tols) + assert_allclose(np.exp(res.error), ref.error, **test_tols) + assert res.nfev == ref.nfev + + # Real integrand (complex log-integrand) + def f(x): + return -dist.logpdf(x)*dist.pdf(x) + + def logf(x): + return np.log(dist.logpdf(x) + 0j) + dist.logpdf(x) + np.pi * 1j + + res = _tanhsinh(logf, -np.inf, np.inf, log=True) + ref = _tanhsinh(f, -np.inf, np.inf) + # In gh-19173, we saw `invalid` warnings on one CI platform. + # Silencing `all` because I can't reproduce locally and don't want + # to risk the need to run CI again. + with np.errstate(all='ignore'): + assert_allclose(np.exp(res.integral), ref.integral, **test_tols) + assert_allclose(np.exp(res.error), ref.error, **test_tols) + assert res.nfev == ref.nfev + + def test_complex(self): + # Test integration of complex integrand + # Finite limits + def f(x): + return np.exp(1j * x) + + res = _tanhsinh(f, 0, np.pi/4) + ref = np.sqrt(2)/2 + (1-np.sqrt(2)/2)*1j + assert_allclose(res.integral, ref) + + # Infinite limits + dist1 = stats.norm(scale=1) + dist2 = stats.norm(scale=2) + def f(x): + return dist1.pdf(x) + 1j*dist2.pdf(x) + + res = _tanhsinh(f, np.inf, -np.inf) + assert_allclose(res.integral, -(1+1j)) + + @pytest.mark.parametrize("maxlevel", range(4)) + def test_minlevel(self, maxlevel): + # Verify that minlevel does not change the values at which the + # integrand is evaluated or the integral/error estimates, only the + # number of function calls + def f(x): + f.calls += 1 + f.feval += np.size(x) + f.x = np.concatenate((f.x, x.ravel())) + return self.f2(x) + f.feval, 
f.calls, f.x = 0, 0, np.array([]) + + ref = _tanhsinh(f, 0, self.f2.b, minlevel=0, maxlevel=maxlevel) + ref_x = np.sort(f.x) + + for minlevel in range(0, maxlevel + 1): + f.feval, f.calls, f.x = 0, 0, np.array([]) + options = dict(minlevel=minlevel, maxlevel=maxlevel) + res = _tanhsinh(f, 0, self.f2.b, **options) + # Should be very close; all that has changed is the order of values + assert_allclose(res.integral, ref.integral, rtol=4e-16) + # Difference in absolute errors << magnitude of integral + assert_allclose(res.error, ref.error, atol=4e-16 * ref.integral) + assert res.nfev == f.feval == len(f.x) + assert f.calls == maxlevel - minlevel + 1 + 1 # 1 validation call + assert res.status == ref.status + assert_equal(ref_x, np.sort(f.x)) + + def test_improper_integrals(self): + # Test handling of infinite limits of integration (mixed with finite limits) + def f(x): + x[np.isinf(x)] = np.nan + return np.exp(-x**2) + a = [-np.inf, 0, -np.inf, np.inf, -20, -np.inf, -20] + b = [np.inf, np.inf, 0, -np.inf, 20, 20, np.inf] + ref = np.sqrt(np.pi) + res = _tanhsinh(f, a, b) + assert_allclose(res.integral, [ref, ref/2, ref/2, -ref, ref, ref, ref]) + + @pytest.mark.parametrize("limits", ((0, 3), ([-np.inf, 0], [3, 3]))) + @pytest.mark.parametrize("dtype", (np.float32, np.float64)) + def test_dtype(self, limits, dtype): + # Test that dtypes are preserved + a, b = np.asarray(limits, dtype=dtype)[()] + + def f(x): + assert x.dtype == dtype + return np.exp(x) + + rtol = 1e-12 if dtype == np.float64 else 1e-5 + res = _tanhsinh(f, a, b, rtol=rtol) + assert res.integral.dtype == dtype + assert res.error.dtype == dtype + assert np.all(res.success) + assert_allclose(res.integral, np.exp(b)-np.exp(a), rtol=rtol) + + def test_maxiter_callback(self): + # Test behavior of `maxiter` parameter and `callback` interface + a, b = -np.inf, np.inf + def f(x): + return np.exp(-x*x) + + minlevel, maxlevel = 0, 2 + maxiter = maxlevel - minlevel + 1 + kwargs = dict(minlevel=minlevel, 
maxlevel=maxlevel, rtol=1e-15) + res = _tanhsinh(f, a, b, **kwargs) + assert not res.success + assert res.maxlevel == maxlevel + + def callback(res): + callback.iter += 1 + callback.res = res + assert hasattr(res, 'integral') + assert res.status == 1 + if callback.iter == maxiter: + raise StopIteration + callback.iter = -1 # callback called once before first iteration + callback.res = None + + del kwargs['maxlevel'] + res2 = _tanhsinh(f, a, b, **kwargs, callback=callback) + # terminating with callback is identical to terminating due to maxiter + # (except for `status`) + for key in res.keys(): + if key == 'status': + assert callback.res[key] == 1 + assert res[key] == -2 + assert res2[key] == -4 + else: + assert res2[key] == callback.res[key] == res[key] + + def test_jumpstart(self): + # The intermediate results at each level i should be the same as the + # final results when jumpstarting at level i; i.e. minlevel=maxlevel=i + a, b = -np.inf, np.inf + def f(x): + return np.exp(-x*x) + + def callback(res): + callback.integrals.append(res.integral) + callback.errors.append(res.error) + callback.integrals = [] + callback.errors = [] + + maxlevel = 4 + _tanhsinh(f, a, b, minlevel=0, maxlevel=maxlevel, callback=callback) + + integrals = [] + errors = [] + for i in range(maxlevel + 1): + res = _tanhsinh(f, a, b, minlevel=i, maxlevel=i) + integrals.append(res.integral) + errors.append(res.error) + + assert_allclose(callback.integrals[1:], integrals, rtol=1e-15) + assert_allclose(callback.errors[1:], errors, rtol=1e-15, atol=1e-16) + + def test_special_cases(self): + # Test edge cases and other special cases + + # Test that integers are not passed to `f` + # (otherwise this would overflow) + def f(x): + assert np.issubdtype(x.dtype, np.floating) + return x ** 99 + + res = _tanhsinh(f, 0, 1) + assert res.success + assert_allclose(res.integral, 1/100) + + # Test levels 0 and 1; error is NaN + res = _tanhsinh(f, 0, 1, maxlevel=0) + assert res.integral > 0 + 
assert_equal(res.error, np.nan) + res = _tanhsinh(f, 0, 1, maxlevel=1) + assert res.integral > 0 + assert_equal(res.error, np.nan) + + # Tes equal left and right integration limits + res = _tanhsinh(f, 1, 1) + assert res.success + assert res.maxlevel == -1 + assert_allclose(res.integral, 0) + + # Test scalar `args` (not in tuple) + def f(x, c): + return x**c + + res = _tanhsinh(f, 0, 1, args=99) + assert_allclose(res.integral, 1/100) + + # Test NaNs + a = [np.nan, 0, 0, 0] + b = [1, np.nan, 1, 1] + c = [1, 1, np.nan, 1] + res = _tanhsinh(f, a, b, args=(c,)) + assert_allclose(res.integral, [np.nan, np.nan, np.nan, 0.5]) + assert_allclose(res.error[:3], np.nan) + assert_equal(res.status, [-3, -3, -3, 0]) + assert_equal(res.success, [False, False, False, True]) + assert_equal(res.nfev[:3], 1) + + # Test complex integral followed by real integral + # Previously, h0 was of the result dtype. If the `dtype` were complex, + # this could lead to complex cached abscissae/weights. If these get + # cast to real dtype for a subsequent real integral, we would get a + # ComplexWarning. Check that this is avoided. 
+ _pair_cache.xjc = np.empty(0) + _pair_cache.wj = np.empty(0) + _pair_cache.indices = [0] + _pair_cache.h0 = None + res = _tanhsinh(lambda x: x*1j, 0, 1) + assert_allclose(res.integral, 0.5*1j) + res = _tanhsinh(lambda x: x, 0, 1) + assert_allclose(res.integral, 0.5) + + # Test zero-size + shape = (0, 3) + res = _tanhsinh(lambda x: x, 0, np.zeros(shape)) + attrs = ['integral', 'error', 'success', 'status', 'nfev', 'maxlevel'] + for attr in attrs: + assert_equal(res[attr].shape, shape) + + +class TestNSum: + rng = np.random.default_rng(5895448232066142650) + p = rng.uniform(1, 10, size=10) + + def f1(self, k): + # Integers are never passed to `f1`; if they were, we'd get + # integer to negative integer power error + return k**(-2) + + f1.ref = np.pi**2/6 + f1.a = 1 + f1.b = np.inf + f1.args = tuple() + + def f2(self, k, p): + return 1 / k**p + + f2.ref = special.zeta(p, 1) + f2.a = 1 + f2.b = np.inf + f2.args = (p,) + + def f3(self, k, p): + return 1 / k**p + + f3.a = 1 + f3.b = rng.integers(5, 15, size=(3, 1)) + f3.ref = _gen_harmonic_gt1(f3.b, p) + f3.args = (p,) + + def test_input_validation(self): + f = self.f1 + + message = '`f` must be callable.' + with pytest.raises(ValueError, match=message): + _nsum(42, f.a, f.b) + + message = '...must be True or False.' + with pytest.raises(ValueError, match=message): + _nsum(f, f.a, f.b, log=2) + + message = '...must be real numbers.' 
+ with pytest.raises(ValueError, match=message): + _nsum(f, 1+1j, f.b) + with pytest.raises(ValueError, match=message): + _nsum(f, f.a, None) + with pytest.raises(ValueError, match=message): + _nsum(f, f.a, f.b, step=object()) + with pytest.raises(ValueError, match=message): + _nsum(f, f.a, f.b, atol='ekki') + with pytest.raises(ValueError, match=message): + _nsum(f, f.a, f.b, rtol=pytest) + + with np.errstate(all='ignore'): + res = _nsum(f, [np.nan, -np.inf, np.inf], 1) + assert np.all((res.status == -1) & np.isnan(res.sum) + & np.isnan(res.error) & ~res.success & res.nfev == 1) + res = _nsum(f, 10, [np.nan, 1]) + assert np.all((res.status == -1) & np.isnan(res.sum) + & np.isnan(res.error) & ~res.success & res.nfev == 1) + res = _nsum(f, 1, 10, step=[np.nan, -np.inf, np.inf, -1, 0]) + assert np.all((res.status == -1) & np.isnan(res.sum) + & np.isnan(res.error) & ~res.success & res.nfev == 1) + + message = '...must be non-negative and finite.' + with pytest.raises(ValueError, match=message): + _nsum(f, f.a, f.b, rtol=-1) + with pytest.raises(ValueError, match=message): + _nsum(f, f.a, f.b, atol=np.inf) + + message = '...may not be positive infinity.' + with pytest.raises(ValueError, match=message): + _nsum(f, f.a, f.b, rtol=np.inf, log=True) + with pytest.raises(ValueError, match=message): + _nsum(f, f.a, f.b, atol=np.inf, log=True) + + message = '...must be a non-negative integer.' 
+ with pytest.raises(ValueError, match=message): + _nsum(f, f.a, f.b, maxterms=3.5) + with pytest.raises(ValueError, match=message): + _nsum(f, f.a, f.b, maxterms=-2) + + @pytest.mark.parametrize('f_number', range(1, 4)) + def test_basic(self, f_number): + f = getattr(self, f"f{f_number}") + res = _nsum(f, f.a, f.b, args=f.args) + assert_allclose(res.sum, f.ref) + assert_equal(res.status, 0) + assert_equal(res.success, True) + + with np.errstate(divide='ignore'): + logres = _nsum(lambda *args: np.log(f(*args)), + f.a, f.b, log=True, args=f.args) + assert_allclose(np.exp(logres.sum), res.sum) + assert_allclose(np.exp(logres.error), res.error) + assert_equal(logres.status, 0) + assert_equal(logres.success, True) + + @pytest.mark.parametrize('maxterms', [0, 1, 10, 20, 100]) + def test_integral(self, maxterms): + # test precise behavior of integral approximation + f = self.f1 + + def logf(x): + return -2*np.log(x) + + def F(x): + return -1 / x + + a = np.asarray([1, 5])[:, np.newaxis] + b = np.asarray([20, 100, np.inf])[:, np.newaxis, np.newaxis] + step = np.asarray([0.5, 1, 2]).reshape((-1, 1, 1, 1)) + nsteps = np.floor((b - a)/step) + b_original = b + b = a + nsteps*step + + k = a + maxterms*step + # partial sum + direct = f(a + np.arange(maxterms)*step).sum(axis=-1, keepdims=True) + integral = (F(b) - F(k))/step # integral approximation of remainder + low = direct + integral + f(b) # theoretical lower bound + high = direct + integral + f(k) # theoretical upper bound + ref_sum = (low + high)/2 # _nsum uses average of the two + ref_err = (high - low)/2 # error (assuming perfect quadrature) + + # correct reference values where number of terms < maxterms + a, b, step = np.broadcast_arrays(a, b, step) + for i in np.ndindex(a.shape): + ai, bi, stepi = a[i], b[i], step[i] + if (bi - ai)/stepi + 1 <= maxterms: + direct = f(np.arange(ai, bi+stepi, stepi)).sum() + ref_sum[i] = direct + ref_err[i] = direct * np.finfo(direct).eps + + rtol = 1e-12 + res = _nsum(f, a, b_original, 
step=step, maxterms=maxterms, rtol=rtol) + assert_allclose(res.sum, ref_sum, rtol=10*rtol) + assert_allclose(res.error, ref_err, rtol=100*rtol) + assert_equal(res.status, 0) + assert_equal(res.success, True) + + i = ((b_original - a)/step + 1 <= maxterms) + assert_allclose(res.sum[i], ref_sum[i], rtol=1e-15) + assert_allclose(res.error[i], ref_err[i], rtol=1e-15) + + logres = _nsum(logf, a, b_original, step=step, log=True, + rtol=np.log(rtol), maxterms=maxterms) + assert_allclose(np.exp(logres.sum), res.sum) + assert_allclose(np.exp(logres.error), res.error) + assert_equal(logres.status, 0) + assert_equal(logres.success, True) + + @pytest.mark.parametrize('shape', [tuple(), (12,), (3, 4), (3, 2, 2)]) + def test_vectorization(self, shape): + # Test for correct functionality, output shapes, and dtypes for various + # input shapes. + rng = np.random.default_rng(82456839535679456794) + a = rng.integers(1, 10, size=shape) + # when the sum can be computed directly or `maxterms` is large enough + # to meet `atol`, there are slight differences (for good reason) + # between vectorized call and looping. 
+ b = np.inf + p = rng.random(shape) + 1 + n = np.prod(shape) + + def f(x, p): + f.feval += 1 if (x.size == n or x.ndim <= 1) else x.shape[-1] + return 1 / x ** p + + f.feval = 0 + + @np.vectorize + def _nsum_single(a, b, p, maxterms): + return _nsum(lambda x: 1 / x**p, a, b, maxterms=maxterms) + + res = _nsum(f, a, b, maxterms=1000, args=(p,)) + refs = _nsum_single(a, b, p, maxterms=1000).ravel() + + attrs = ['sum', 'error', 'success', 'status', 'nfev'] + for attr in attrs: + ref_attr = [getattr(ref, attr) for ref in refs] + res_attr = getattr(res, attr) + assert_allclose(res_attr.ravel(), ref_attr, rtol=1e-15) + assert_equal(res_attr.shape, shape) + + assert np.issubdtype(res.success.dtype, np.bool_) + assert np.issubdtype(res.status.dtype, np.integer) + assert np.issubdtype(res.nfev.dtype, np.integer) + assert_equal(np.max(res.nfev), f.feval) + + def test_status(self): + f = self.f2 + + p = [2, 2, 0.9, 1.1] + a = [0, 0, 1, 1] + b = [10, np.inf, np.inf, np.inf] + ref = special.zeta(p, 1) + + with np.errstate(divide='ignore'): # intentionally dividing by zero + res = _nsum(f, a, b, args=(p,)) + + assert_equal(res.success, [False, False, False, True]) + assert_equal(res.status, [-3, -3, -2, 0]) + assert_allclose(res.sum[res.success], ref[res.success]) + + def test_nfev(self): + def f(x): + f.nfev += np.size(x) + return 1 / x**2 + + f.nfev = 0 + res = _nsum(f, 1, 10) + assert_equal(res.nfev, f.nfev) + + f.nfev = 0 + res = _nsum(f, 1, np.inf, atol=1e-6) + assert_equal(res.nfev, f.nfev) + + def test_inclusive(self): + # There was an edge case off-by one bug when `_direct` was called with + # `inclusive=True`. Check that this is resolved. 
+ res = _nsum(lambda k: 1 / k ** 2, [1, 4], np.inf, maxterms=500, atol=0.1) + ref = _nsum(lambda k: 1 / k ** 2, [1, 4], np.inf) + assert np.all(res.sum > (ref.sum - res.error)) + assert np.all(res.sum < (ref.sum + res.error)) + + def test_special_case(self): + # test equal lower/upper limit + f = self.f1 + a = b = 2 + res = _nsum(f, a, b) + assert_equal(res.sum, f(a)) + + # Test scalar `args` (not in tuple) + res = _nsum(self.f2, 1, np.inf, args=2) + assert_allclose(res.sum, self.f1.ref) # f1.ref is correct w/ args=2 + + # Test 0 size input + a = np.empty((3, 1, 1)) # arbitrary broadcastable shapes + b = np.empty((0, 1)) # could use Hypothesis + p = np.empty(4) # but it's overkill + shape = np.broadcast_shapes(a.shape, b.shape, p.shape) + res = _nsum(self.f2, a, b, args=(p,)) + assert res.sum.shape == shape + assert res.status.shape == shape + assert res.nfev.shape == shape + + # Test maxterms=0 + def f(x): + with np.errstate(divide='ignore'): + return 1 / x + + res = _nsum(f, 0, 10, maxterms=0) + assert np.isnan(res.sum) + assert np.isnan(res.error) + assert res.status == -2 + + res = _nsum(f, 0, 10, maxterms=1) + assert np.isnan(res.sum) + assert np.isnan(res.error) + assert res.status == -3 + + # Test NaNs + # should skip both direct and integral methods if there are NaNs + a = [np.nan, 1, 1, 1] + b = [np.inf, np.nan, np.inf, np.inf] + p = [2, 2, np.nan, 2] + res = _nsum(self.f2, a, b, args=(p,)) + assert_allclose(res.sum, [np.nan, np.nan, np.nan, self.f1.ref]) + assert_allclose(res.error[:3], np.nan) + assert_equal(res.status, [-1, -1, -3, 0]) + assert_equal(res.success, [False, False, False, True]) + # Ideally res.nfev[2] would be 1, but `tanhsinh` has some function evals + assert_equal(res.nfev[:2], 1) + + @pytest.mark.parametrize('dtype', [np.float32, np.float64]) + def test_dtype(self, dtype): + def f(k): + assert k.dtype == dtype + return 1 / k ** np.asarray(2, dtype=dtype)[()] + + a = np.asarray(1, dtype=dtype) + b = np.asarray([10, np.inf], dtype=dtype) 
+ res = _nsum(f, a, b) + assert res.sum.dtype == dtype + assert res.error.dtype == dtype + + rtol = 1e-12 if dtype == np.float64 else 1e-6 + ref = _gen_harmonic_gt1(b, 2) + assert_allclose(res.sum, ref, rtol=rtol) diff --git a/parrot/lib/python3.10/site-packages/scipy/linalg/_solve_toeplitz.cpython-310-x86_64-linux-gnu.so b/parrot/lib/python3.10/site-packages/scipy/linalg/_solve_toeplitz.cpython-310-x86_64-linux-gnu.so new file mode 100644 index 0000000000000000000000000000000000000000..fe017bf76563d221c96e9cafd026f23c92d89356 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/linalg/_solve_toeplitz.cpython-310-x86_64-linux-gnu.so @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c2f5db6e6c40228f4d37552624bde26b3a2392e63902889f48f5ee20825b9e1 +size 300112 diff --git a/parrot/lib/python3.10/site-packages/scipy/misc/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/misc/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7ebc5b3846996dff3e139bdf216e9e53a74a1658 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/misc/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/misc/__pycache__/_common.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/misc/__pycache__/_common.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0b6500c1c8ddd4baab82aec8f2604c0951806d76 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/misc/__pycache__/_common.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/misc/tests/__pycache__/test_config.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/misc/tests/__pycache__/test_config.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..24cec27818bfb42ef1b9f88c355686d146294a77 Binary files /dev/null and 
b/parrot/lib/python3.10/site-packages/scipy/misc/tests/__pycache__/test_config.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/ndimage/fourier.py b/parrot/lib/python3.10/site-packages/scipy/ndimage/fourier.py new file mode 100644 index 0000000000000000000000000000000000000000..73c49bd52d9a446ce0fe25d9e15b8de68fbd46fb --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/ndimage/fourier.py @@ -0,0 +1,21 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.ndimage` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + + +__all__ = [ # noqa: F822 + 'fourier_gaussian', 'fourier_uniform', + 'fourier_ellipsoid', 'fourier_shift' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package='ndimage', module='fourier', + private_modules=['_fourier'], all=__all__, + attribute=name) diff --git a/parrot/lib/python3.10/site-packages/scipy/odr/__init__.py b/parrot/lib/python3.10/site-packages/scipy/odr/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..a44a8c133b674aea416efeb4da469241b50a547f --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/odr/__init__.py @@ -0,0 +1,131 @@ +""" +================================================= +Orthogonal distance regression (:mod:`scipy.odr`) +================================================= + +.. currentmodule:: scipy.odr + +Package Content +=============== + +.. autosummary:: + :toctree: generated/ + + Data -- The data to fit. + RealData -- Data with weights as actual std. dev.s and/or covariances. + Model -- Stores information about the function to be fit. + ODR -- Gathers all info & manages the main fitting routine. + Output -- Result from the fit. + odr -- Low-level function for ODR. + + OdrWarning -- Warning about potential problems when running ODR. + OdrError -- Error exception. 
+ OdrStop -- Stop exception. + + polynomial -- Factory function for a general polynomial model. + exponential -- Exponential model + multilinear -- Arbitrary-dimensional linear model + unilinear -- Univariate linear model + quadratic -- Quadratic model + +Usage information +================= + +Introduction +------------ + +Why Orthogonal Distance Regression (ODR)? Sometimes one has +measurement errors in the explanatory (a.k.a., "independent") +variable(s), not just the response (a.k.a., "dependent") variable(s). +Ordinary Least Squares (OLS) fitting procedures treat the data for +explanatory variables as fixed, i.e., not subject to error of any kind. +Furthermore, OLS procedures require that the response variables be an +explicit function of the explanatory variables; sometimes making the +equation explicit is impractical and/or introduces errors. ODR can +handle both of these cases with ease, and can even reduce to the OLS +case if that is sufficient for the problem. + +ODRPACK is a FORTRAN-77 library for performing ODR with possibly +non-linear fitting functions. It uses a modified trust-region +Levenberg-Marquardt-type algorithm [1]_ to estimate the function +parameters. The fitting functions are provided by Python functions +operating on NumPy arrays. The required derivatives may be provided +by Python functions as well, or may be estimated numerically. ODRPACK +can do explicit or implicit ODR fits, or it can do OLS. Input and +output variables may be multidimensional. Weights can be provided to +account for different variances of the observations, and even +covariances between dimensions of the variables. + +The `scipy.odr` package offers an object-oriented interface to +ODRPACK, in addition to the low-level `odr` function. + +Additional background information about ODRPACK can be found in the +`ODRPACK User's Guide +`_, reading +which is recommended. + +Basic usage +----------- + +1. 
Define the function you want to fit against.:: + + def f(B, x): + '''Linear function y = m*x + b''' + # B is a vector of the parameters. + # x is an array of the current x values. + # x is in the same format as the x passed to Data or RealData. + # + # Return an array in the same format as y passed to Data or RealData. + return B[0]*x + B[1] + +2. Create a Model.:: + + linear = Model(f) + +3. Create a Data or RealData instance.:: + + mydata = Data(x, y, wd=1./power(sx,2), we=1./power(sy,2)) + + or, when the actual covariances are known:: + + mydata = RealData(x, y, sx=sx, sy=sy) + +4. Instantiate ODR with your data, model and initial parameter estimate.:: + + myodr = ODR(mydata, linear, beta0=[1., 2.]) + +5. Run the fit.:: + + myoutput = myodr.run() + +6. Examine output.:: + + myoutput.pprint() + + +References +---------- +.. [1] P. T. Boggs and J. E. Rogers, "Orthogonal Distance Regression," + in "Statistical analysis of measurement error models and + applications: proceedings of the AMS-IMS-SIAM joint summer research + conference held June 10-16, 1989," Contemporary Mathematics, + vol. 112, pg. 186, 1990. + +""" +# version: 0.7 +# author: Robert Kern +# date: 2006-09-21 + +from ._odrpack import * +from ._models import * +from . import _add_newdocs + +# Deprecated namespaces, to be removed in v2.0.0 +from . 
import models, odrpack + +__all__ = [s for s in dir() + if not (s.startswith('_') or s in ('odr_stop', 'odr_error'))] + +from scipy._lib._testutils import PytestTester +test = PytestTester(__name__) +del PytestTester diff --git a/parrot/lib/python3.10/site-packages/scipy/odr/__pycache__/_models.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/odr/__pycache__/_models.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..7bae3146f33916c9b63f850eb8793abb176f34a2 Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/odr/__pycache__/_models.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/odr/_add_newdocs.py b/parrot/lib/python3.10/site-packages/scipy/odr/_add_newdocs.py new file mode 100644 index 0000000000000000000000000000000000000000..e09fb6cc8c5f1523dfbeaef466a5b76bd22c01bb --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/odr/_add_newdocs.py @@ -0,0 +1,34 @@ +from numpy.lib import add_newdoc + +add_newdoc('scipy.odr', 'odr', + """ + odr(fcn, beta0, y, x, we=None, wd=None, fjacb=None, fjacd=None, extra_args=None, + ifixx=None, ifixb=None, job=0, iprint=0, errfile=None, rptfile=None, ndigit=0, + taufac=0.0, sstol=-1.0, partol=-1.0, maxit=-1, stpb=None, stpd=None, sclb=None, + scld=None, work=None, iwork=None, full_output=0) + + Low-level function for ODR. + + See Also + -------- + ODR : The ODR class gathers all information and coordinates the running of the + main fitting routine. + Model : The Model class stores information about the function you wish to fit. + Data : The data to fit. + RealData : Data with weights as actual std. dev.s and/or covariances. + + Notes + ----- + This is a function performing the same operation as the `ODR`, + `Model`, and `Data` classes together. The parameters of this + function are explained in the class documentation. 
+ + """) + +add_newdoc('scipy.odr.__odrpack', '_set_exceptions', + """ + _set_exceptions(odr_error, odr_stop) + + Internal function: set exception classes. + + """) diff --git a/parrot/lib/python3.10/site-packages/scipy/odr/_models.py b/parrot/lib/python3.10/site-packages/scipy/odr/_models.py new file mode 100644 index 0000000000000000000000000000000000000000..e0a8d2275dcc4698a9ea61be5871d62069be2599 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/odr/_models.py @@ -0,0 +1,315 @@ +""" Collection of Model instances for use with the odrpack fitting package. +""" +import numpy as np +from scipy.odr._odrpack import Model + +__all__ = ['Model', 'exponential', 'multilinear', 'unilinear', 'quadratic', + 'polynomial'] + + +def _lin_fcn(B, x): + a, b = B[0], B[1:] + b.shape = (b.shape[0], 1) + + return a + (x*b).sum(axis=0) + + +def _lin_fjb(B, x): + a = np.ones(x.shape[-1], float) + res = np.concatenate((a, x.ravel())) + res.shape = (B.shape[-1], x.shape[-1]) + return res + + +def _lin_fjd(B, x): + b = B[1:] + b = np.repeat(b, (x.shape[-1],)*b.shape[-1], axis=0) + b.shape = x.shape + return b + + +def _lin_est(data): + # Eh. The answer is analytical, so just return all ones. + # Don't return zeros since that will interfere with + # ODRPACK's auto-scaling procedures. 
+ + if len(data.x.shape) == 2: + m = data.x.shape[0] + else: + m = 1 + + return np.ones((m + 1,), float) + + +def _poly_fcn(B, x, powers): + a, b = B[0], B[1:] + b.shape = (b.shape[0], 1) + + return a + np.sum(b * np.power(x, powers), axis=0) + + +def _poly_fjacb(B, x, powers): + res = np.concatenate((np.ones(x.shape[-1], float), + np.power(x, powers).flat)) + res.shape = (B.shape[-1], x.shape[-1]) + return res + + +def _poly_fjacd(B, x, powers): + b = B[1:] + b.shape = (b.shape[0], 1) + + b = b * powers + + return np.sum(b * np.power(x, powers-1), axis=0) + + +def _exp_fcn(B, x): + return B[0] + np.exp(B[1] * x) + + +def _exp_fjd(B, x): + return B[1] * np.exp(B[1] * x) + + +def _exp_fjb(B, x): + res = np.concatenate((np.ones(x.shape[-1], float), x * np.exp(B[1] * x))) + res.shape = (2, x.shape[-1]) + return res + + +def _exp_est(data): + # Eh. + return np.array([1., 1.]) + + +class _MultilinearModel(Model): + r""" + Arbitrary-dimensional linear model + + This model is defined by :math:`y=\beta_0 + \sum_{i=1}^m \beta_i x_i` + + Examples + -------- + We can calculate orthogonal distance regression with an arbitrary + dimensional linear model: + + >>> from scipy import odr + >>> import numpy as np + >>> x = np.linspace(0.0, 5.0) + >>> y = 10.0 + 5.0 * x + >>> data = odr.Data(x, y) + >>> odr_obj = odr.ODR(data, odr.multilinear) + >>> output = odr_obj.run() + >>> print(output.beta) + [10. 5.] + + """ + + def __init__(self): + super().__init__( + _lin_fcn, fjacb=_lin_fjb, fjacd=_lin_fjd, estimate=_lin_est, + meta={'name': 'Arbitrary-dimensional Linear', + 'equ': 'y = B_0 + Sum[i=1..m, B_i * x_i]', + 'TeXequ': r'$y=\beta_0 + \sum_{i=1}^m \beta_i x_i$'}) + + +multilinear = _MultilinearModel() + + +def polynomial(order): + """ + Factory function for a general polynomial model. + + Parameters + ---------- + order : int or sequence + If an integer, it becomes the order of the polynomial to fit. 
If + a sequence of numbers, then these are the explicit powers in the + polynomial. + A constant term (power 0) is always included, so don't include 0. + Thus, polynomial(n) is equivalent to polynomial(range(1, n+1)). + + Returns + ------- + polynomial : Model instance + Model instance. + + Examples + -------- + We can fit an input data using orthogonal distance regression (ODR) with + a polynomial model: + + >>> import numpy as np + >>> import matplotlib.pyplot as plt + >>> from scipy import odr + >>> x = np.linspace(0.0, 5.0) + >>> y = np.sin(x) + >>> poly_model = odr.polynomial(3) # using third order polynomial model + >>> data = odr.Data(x, y) + >>> odr_obj = odr.ODR(data, poly_model) + >>> output = odr_obj.run() # running ODR fitting + >>> poly = np.poly1d(output.beta[::-1]) + >>> poly_y = poly(x) + >>> plt.plot(x, y, label="input data") + >>> plt.plot(x, poly_y, label="polynomial ODR") + >>> plt.legend() + >>> plt.show() + + """ + + powers = np.asarray(order) + if powers.shape == (): + # Scalar. + powers = np.arange(1, powers + 1) + + powers.shape = (len(powers), 1) + len_beta = len(powers) + 1 + + def _poly_est(data, len_beta=len_beta): + # Eh. Ignore data and return all ones. 
+ return np.ones((len_beta,), float) + + return Model(_poly_fcn, fjacd=_poly_fjacd, fjacb=_poly_fjacb, + estimate=_poly_est, extra_args=(powers,), + meta={'name': 'Sorta-general Polynomial', + 'equ': 'y = B_0 + Sum[i=1..%s, B_i * (x**i)]' % (len_beta-1), + 'TeXequ': r'$y=\beta_0 + \sum_{i=1}^{%s} \beta_i x^i$' % + (len_beta-1)}) + + +class _ExponentialModel(Model): + r""" + Exponential model + + This model is defined by :math:`y=\beta_0 + e^{\beta_1 x}` + + Examples + -------- + We can calculate orthogonal distance regression with an exponential model: + + >>> from scipy import odr + >>> import numpy as np + >>> x = np.linspace(0.0, 5.0) + >>> y = -10.0 + np.exp(0.5*x) + >>> data = odr.Data(x, y) + >>> odr_obj = odr.ODR(data, odr.exponential) + >>> output = odr_obj.run() + >>> print(output.beta) + [-10. 0.5] + + """ + + def __init__(self): + super().__init__(_exp_fcn, fjacd=_exp_fjd, fjacb=_exp_fjb, + estimate=_exp_est, + meta={'name': 'Exponential', + 'equ': 'y= B_0 + exp(B_1 * x)', + 'TeXequ': r'$y=\beta_0 + e^{\beta_1 x}$'}) + + +exponential = _ExponentialModel() + + +def _unilin(B, x): + return x*B[0] + B[1] + + +def _unilin_fjd(B, x): + return np.ones(x.shape, float) * B[0] + + +def _unilin_fjb(B, x): + _ret = np.concatenate((x, np.ones(x.shape, float))) + _ret.shape = (2,) + x.shape + + return _ret + + +def _unilin_est(data): + return (1., 1.) + + +def _quadratic(B, x): + return x*(x*B[0] + B[1]) + B[2] + + +def _quad_fjd(B, x): + return 2*x*B[0] + B[1] + + +def _quad_fjb(B, x): + _ret = np.concatenate((x*x, x, np.ones(x.shape, float))) + _ret.shape = (3,) + x.shape + + return _ret + + +def _quad_est(data): + return (1.,1.,1.) 
+ + +class _UnilinearModel(Model): + r""" + Univariate linear model + + This model is defined by :math:`y = \beta_0 x + \beta_1` + + Examples + -------- + We can calculate orthogonal distance regression with an unilinear model: + + >>> from scipy import odr + >>> import numpy as np + >>> x = np.linspace(0.0, 5.0) + >>> y = 1.0 * x + 2.0 + >>> data = odr.Data(x, y) + >>> odr_obj = odr.ODR(data, odr.unilinear) + >>> output = odr_obj.run() + >>> print(output.beta) + [1. 2.] + + """ + + def __init__(self): + super().__init__(_unilin, fjacd=_unilin_fjd, fjacb=_unilin_fjb, + estimate=_unilin_est, + meta={'name': 'Univariate Linear', + 'equ': 'y = B_0 * x + B_1', + 'TeXequ': '$y = \\beta_0 x + \\beta_1$'}) + + +unilinear = _UnilinearModel() + + +class _QuadraticModel(Model): + r""" + Quadratic model + + This model is defined by :math:`y = \beta_0 x^2 + \beta_1 x + \beta_2` + + Examples + -------- + We can calculate orthogonal distance regression with a quadratic model: + + >>> from scipy import odr + >>> import numpy as np + >>> x = np.linspace(0.0, 5.0) + >>> y = 1.0 * x ** 2 + 2.0 * x + 3.0 + >>> data = odr.Data(x, y) + >>> odr_obj = odr.ODR(data, odr.quadratic) + >>> output = odr_obj.run() + >>> print(output.beta) + [1. 2. 3.] + + """ + + def __init__(self): + super().__init__( + _quadratic, fjacd=_quad_fjd, fjacb=_quad_fjb, estimate=_quad_est, + meta={'name': 'Quadratic', + 'equ': 'y = B_0*x**2 + B_1*x + B_2', + 'TeXequ': '$y = \\beta_0 x^2 + \\beta_1 x + \\beta_2'}) + + +quadratic = _QuadraticModel() diff --git a/parrot/lib/python3.10/site-packages/scipy/odr/_odrpack.py b/parrot/lib/python3.10/site-packages/scipy/odr/_odrpack.py new file mode 100644 index 0000000000000000000000000000000000000000..609c2c77835befa7e2edbc35357a8eef05c2d55c --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/odr/_odrpack.py @@ -0,0 +1,1151 @@ +""" +Python wrappers for Orthogonal Distance Regression (ODRPACK). 
+ +Notes +===== + +* Array formats -- FORTRAN stores its arrays in memory column first, i.e., an + array element A(i, j, k) will be next to A(i+1, j, k). In C and, consequently, + NumPy, arrays are stored row first: A[i, j, k] is next to A[i, j, k+1]. For + efficiency and convenience, the input and output arrays of the fitting + function (and its Jacobians) are passed to FORTRAN without transposition. + Therefore, where the ODRPACK documentation says that the X array is of shape + (N, M), it will be passed to the Python function as an array of shape (M, N). + If M==1, the 1-D case, then nothing matters; if M>1, then your + Python functions will be dealing with arrays that are indexed in reverse of + the ODRPACK documentation. No real issue, but watch out for your indexing of + the Jacobians: the i,jth elements (@f_i/@x_j) evaluated at the nth + observation will be returned as jacd[j, i, n]. Except for the Jacobians, it + really is easier to deal with x[0] and x[1] than x[:,0] and x[:,1]. Of course, + you can always use the transpose() function from SciPy explicitly. + +* Examples -- See the accompanying file test/test.py for examples of how to set + up fits of your own. Some are taken from the User's Guide; some are from + other sources. + +* Models -- Some common models are instantiated in the accompanying module + models.py . Contributions are welcome. + +Credits +======= + +* Thanks to Arnold Moene and Gerard Vermeulen for fixing some killer bugs. + +Robert Kern +robert.kern@gmail.com + +""" +import os + +import numpy as np +from warnings import warn +from scipy.odr import __odrpack + +__all__ = ['odr', 'OdrWarning', 'OdrError', 'OdrStop', + 'Data', 'RealData', 'Model', 'Output', 'ODR', + 'odr_error', 'odr_stop'] + +odr = __odrpack.odr + + +class OdrWarning(UserWarning): + """ + Warning indicating that the data passed into + ODR will cause problems when passed into 'odr' + that the user should be aware of. 
+ """ + pass + + +class OdrError(Exception): + """ + Exception indicating an error in fitting. + + This is raised by `~scipy.odr.odr` if an error occurs during fitting. + """ + pass + + +class OdrStop(Exception): + """ + Exception stopping fitting. + + You can raise this exception in your objective function to tell + `~scipy.odr.odr` to stop fitting. + """ + pass + + +# Backwards compatibility +odr_error = OdrError +odr_stop = OdrStop + +__odrpack._set_exceptions(OdrError, OdrStop) + + +def _conv(obj, dtype=None): + """ Convert an object to the preferred form for input to the odr routine. + """ + + if obj is None: + return obj + else: + if dtype is None: + obj = np.asarray(obj) + else: + obj = np.asarray(obj, dtype) + if obj.shape == (): + # Scalar. + return obj.dtype.type(obj) + else: + return obj + + +def _report_error(info): + """ Interprets the return code of the odr routine. + + Parameters + ---------- + info : int + The return code of the odr routine. + + Returns + ------- + problems : list(str) + A list of messages about why the odr() routine stopped. 
+ """ + + stopreason = ('Blank', + 'Sum of squares convergence', + 'Parameter convergence', + 'Both sum of squares and parameter convergence', + 'Iteration limit reached')[info % 5] + + if info >= 5: + # questionable results or fatal error + + I = (info//10000 % 10, + info//1000 % 10, + info//100 % 10, + info//10 % 10, + info % 10) + problems = [] + + if I[0] == 0: + if I[1] != 0: + problems.append('Derivatives possibly not correct') + if I[2] != 0: + problems.append('Error occurred in callback') + if I[3] != 0: + problems.append('Problem is not full rank at solution') + problems.append(stopreason) + elif I[0] == 1: + if I[1] != 0: + problems.append('N < 1') + if I[2] != 0: + problems.append('M < 1') + if I[3] != 0: + problems.append('NP < 1 or NP > N') + if I[4] != 0: + problems.append('NQ < 1') + elif I[0] == 2: + if I[1] != 0: + problems.append('LDY and/or LDX incorrect') + if I[2] != 0: + problems.append('LDWE, LD2WE, LDWD, and/or LD2WD incorrect') + if I[3] != 0: + problems.append('LDIFX, LDSTPD, and/or LDSCLD incorrect') + if I[4] != 0: + problems.append('LWORK and/or LIWORK too small') + elif I[0] == 3: + if I[1] != 0: + problems.append('STPB and/or STPD incorrect') + if I[2] != 0: + problems.append('SCLB and/or SCLD incorrect') + if I[3] != 0: + problems.append('WE incorrect') + if I[4] != 0: + problems.append('WD incorrect') + elif I[0] == 4: + problems.append('Error in derivatives') + elif I[0] == 5: + problems.append('Error occurred in callback') + elif I[0] == 6: + problems.append('Numerical error detected') + + return problems + + else: + return [stopreason] + + +class Data: + """ + The data to fit. + + Parameters + ---------- + x : array_like + Observed data for the independent variable of the regression + y : array_like, optional + If array-like, observed data for the dependent variable of the + regression. A scalar input implies that the model to be used on + the data is implicit. 
+ we : array_like, optional + If `we` is a scalar, then that value is used for all data points (and + all dimensions of the response variable). + If `we` is a rank-1 array of length q (the dimensionality of the + response variable), then this vector is the diagonal of the covariant + weighting matrix for all data points. + If `we` is a rank-1 array of length n (the number of data points), then + the i'th element is the weight for the i'th response variable + observation (single-dimensional only). + If `we` is a rank-2 array of shape (q, q), then this is the full + covariant weighting matrix broadcast to each observation. + If `we` is a rank-2 array of shape (q, n), then `we[:,i]` is the + diagonal of the covariant weighting matrix for the i'th observation. + If `we` is a rank-3 array of shape (q, q, n), then `we[:,:,i]` is the + full specification of the covariant weighting matrix for each + observation. + If the fit is implicit, then only a positive scalar value is used. + wd : array_like, optional + If `wd` is a scalar, then that value is used for all data points + (and all dimensions of the input variable). If `wd` = 0, then the + covariant weighting matrix for each observation is set to the identity + matrix (so each dimension of each observation has the same weight). + If `wd` is a rank-1 array of length m (the dimensionality of the input + variable), then this vector is the diagonal of the covariant weighting + matrix for all data points. + If `wd` is a rank-1 array of length n (the number of data points), then + the i'th element is the weight for the ith input variable observation + (single-dimensional only). + If `wd` is a rank-2 array of shape (m, m), then this is the full + covariant weighting matrix broadcast to each observation. + If `wd` is a rank-2 array of shape (m, n), then `wd[:,i]` is the + diagonal of the covariant weighting matrix for the ith observation. 
+ If `wd` is a rank-3 array of shape (m, m, n), then `wd[:,:,i]` is the + full specification of the covariant weighting matrix for each + observation. + fix : array_like of ints, optional + The `fix` argument is the same as ifixx in the class ODR. It is an + array of integers with the same shape as data.x that determines which + input observations are treated as fixed. One can use a sequence of + length m (the dimensionality of the input observations) to fix some + dimensions for all observations. A value of 0 fixes the observation, + a value > 0 makes it free. + meta : dict, optional + Free-form dictionary for metadata. + + Notes + ----- + Each argument is attached to the member of the instance of the same name. + The structures of `x` and `y` are described in the Model class docstring. + If `y` is an integer, then the Data instance can only be used to fit with + implicit models where the dimensionality of the response is equal to the + specified value of `y`. + + The `we` argument weights the effect a deviation in the response variable + has on the fit. The `wd` argument weights the effect a deviation in the + input variable has on the fit. To handle multidimensional inputs and + responses easily, the structure of these arguments has the n'th + dimensional axis first. These arguments heavily use the structured + arguments feature of ODRPACK to conveniently and flexibly support all + options. See the ODRPACK User's Guide for a full explanation of how these + weights are used in the algorithm. Basically, a higher value of the weight + for a particular data point makes a deviation at that point more + detrimental to the fit. 
+ + """ + + def __init__(self, x, y=None, we=None, wd=None, fix=None, meta=None): + self.x = _conv(x) + + if not isinstance(self.x, np.ndarray): + raise ValueError("Expected an 'ndarray' of data for 'x', " + f"but instead got data of type '{type(self.x).__name__}'") + + self.y = _conv(y) + self.we = _conv(we) + self.wd = _conv(wd) + self.fix = _conv(fix) + self.meta = {} if meta is None else meta + + def set_meta(self, **kwds): + """ Update the metadata dictionary with the keywords and data provided + by keywords. + + Examples + -------- + :: + + data.set_meta(lab="Ph 7; Lab 26", title="Ag110 + Ag108 Decay") + """ + + self.meta.update(kwds) + + def __getattr__(self, attr): + """ Dispatch attribute access to the metadata dictionary. + """ + if attr != "meta" and attr in self.meta: + return self.meta[attr] + else: + raise AttributeError("'%s' not in metadata" % attr) + + +class RealData(Data): + """ + The data, with weightings as actual standard deviations and/or + covariances. + + Parameters + ---------- + x : array_like + Observed data for the independent variable of the regression + y : array_like, optional + If array-like, observed data for the dependent variable of the + regression. A scalar input implies that the model to be used on + the data is implicit. + sx : array_like, optional + Standard deviations of `x`. + `sx` are standard deviations of `x` and are converted to weights by + dividing 1.0 by their squares. + sy : array_like, optional + Standard deviations of `y`. + `sy` are standard deviations of `y` and are converted to weights by + dividing 1.0 by their squares. + covx : array_like, optional + Covariance of `x` + `covx` is an array of covariance matrices of `x` and are converted to + weights by performing a matrix inversion on each observation's + covariance matrix. 
+ covy : array_like, optional + Covariance of `y` + `covy` is an array of covariance matrices and are converted to + weights by performing a matrix inversion on each observation's + covariance matrix. + fix : array_like, optional + The argument and member fix is the same as Data.fix and ODR.ifixx: + It is an array of integers with the same shape as `x` that + determines which input observations are treated as fixed. One can + use a sequence of length m (the dimensionality of the input + observations) to fix some dimensions for all observations. A value + of 0 fixes the observation, a value > 0 makes it free. + meta : dict, optional + Free-form dictionary for metadata. + + Notes + ----- + The weights `wd` and `we` are computed from provided values as follows: + + `sx` and `sy` are converted to weights by dividing 1.0 by their squares. + For example, ``wd = 1./np.power(`sx`, 2)``. + + `covx` and `covy` are arrays of covariance matrices and are converted to + weights by performing a matrix inversion on each observation's covariance + matrix. For example, ``we[i] = np.linalg.inv(covy[i])``. + + These arguments follow the same structured argument conventions as wd and + we only restricted by their natures: `sx` and `sy` can't be rank-3, but + `covx` and `covy` can be. + + Only set *either* `sx` or `covx` (not both). Setting both will raise an + exception. Same with `sy` and `covy`. 
+ + """ + + def __init__(self, x, y=None, sx=None, sy=None, covx=None, covy=None, + fix=None, meta=None): + if (sx is not None) and (covx is not None): + raise ValueError("cannot set both sx and covx") + if (sy is not None) and (covy is not None): + raise ValueError("cannot set both sy and covy") + + # Set flags for __getattr__ + self._ga_flags = {} + if sx is not None: + self._ga_flags['wd'] = 'sx' + else: + self._ga_flags['wd'] = 'covx' + if sy is not None: + self._ga_flags['we'] = 'sy' + else: + self._ga_flags['we'] = 'covy' + + self.x = _conv(x) + + if not isinstance(self.x, np.ndarray): + raise ValueError("Expected an 'ndarray' of data for 'x', " + f"but instead got data of type '{type(self.x).__name__}'") + + self.y = _conv(y) + self.sx = _conv(sx) + self.sy = _conv(sy) + self.covx = _conv(covx) + self.covy = _conv(covy) + self.fix = _conv(fix) + self.meta = {} if meta is None else meta + + def _sd2wt(self, sd): + """ Convert standard deviation to weights. + """ + + return 1./np.power(sd, 2) + + def _cov2wt(self, cov): + """ Convert covariance matrix(-ices) to weights. + """ + + from scipy.linalg import inv + + if len(cov.shape) == 2: + return inv(cov) + else: + weights = np.zeros(cov.shape, float) + + for i in range(cov.shape[-1]): # n + weights[:,:,i] = inv(cov[:,:,i]) + + return weights + + def __getattr__(self, attr): + + if attr not in ('wd', 'we'): + if attr != "meta" and attr in self.meta: + return self.meta[attr] + else: + raise AttributeError("'%s' not in metadata" % attr) + else: + lookup_tbl = {('wd', 'sx'): (self._sd2wt, self.sx), + ('wd', 'covx'): (self._cov2wt, self.covx), + ('we', 'sy'): (self._sd2wt, self.sy), + ('we', 'covy'): (self._cov2wt, self.covy)} + + func, arg = lookup_tbl[(attr, self._ga_flags[attr])] + + if arg is not None: + return func(*(arg,)) + else: + return None + + +class Model: + """ + The Model class stores information about the function you wish to fit. 
+ + It stores the function itself, at the least, and optionally stores + functions which compute the Jacobians used during fitting. Also, one + can provide a function that will provide reasonable starting values + for the fit parameters possibly given the set of data. + + Parameters + ---------- + fcn : function + fcn(beta, x) --> y + fjacb : function + Jacobian of fcn wrt the fit parameters beta. + + fjacb(beta, x) --> @f_i(x,B)/@B_j + fjacd : function + Jacobian of fcn wrt the (possibly multidimensional) input + variable. + + fjacd(beta, x) --> @f_i(x,B)/@x_j + extra_args : tuple, optional + If specified, `extra_args` should be a tuple of extra + arguments to pass to `fcn`, `fjacb`, and `fjacd`. Each will be called + by `apply(fcn, (beta, x) + extra_args)` + estimate : array_like of rank-1 + Provides estimates of the fit parameters from the data + + estimate(data) --> estbeta + implicit : boolean + If TRUE, specifies that the model + is implicit; i.e `fcn(beta, x)` ~= 0 and there is no y data to fit + against + meta : dict, optional + freeform dictionary of metadata for the model + + Notes + ----- + Note that the `fcn`, `fjacb`, and `fjacd` operate on NumPy arrays and + return a NumPy array. The `estimate` object takes an instance of the + Data class. + + Here are the rules for the shapes of the argument and return + arrays of the callback functions: + + `x` + if the input data is single-dimensional, then `x` is rank-1 + array; i.e., ``x = array([1, 2, 3, ...]); x.shape = (n,)`` + If the input data is multi-dimensional, then `x` is a rank-2 array; + i.e., ``x = array([[1, 2, ...], [2, 4, ...]]); x.shape = (m, n)``. + In all cases, it has the same shape as the input data array passed to + `~scipy.odr.odr`. `m` is the dimensionality of the input data, + `n` is the number of observations. + `y` + if the response variable is single-dimensional, then `y` is a + rank-1 array, i.e., ``y = array([2, 4, ...]); y.shape = (n,)``. 
+ If the response variable is multi-dimensional, then `y` is a rank-2 + array, i.e., ``y = array([[2, 4, ...], [3, 6, ...]]); y.shape = + (q, n)`` where `q` is the dimensionality of the response variable. + `beta` + rank-1 array of length `p` where `p` is the number of parameters; + i.e. ``beta = array([B_1, B_2, ..., B_p])`` + `fjacb` + if the response variable is multi-dimensional, then the + return array's shape is `(q, p, n)` such that ``fjacb(x,beta)[l,k,i] = + d f_l(X,B)/d B_k`` evaluated at the ith data point. If `q == 1`, then + the return array is only rank-2 and with shape `(p, n)`. + `fjacd` + as with fjacb, only the return array's shape is `(q, m, n)` + such that ``fjacd(x,beta)[l,j,i] = d f_l(X,B)/d X_j`` at the ith data + point. If `q == 1`, then the return array's shape is `(m, n)`. If + `m == 1`, the shape is (q, n). If `m == q == 1`, the shape is `(n,)`. + + """ + + def __init__(self, fcn, fjacb=None, fjacd=None, + extra_args=None, estimate=None, implicit=0, meta=None): + + self.fcn = fcn + self.fjacb = fjacb + self.fjacd = fjacd + + if extra_args is not None: + extra_args = tuple(extra_args) + + self.extra_args = extra_args + self.estimate = estimate + self.implicit = implicit + self.meta = meta if meta is not None else {} + + def set_meta(self, **kwds): + """ Update the metadata dictionary with the keywords and data provided + here. + + Examples + -------- + set_meta(name="Exponential", equation="y = a exp(b x) + c") + """ + + self.meta.update(kwds) + + def __getattr__(self, attr): + """ Dispatch attribute access to the metadata. + """ + + if attr != "meta" and attr in self.meta: + return self.meta[attr] + else: + raise AttributeError("'%s' not in metadata" % attr) + + +class Output: + """ + The Output class stores the output of an ODR run. + + Attributes + ---------- + beta : ndarray + Estimated parameter values, of shape (q,). + sd_beta : ndarray + Standard deviations of the estimated parameters, of shape (p,). 
+ cov_beta : ndarray + Covariance matrix of the estimated parameters, of shape (p,p). + Note that this `cov_beta` is not scaled by the residual variance + `res_var`, whereas `sd_beta` is. This means + ``np.sqrt(np.diag(output.cov_beta * output.res_var))`` is the same + result as `output.sd_beta`. + delta : ndarray, optional + Array of estimated errors in input variables, of same shape as `x`. + eps : ndarray, optional + Array of estimated errors in response variables, of same shape as `y`. + xplus : ndarray, optional + Array of ``x + delta``. + y : ndarray, optional + Array ``y = fcn(x + delta)``. + res_var : float, optional + Residual variance. + sum_square : float, optional + Sum of squares error. + sum_square_delta : float, optional + Sum of squares of delta error. + sum_square_eps : float, optional + Sum of squares of eps error. + inv_condnum : float, optional + Inverse condition number (cf. ODRPACK UG p. 77). + rel_error : float, optional + Relative error in function values computed within fcn. + work : ndarray, optional + Final work array. + work_ind : dict, optional + Indices into work for drawing out values (cf. ODRPACK UG p. 83). + info : int, optional + Reason for returning, as output by ODRPACK (cf. ODRPACK UG p. 38). + stopreason : list of str, optional + `info` interpreted into English. + + Notes + ----- + Takes one argument for initialization, the return value from the + function `~scipy.odr.odr`. The attributes listed as "optional" above are + only present if `~scipy.odr.odr` was run with ``full_output=1``. + + """ + + def __init__(self, output): + self.beta = output[0] + self.sd_beta = output[1] + self.cov_beta = output[2] + + if len(output) == 4: + # full output + self.__dict__.update(output[3]) + self.stopreason = _report_error(self.info) + + def pprint(self): + """ Pretty-print important results. 
+ """ + + print('Beta:', self.beta) + print('Beta Std Error:', self.sd_beta) + print('Beta Covariance:', self.cov_beta) + if hasattr(self, 'info'): + print('Residual Variance:',self.res_var) + print('Inverse Condition #:', self.inv_condnum) + print('Reason(s) for Halting:') + for r in self.stopreason: + print(' %s' % r) + + +class ODR: + """ + The ODR class gathers all information and coordinates the running of the + main fitting routine. + + Members of instances of the ODR class have the same names as the arguments + to the initialization routine. + + Parameters + ---------- + data : Data class instance + instance of the Data class + model : Model class instance + instance of the Model class + + Other Parameters + ---------------- + beta0 : array_like of rank-1 + a rank-1 sequence of initial parameter values. Optional if + model provides an "estimate" function to estimate these values. + delta0 : array_like of floats of rank-1, optional + a (double-precision) float array to hold the initial values of + the errors in the input variables. Must be same shape as data.x + ifixb : array_like of ints of rank-1, optional + sequence of integers with the same length as beta0 that determines + which parameters are held fixed. A value of 0 fixes the parameter, + a value > 0 makes the parameter free. + ifixx : array_like of ints with same shape as data.x, optional + an array of integers with the same shape as data.x that determines + which input observations are treated as fixed. One can use a sequence + of length m (the dimensionality of the input observations) to fix some + dimensions for all observations. A value of 0 fixes the observation, + a value > 0 makes it free. + job : int, optional + an integer telling ODRPACK what tasks to perform. See p. 31 of the + ODRPACK User's Guide if you absolutely must set the value here. Use the + method set_job post-initialization for a more readable interface. + iprint : int, optional + an integer telling ODRPACK what to print. See pp. 
33-34 of the + ODRPACK User's Guide if you absolutely must set the value here. Use the + method set_iprint post-initialization for a more readable interface. + errfile : str, optional + string with the filename to print ODRPACK errors to. If the file already + exists, an error will be thrown. The `overwrite` argument can be used to + prevent this. *Do Not Open This File Yourself!* + rptfile : str, optional + string with the filename to print ODRPACK summaries to. If the file + already exists, an error will be thrown. The `overwrite` argument can be + used to prevent this. *Do Not Open This File Yourself!* + ndigit : int, optional + integer specifying the number of reliable digits in the computation + of the function. + taufac : float, optional + float specifying the initial trust region. The default value is 1. + The initial trust region is equal to taufac times the length of the + first computed Gauss-Newton step. taufac must be less than 1. + sstol : float, optional + float specifying the tolerance for convergence based on the relative + change in the sum-of-squares. The default value is eps**(1/2) where eps + is the smallest value such that 1 + eps > 1 for double precision + computation on the machine. sstol must be less than 1. + partol : float, optional + float specifying the tolerance for convergence based on the relative + change in the estimated parameters. The default value is eps**(2/3) for + explicit models and ``eps**(1/3)`` for implicit models. partol must be less + than 1. + maxit : int, optional + integer specifying the maximum number of iterations to perform. For + first runs, maxit is the total number of iterations performed and + defaults to 50. For restarts, maxit is the number of additional + iterations to perform and defaults to 10. + stpb : array_like, optional + sequence (``len(stpb) == len(beta0)``) of relative step sizes to compute + finite difference derivatives wrt the parameters. 
+ stpd : optional + array (``stpd.shape == data.x.shape`` or ``stpd.shape == (m,)``) of relative + step sizes to compute finite difference derivatives wrt the input + variable errors. If stpd is a rank-1 array with length m (the + dimensionality of the input variable), then the values are broadcast to + all observations. + sclb : array_like, optional + sequence (``len(stpb) == len(beta0)``) of scaling factors for the + parameters. The purpose of these scaling factors are to scale all of + the parameters to around unity. Normally appropriate scaling factors + are computed if this argument is not specified. Specify them yourself + if the automatic procedure goes awry. + scld : array_like, optional + array (scld.shape == data.x.shape or scld.shape == (m,)) of scaling + factors for the *errors* in the input variables. Again, these factors + are automatically computed if you do not provide them. If scld.shape == + (m,), then the scaling factors are broadcast to all observations. + work : ndarray, optional + array to hold the double-valued working data for ODRPACK. When + restarting, takes the value of self.output.work. + iwork : ndarray, optional + array to hold the integer-valued working data for ODRPACK. When + restarting, takes the value of self.output.iwork. + overwrite : bool, optional + If it is True, output files defined by `errfile` and `rptfile` are + overwritten. The default is False. 
+ + Attributes + ---------- + data : Data + The data for this fit + model : Model + The model used in fit + output : Output + An instance if the Output class containing all of the returned + data from an invocation of ODR.run() or ODR.restart() + + """ + + def __init__(self, data, model, beta0=None, delta0=None, ifixb=None, + ifixx=None, job=None, iprint=None, errfile=None, rptfile=None, + ndigit=None, taufac=None, sstol=None, partol=None, maxit=None, + stpb=None, stpd=None, sclb=None, scld=None, work=None, iwork=None, + overwrite=False): + + self.data = data + self.model = model + + if beta0 is None: + if self.model.estimate is not None: + self.beta0 = _conv(self.model.estimate(self.data)) + else: + raise ValueError( + "must specify beta0 or provide an estimator with the model" + ) + else: + self.beta0 = _conv(beta0) + + if ifixx is None and data.fix is not None: + ifixx = data.fix + + if overwrite: + # remove output files for overwriting. + if rptfile is not None and os.path.exists(rptfile): + os.remove(rptfile) + if errfile is not None and os.path.exists(errfile): + os.remove(errfile) + + self.delta0 = _conv(delta0) + # These really are 32-bit integers in FORTRAN (gfortran), even on 64-bit + # platforms. + # XXX: some other FORTRAN compilers may not agree. + self.ifixx = _conv(ifixx, dtype=np.int32) + self.ifixb = _conv(ifixb, dtype=np.int32) + self.job = job + self.iprint = iprint + self.errfile = errfile + self.rptfile = rptfile + self.ndigit = ndigit + self.taufac = taufac + self.sstol = sstol + self.partol = partol + self.maxit = maxit + self.stpb = _conv(stpb) + self.stpd = _conv(stpd) + self.sclb = _conv(sclb) + self.scld = _conv(scld) + self.work = _conv(work) + self.iwork = _conv(iwork) + + self.output = None + + self._check() + + def _check(self): + """ Check the inputs for consistency, but don't bother checking things + that the builtin function odr will check. 
+ """ + + x_s = list(self.data.x.shape) + + if isinstance(self.data.y, np.ndarray): + y_s = list(self.data.y.shape) + if self.model.implicit: + raise OdrError("an implicit model cannot use response data") + else: + # implicit model with q == self.data.y + y_s = [self.data.y, x_s[-1]] + if not self.model.implicit: + raise OdrError("an explicit model needs response data") + self.set_job(fit_type=1) + + if x_s[-1] != y_s[-1]: + raise OdrError("number of observations do not match") + + n = x_s[-1] + + if len(x_s) == 2: + m = x_s[0] + else: + m = 1 + if len(y_s) == 2: + q = y_s[0] + else: + q = 1 + + p = len(self.beta0) + + # permissible output array shapes + + fcn_perms = [(q, n)] + fjacd_perms = [(q, m, n)] + fjacb_perms = [(q, p, n)] + + if q == 1: + fcn_perms.append((n,)) + fjacd_perms.append((m, n)) + fjacb_perms.append((p, n)) + if m == 1: + fjacd_perms.append((q, n)) + if p == 1: + fjacb_perms.append((q, n)) + if m == q == 1: + fjacd_perms.append((n,)) + if p == q == 1: + fjacb_perms.append((n,)) + + # try evaluating the supplied functions to make sure they provide + # sensible outputs + + arglist = (self.beta0, self.data.x) + if self.model.extra_args is not None: + arglist = arglist + self.model.extra_args + res = self.model.fcn(*arglist) + + if res.shape not in fcn_perms: + print(res.shape) + print(fcn_perms) + raise OdrError("fcn does not output %s-shaped array" % y_s) + + if self.model.fjacd is not None: + res = self.model.fjacd(*arglist) + if res.shape not in fjacd_perms: + raise OdrError( + "fjacd does not output %s-shaped array" % repr((q, m, n))) + if self.model.fjacb is not None: + res = self.model.fjacb(*arglist) + if res.shape not in fjacb_perms: + raise OdrError( + "fjacb does not output %s-shaped array" % repr((q, p, n))) + + # check shape of delta0 + + if self.delta0 is not None and self.delta0.shape != self.data.x.shape: + raise OdrError( + "delta0 is not a %s-shaped array" % repr(self.data.x.shape)) + + if self.data.x.size == 0: + warn("Empty data 
detected for ODR instance. " + "Do not expect any fitting to occur", + OdrWarning, stacklevel=3) + + def _gen_work(self): + """ Generate a suitable work array if one does not already exist. + """ + + n = self.data.x.shape[-1] + p = self.beta0.shape[0] + + if len(self.data.x.shape) == 2: + m = self.data.x.shape[0] + else: + m = 1 + + if self.model.implicit: + q = self.data.y + elif len(self.data.y.shape) == 2: + q = self.data.y.shape[0] + else: + q = 1 + + if self.data.we is None: + ldwe = ld2we = 1 + elif len(self.data.we.shape) == 3: + ld2we, ldwe = self.data.we.shape[1:] + else: + we = self.data.we + ldwe = 1 + ld2we = 1 + if we.ndim == 1 and q == 1: + ldwe = n + elif we.ndim == 2: + if we.shape == (q, q): + ld2we = q + elif we.shape == (q, n): + ldwe = n + + if self.job % 10 < 2: + # ODR not OLS + lwork = (18 + 11*p + p*p + m + m*m + 4*n*q + 6*n*m + 2*n*q*p + + 2*n*q*m + q*q + 5*q + q*(p+m) + ldwe*ld2we*q) + else: + # OLS not ODR + lwork = (18 + 11*p + p*p + m + m*m + 4*n*q + 2*n*m + 2*n*q*p + + 5*q + q*(p+m) + ldwe*ld2we*q) + + if isinstance(self.work, np.ndarray) and self.work.shape == (lwork,)\ + and self.work.dtype.str.endswith('f8'): + # the existing array is fine + return + else: + self.work = np.zeros((lwork,), float) + + def set_job(self, fit_type=None, deriv=None, var_calc=None, + del_init=None, restart=None): + """ + Sets the "job" parameter is a hopefully comprehensible way. + + If an argument is not specified, then the value is left as is. The + default value from class initialization is for all of these options set + to 0. 
+ + Parameters + ---------- + fit_type : {0, 1, 2} int + 0 -> explicit ODR + + 1 -> implicit ODR + + 2 -> ordinary least-squares + deriv : {0, 1, 2, 3} int + 0 -> forward finite differences + + 1 -> central finite differences + + 2 -> user-supplied derivatives (Jacobians) with results + checked by ODRPACK + + 3 -> user-supplied derivatives, no checking + var_calc : {0, 1, 2} int + 0 -> calculate asymptotic covariance matrix and fit + parameter uncertainties (V_B, s_B) using derivatives + recomputed at the final solution + + 1 -> calculate V_B and s_B using derivatives from last iteration + + 2 -> do not calculate V_B and s_B + del_init : {0, 1} int + 0 -> initial input variable offsets set to 0 + + 1 -> initial offsets provided by user in variable "work" + restart : {0, 1} int + 0 -> fit is not a restart + + 1 -> fit is a restart + + Notes + ----- + The permissible values are different from those given on pg. 31 of the + ODRPACK User's Guide only in that one cannot specify numbers greater than + the last value for each variable. + + If one does not supply functions to compute the Jacobians, the fitting + procedure will change deriv to 0, finite differences, as a default. To + initialize the input variable offsets by yourself, set del_init to 1 and + put the offsets into the "work" variable correctly. 
+ + """ + + if self.job is None: + job_l = [0, 0, 0, 0, 0] + else: + job_l = [self.job // 10000 % 10, + self.job // 1000 % 10, + self.job // 100 % 10, + self.job // 10 % 10, + self.job % 10] + + if fit_type in (0, 1, 2): + job_l[4] = fit_type + if deriv in (0, 1, 2, 3): + job_l[3] = deriv + if var_calc in (0, 1, 2): + job_l[2] = var_calc + if del_init in (0, 1): + job_l[1] = del_init + if restart in (0, 1): + job_l[0] = restart + + self.job = (job_l[0]*10000 + job_l[1]*1000 + + job_l[2]*100 + job_l[3]*10 + job_l[4]) + + def set_iprint(self, init=None, so_init=None, + iter=None, so_iter=None, iter_step=None, final=None, so_final=None): + """ Set the iprint parameter for the printing of computation reports. + + If any of the arguments are specified here, then they are set in the + iprint member. If iprint is not set manually or with this method, then + ODRPACK defaults to no printing. If no filename is specified with the + member rptfile, then ODRPACK prints to stdout. One can tell ODRPACK to + print to stdout in addition to the specified filename by setting the + so_* arguments to this function, but one cannot specify to print to + stdout but not a file since one can do that by not specifying a rptfile + filename. + + There are three reports: initialization, iteration, and final reports. + They are represented by the arguments init, iter, and final + respectively. The permissible values are 0, 1, and 2 representing "no + report", "short report", and "long report" respectively. + + The argument iter_step (0 <= iter_step <= 9) specifies how often to make + the iteration report; the report will be made for every iter_step'th + iteration starting with iteration one. If iter_step == 0, then no + iteration report is made, regardless of the other arguments. + + If the rptfile is None, then any so_* arguments supplied will raise an + exception. 
+ """ + if self.iprint is None: + self.iprint = 0 + + ip = [self.iprint // 1000 % 10, + self.iprint // 100 % 10, + self.iprint // 10 % 10, + self.iprint % 10] + + # make a list to convert iprint digits to/from argument inputs + # rptfile, stdout + ip2arg = [[0, 0], # none, none + [1, 0], # short, none + [2, 0], # long, none + [1, 1], # short, short + [2, 1], # long, short + [1, 2], # short, long + [2, 2]] # long, long + + if (self.rptfile is None and + (so_init is not None or + so_iter is not None or + so_final is not None)): + raise OdrError( + "no rptfile specified, cannot output to stdout twice") + + iprint_l = ip2arg[ip[0]] + ip2arg[ip[1]] + ip2arg[ip[3]] + + if init is not None: + iprint_l[0] = init + if so_init is not None: + iprint_l[1] = so_init + if iter is not None: + iprint_l[2] = iter + if so_iter is not None: + iprint_l[3] = so_iter + if final is not None: + iprint_l[4] = final + if so_final is not None: + iprint_l[5] = so_final + + if iter_step in range(10): + # 0..9 + ip[2] = iter_step + + ip[0] = ip2arg.index(iprint_l[0:2]) + ip[1] = ip2arg.index(iprint_l[2:4]) + ip[3] = ip2arg.index(iprint_l[4:6]) + + self.iprint = ip[0]*1000 + ip[1]*100 + ip[2]*10 + ip[3] + + def run(self): + """ Run the fitting routine with all of the information given and with ``full_output=1``. + + Returns + ------- + output : Output instance + This object is also assigned to the attribute .output . 
+ """ # noqa: E501 + + args = (self.model.fcn, self.beta0, self.data.y, self.data.x) + kwds = {'full_output': 1} + kwd_l = ['ifixx', 'ifixb', 'job', 'iprint', 'errfile', 'rptfile', + 'ndigit', 'taufac', 'sstol', 'partol', 'maxit', 'stpb', + 'stpd', 'sclb', 'scld', 'work', 'iwork'] + + if self.delta0 is not None and (self.job // 10000) % 10 == 0: + # delta0 provided and fit is not a restart + self._gen_work() + + d0 = np.ravel(self.delta0) + + self.work[:len(d0)] = d0 + + # set the kwds from other objects explicitly + if self.model.fjacb is not None: + kwds['fjacb'] = self.model.fjacb + if self.model.fjacd is not None: + kwds['fjacd'] = self.model.fjacd + if self.data.we is not None: + kwds['we'] = self.data.we + if self.data.wd is not None: + kwds['wd'] = self.data.wd + if self.model.extra_args is not None: + kwds['extra_args'] = self.model.extra_args + + # implicitly set kwds from self's members + for attr in kwd_l: + obj = getattr(self, attr) + if obj is not None: + kwds[attr] = obj + + self.output = Output(odr(*args, **kwds)) + + return self.output + + def restart(self, iter=None): + """ Restarts the run with iter more iterations. + + Parameters + ---------- + iter : int, optional + ODRPACK's default for the number of new iterations is 10. + + Returns + ------- + output : Output instance + This object is also assigned to the attribute .output . 
+ """ + + if self.output is None: + raise OdrError("cannot restart: run() has not been called before") + + self.set_job(restart=1) + self.work = self.output.work + self.iwork = self.output.iwork + + self.maxit = iter + + return self.run() diff --git a/parrot/lib/python3.10/site-packages/scipy/odr/models.py b/parrot/lib/python3.10/site-packages/scipy/odr/models.py new file mode 100644 index 0000000000000000000000000000000000000000..0289b59747bb68a4954e58732ac69d7df144f5f6 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/odr/models.py @@ -0,0 +1,20 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.odr` namespace for importing the functions +# included below. + +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'Model', 'exponential', 'multilinear', 'unilinear', + 'quadratic', 'polynomial' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="odr", module="models", + private_modules=["_models"], all=__all__, + attribute=name) diff --git a/parrot/lib/python3.10/site-packages/scipy/odr/odrpack.py b/parrot/lib/python3.10/site-packages/scipy/odr/odrpack.py new file mode 100644 index 0000000000000000000000000000000000000000..192fb3342b7957703996957c882d44656706e41b --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/odr/odrpack.py @@ -0,0 +1,21 @@ +# This file is not meant for public use and will be removed in SciPy v2.0.0. +# Use the `scipy.odr` namespace for importing the functions +# included below. 
+ +from scipy._lib.deprecation import _sub_module_deprecation + +__all__ = [ # noqa: F822 + 'odr', 'OdrWarning', 'OdrError', 'OdrStop', + 'Data', 'RealData', 'Model', 'Output', 'ODR', + 'odr_error', 'odr_stop' +] + + +def __dir__(): + return __all__ + + +def __getattr__(name): + return _sub_module_deprecation(sub_package="odr", module="odrpack", + private_modules=["_odrpack"], all=__all__, + attribute=name) diff --git a/parrot/lib/python3.10/site-packages/scipy/odr/tests/__init__.py b/parrot/lib/python3.10/site-packages/scipy/odr/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/parrot/lib/python3.10/site-packages/scipy/odr/tests/__pycache__/__init__.cpython-310.pyc b/parrot/lib/python3.10/site-packages/scipy/odr/tests/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..49ade224ca44ffa33c7c16ce8f752209c820e4df Binary files /dev/null and b/parrot/lib/python3.10/site-packages/scipy/odr/tests/__pycache__/__init__.cpython-310.pyc differ diff --git a/parrot/lib/python3.10/site-packages/scipy/odr/tests/test_odr.py b/parrot/lib/python3.10/site-packages/scipy/odr/tests/test_odr.py new file mode 100644 index 0000000000000000000000000000000000000000..d3aa91595b7100d56bf0d9716017b0ef01a52aa5 --- /dev/null +++ b/parrot/lib/python3.10/site-packages/scipy/odr/tests/test_odr.py @@ -0,0 +1,606 @@ +import pickle +import tempfile +import shutil +import os + +import numpy as np +from numpy import pi +from numpy.testing import (assert_array_almost_equal, + assert_equal, assert_warns, + assert_allclose) +import pytest +from pytest import raises as assert_raises + +from scipy.odr import (Data, Model, ODR, RealData, OdrStop, OdrWarning, + multilinear, exponential, unilinear, quadratic, + polynomial) + + +class TestODR: + + # Bad Data for 'x' + + def test_bad_data(self): + assert_raises(ValueError, Data, 2, 1) + assert_raises(ValueError, 
RealData, 2, 1) + + # Empty Data for 'x' + def empty_data_func(self, B, x): + return B[0]*x + B[1] + + def test_empty_data(self): + beta0 = [0.02, 0.0] + linear = Model(self.empty_data_func) + + empty_dat = Data([], []) + assert_warns(OdrWarning, ODR, + empty_dat, linear, beta0=beta0) + + empty_dat = RealData([], []) + assert_warns(OdrWarning, ODR, + empty_dat, linear, beta0=beta0) + + # Explicit Example + + def explicit_fcn(self, B, x): + ret = B[0] + B[1] * np.power(np.exp(B[2]*x) - 1.0, 2) + return ret + + def explicit_fjd(self, B, x): + eBx = np.exp(B[2]*x) + ret = B[1] * 2.0 * (eBx-1.0) * B[2] * eBx + return ret + + def explicit_fjb(self, B, x): + eBx = np.exp(B[2]*x) + res = np.vstack([np.ones(x.shape[-1]), + np.power(eBx-1.0, 2), + B[1]*2.0*(eBx-1.0)*eBx*x]) + return res + + def test_explicit(self): + explicit_mod = Model( + self.explicit_fcn, + fjacb=self.explicit_fjb, + fjacd=self.explicit_fjd, + meta=dict(name='Sample Explicit Model', + ref='ODRPACK UG, pg. 39'), + ) + explicit_dat = Data([0.,0.,5.,7.,7.5,10.,16.,26.,30.,34.,34.5,100.], + [1265.,1263.6,1258.,1254.,1253.,1249.8,1237.,1218.,1220.6, + 1213.8,1215.5,1212.]) + explicit_odr = ODR(explicit_dat, explicit_mod, beta0=[1500.0, -50.0, -0.1], + ifixx=[0,0,1,1,1,1,1,1,1,1,1,0]) + explicit_odr.set_job(deriv=2) + explicit_odr.set_iprint(init=0, iter=0, final=0) + + out = explicit_odr.run() + assert_array_almost_equal( + out.beta, + np.array([1.2646548050648876e+03, -5.4018409956678255e+01, + -8.7849712165253724e-02]), + ) + assert_array_almost_equal( + out.sd_beta, + np.array([1.0349270280543437, 1.583997785262061, 0.0063321988657267]), + ) + assert_array_almost_equal( + out.cov_beta, + np.array([[4.4949592379003039e-01, -3.7421976890364739e-01, + -8.0978217468468912e-04], + [-3.7421976890364739e-01, 1.0529686462751804e+00, + -1.9453521827942002e-03], + [-8.0978217468468912e-04, -1.9453521827942002e-03, + 1.6827336938454476e-05]]), + ) + + # Implicit Example + + def implicit_fcn(self, B, x): + return 
(B[2]*np.power(x[0]-B[0], 2) + + 2.0*B[3]*(x[0]-B[0])*(x[1]-B[1]) + + B[4]*np.power(x[1]-B[1], 2) - 1.0) + + def test_implicit(self): + implicit_mod = Model( + self.implicit_fcn, + implicit=1, + meta=dict(name='Sample Implicit Model', + ref='ODRPACK UG, pg. 49'), + ) + implicit_dat = Data([ + [0.5,1.2,1.6,1.86,2.12,2.36,2.44,2.36,2.06,1.74,1.34,0.9,-0.28, + -0.78,-1.36,-1.9,-2.5,-2.88,-3.18,-3.44], + [-0.12,-0.6,-1.,-1.4,-2.54,-3.36,-4.,-4.75,-5.25,-5.64,-5.97,-6.32, + -6.44,-6.44,-6.41,-6.25,-5.88,-5.5,-5.24,-4.86]], + 1, + ) + implicit_odr = ODR(implicit_dat, implicit_mod, + beta0=[-1.0, -3.0, 0.09, 0.02, 0.08]) + + out = implicit_odr.run() + assert_array_almost_equal( + out.beta, + np.array([-0.9993809167281279, -2.9310484652026476, 0.0875730502693354, + 0.0162299708984738, 0.0797537982976416]), + ) + assert_array_almost_equal( + out.sd_beta, + np.array([0.1113840353364371, 0.1097673310686467, 0.0041060738314314, + 0.0027500347539902, 0.0034962501532468]), + ) + assert_allclose( + out.cov_beta, + np.array([[2.1089274602333052e+00, -1.9437686411979040e+00, + 7.0263550868344446e-02, -4.7175267373474862e-02, + 5.2515575927380355e-02], + [-1.9437686411979040e+00, 2.0481509222414456e+00, + -6.1600515853057307e-02, 4.6268827806232933e-02, + -5.8822307501391467e-02], + [7.0263550868344446e-02, -6.1600515853057307e-02, + 2.8659542561579308e-03, -1.4628662260014491e-03, + 1.4528860663055824e-03], + [-4.7175267373474862e-02, 4.6268827806232933e-02, + -1.4628662260014491e-03, 1.2855592885514335e-03, + -1.2692942951415293e-03], + [5.2515575927380355e-02, -5.8822307501391467e-02, + 1.4528860663055824e-03, -1.2692942951415293e-03, + 2.0778813389755596e-03]]), + rtol=1e-6, atol=2e-6, + ) + + # Multi-variable Example + + def multi_fcn(self, B, x): + if (x < 0.0).any(): + raise OdrStop + theta = pi*B[3]/2. 
+ ctheta = np.cos(theta) + stheta = np.sin(theta) + omega = np.power(2.*pi*x*np.exp(-B[2]), B[3]) + phi = np.arctan2((omega*stheta), (1.0 + omega*ctheta)) + r = (B[0] - B[1]) * np.power(np.sqrt(np.power(1.0 + omega*ctheta, 2) + + np.power(omega*stheta, 2)), -B[4]) + ret = np.vstack([B[1] + r*np.cos(B[4]*phi), + r*np.sin(B[4]*phi)]) + return ret + + def test_multi(self): + multi_mod = Model( + self.multi_fcn, + meta=dict(name='Sample Multi-Response Model', + ref='ODRPACK UG, pg. 56'), + ) + + multi_x = np.array([30.0, 50.0, 70.0, 100.0, 150.0, 200.0, 300.0, 500.0, + 700.0, 1000.0, 1500.0, 2000.0, 3000.0, 5000.0, 7000.0, 10000.0, + 15000.0, 20000.0, 30000.0, 50000.0, 70000.0, 100000.0, 150000.0]) + multi_y = np.array([ + [4.22, 4.167, 4.132, 4.038, 4.019, 3.956, 3.884, 3.784, 3.713, + 3.633, 3.54, 3.433, 3.358, 3.258, 3.193, 3.128, 3.059, 2.984, + 2.934, 2.876, 2.838, 2.798, 2.759], + [0.136, 0.167, 0.188, 0.212, 0.236, 0.257, 0.276, 0.297, 0.309, + 0.311, 0.314, 0.311, 0.305, 0.289, 0.277, 0.255, 0.24, 0.218, + 0.202, 0.182, 0.168, 0.153, 0.139], + ]) + n = len(multi_x) + multi_we = np.zeros((2, 2, n), dtype=float) + multi_ifixx = np.ones(n, dtype=int) + multi_delta = np.zeros(n, dtype=float) + + multi_we[0,0,:] = 559.6 + multi_we[1,0,:] = multi_we[0,1,:] = -1634.0 + multi_we[1,1,:] = 8397.0 + + for i in range(n): + if multi_x[i] < 100.0: + multi_ifixx[i] = 0 + elif multi_x[i] <= 150.0: + pass # defaults are fine + elif multi_x[i] <= 1000.0: + multi_delta[i] = 25.0 + elif multi_x[i] <= 10000.0: + multi_delta[i] = 560.0 + elif multi_x[i] <= 100000.0: + multi_delta[i] = 9500.0 + else: + multi_delta[i] = 144000.0 + if multi_x[i] == 100.0 or multi_x[i] == 150.0: + multi_we[:,:,i] = 0.0 + + multi_dat = Data(multi_x, multi_y, wd=1e-4/np.power(multi_x, 2), + we=multi_we) + multi_odr = ODR(multi_dat, multi_mod, beta0=[4.,2.,7.,.4,.5], + delta0=multi_delta, ifixx=multi_ifixx) + multi_odr.set_job(deriv=1, del_init=1) + + out = multi_odr.run() + assert_array_almost_equal( + 
out.beta, + np.array([4.3799880305938963, 2.4333057577497703, 8.0028845899503978, + 0.5101147161764654, 0.5173902330489161]), + ) + assert_array_almost_equal( + out.sd_beta, + np.array([0.0130625231081944, 0.0130499785273277, 0.1167085962217757, + 0.0132642749596149, 0.0288529201353984]), + ) + assert_array_almost_equal( + out.cov_beta, + np.array([[0.0064918418231375, 0.0036159705923791, 0.0438637051470406, + -0.0058700836512467, 0.011281212888768], + [0.0036159705923791, 0.0064793789429006, 0.0517610978353126, + -0.0051181304940204, 0.0130726943624117], + [0.0438637051470406, 0.0517610978353126, 0.5182263323095322, + -0.0563083340093696, 0.1269490939468611], + [-0.0058700836512467, -0.0051181304940204, -0.0563083340093696, + 0.0066939246261263, -0.0140184391377962], + [0.011281212888768, 0.0130726943624117, 0.1269490939468611, + -0.0140184391377962, 0.0316733013820852]]), + ) + + # Pearson's Data + # K. Pearson, Philosophical Magazine, 2, 559 (1901) + + def pearson_fcn(self, B, x): + return B[0] + B[1]*x + + def test_pearson(self): + p_x = np.array([0.,.9,1.8,2.6,3.3,4.4,5.2,6.1,6.5,7.4]) + p_y = np.array([5.9,5.4,4.4,4.6,3.5,3.7,2.8,2.8,2.4,1.5]) + p_sx = np.array([.03,.03,.04,.035,.07,.11,.13,.22,.74,1.]) + p_sy = np.array([1.,.74,.5,.35,.22,.22,.12,.12,.1,.04]) + + p_dat = RealData(p_x, p_y, sx=p_sx, sy=p_sy) + + # Reverse the data to test invariance of results + pr_dat = RealData(p_y, p_x, sx=p_sy, sy=p_sx) + + p_mod = Model(self.pearson_fcn, meta=dict(name='Uni-linear Fit')) + + p_odr = ODR(p_dat, p_mod, beta0=[1.,1.]) + pr_odr = ODR(pr_dat, p_mod, beta0=[1.,1.]) + + out = p_odr.run() + assert_array_almost_equal( + out.beta, + np.array([5.4767400299231674, -0.4796082367610305]), + ) + assert_array_almost_equal( + out.sd_beta, + np.array([0.3590121690702467, 0.0706291186037444]), + ) + assert_array_almost_equal( + out.cov_beta, + np.array([[0.0854275622946333, -0.0161807025443155], + [-0.0161807025443155, 0.003306337993922]]), + ) + + rout = pr_odr.run() + 
assert_array_almost_equal( + rout.beta, + np.array([11.4192022410781231, -2.0850374506165474]), + ) + assert_array_almost_equal( + rout.sd_beta, + np.array([0.9820231665657161, 0.3070515616198911]), + ) + assert_array_almost_equal( + rout.cov_beta, + np.array([[0.6391799462548782, -0.1955657291119177], + [-0.1955657291119177, 0.0624888159223392]]), + ) + + # Lorentz Peak + # The data is taken from one of the undergraduate physics labs I performed. + + def lorentz(self, beta, x): + return (beta[0]*beta[1]*beta[2] / np.sqrt(np.power(x*x - + beta[2]*beta[2], 2.0) + np.power(beta[1]*x, 2.0))) + + def test_lorentz(self): + l_sy = np.array([.29]*18) + l_sx = np.array([.000972971,.000948268,.000707632,.000706679, + .000706074, .000703918,.000698955,.000456856, + .000455207,.000662717,.000654619,.000652694, + .000000859202,.00106589,.00106378,.00125483, .00140818,.00241839]) + + l_dat = RealData( + [3.9094, 3.85945, 3.84976, 3.84716, 3.84551, 3.83964, 3.82608, + 3.78847, 3.78163, 3.72558, 3.70274, 3.6973, 3.67373, 3.65982, + 3.6562, 3.62498, 3.55525, 3.41886], + [652, 910.5, 984, 1000, 1007.5, 1053, 1160.5, 1409.5, 1430, 1122, + 957.5, 920, 777.5, 709.5, 698, 578.5, 418.5, 275.5], + sx=l_sx, + sy=l_sy, + ) + l_mod = Model(self.lorentz, meta=dict(name='Lorentz Peak')) + l_odr = ODR(l_dat, l_mod, beta0=(1000., .1, 3.8)) + + out = l_odr.run() + assert_array_almost_equal( + out.beta, + np.array([1.4306780846149925e+03, 1.3390509034538309e-01, + 3.7798193600109009e+00]), + ) + assert_array_almost_equal( + out.sd_beta, + np.array([7.3621186811330963e-01, 3.5068899941471650e-04, + 2.4451209281408992e-04]), + ) + assert_array_almost_equal( + out.cov_beta, + np.array([[2.4714409064597873e-01, -6.9067261911110836e-05, + -3.1236953270424990e-05], + [-6.9067261911110836e-05, 5.6077531517333009e-08, + 3.6133261832722601e-08], + [-3.1236953270424990e-05, 3.6133261832722601e-08, + 2.7261220025171730e-08]]), + ) + + def test_ticket_1253(self): + def linear(c, x): + return c[0]*x+c[1] + + 
c = [2.0, 3.0] + x = np.linspace(0, 10) + y = linear(c, x) + + model = Model(linear) + data = Data(x, y, wd=1.0, we=1.0) + job = ODR(data, model, beta0=[1.0, 1.0]) + result = job.run() + assert_equal(result.info, 2) + + # Verify fix for gh-9140 + + def test_ifixx(self): + x1 = [-2.01, -0.99, -0.001, 1.02, 1.98] + x2 = [3.98, 1.01, 0.001, 0.998, 4.01] + fix = np.vstack((np.zeros_like(x1, dtype=int), np.ones_like(x2, dtype=int))) + data = Data(np.vstack((x1, x2)), y=1, fix=fix) + model = Model(lambda beta, x: x[1, :] - beta[0] * x[0, :]**2., implicit=True) + + odr1 = ODR(data, model, beta0=np.array([1.])) + sol1 = odr1.run() + odr2 = ODR(data, model, beta0=np.array([1.]), ifixx=fix) + sol2 = odr2.run() + assert_equal(sol1.beta, sol2.beta) + + # verify bugfix for #11800 in #11802 + def test_ticket_11800(self): + # parameters + beta_true = np.array([1.0, 2.3, 1.1, -1.0, 1.3, 0.5]) + nr_measurements = 10 + + std_dev_x = 0.01 + x_error = np.array([[0.00063445, 0.00515731, 0.00162719, 0.01022866, + -0.01624845, 0.00482652, 0.00275988, -0.00714734, -0.00929201, -0.00687301], + [-0.00831623, -0.00821211, -0.00203459, 0.00938266, -0.00701829, + 0.0032169, 0.00259194, -0.00581017, -0.0030283, 0.01014164]]) + + std_dev_y = 0.05 + y_error = np.array([[0.05275304, 0.04519563, -0.07524086, 0.03575642, + 0.04745194, 0.03806645, 0.07061601, -0.00753604, -0.02592543, -0.02394929], + [0.03632366, 0.06642266, 0.08373122, 0.03988822, -0.0092536, + -0.03750469, -0.03198903, 0.01642066, 0.01293648, -0.05627085]]) + + beta_solution = np.array([ + 2.62920235756665876536e+00, -1.26608484996299608838e+02, + 1.29703572775403074502e+02, -1.88560985401185465804e+00, + 7.83834160771274923718e+01, -7.64124076838087091801e+01]) + + # model's function and Jacobians + def func(beta, x): + y0 = beta[0] + beta[1] * x[0, :] + beta[2] * x[1, :] + y1 = beta[3] + beta[4] * x[0, :] + beta[5] * x[1, :] + + return np.vstack((y0, y1)) + + def df_dbeta_odr(beta, x): + nr_meas = np.shape(x)[1] + zeros = 
np.zeros(nr_meas) + ones = np.ones(nr_meas) + + dy0 = np.array([ones, x[0, :], x[1, :], zeros, zeros, zeros]) + dy1 = np.array([zeros, zeros, zeros, ones, x[0, :], x[1, :]]) + + return np.stack((dy0, dy1)) + + def df_dx_odr(beta, x): + nr_meas = np.shape(x)[1] + ones = np.ones(nr_meas) + + dy0 = np.array([beta[1] * ones, beta[2] * ones]) + dy1 = np.array([beta[4] * ones, beta[5] * ones]) + return np.stack((dy0, dy1)) + + # do measurements with errors in independent and dependent variables + x0_true = np.linspace(1, 10, nr_measurements) + x1_true = np.linspace(1, 10, nr_measurements) + x_true = np.array([x0_true, x1_true]) + + y_true = func(beta_true, x_true) + + x_meas = x_true + x_error + y_meas = y_true + y_error + + # estimate model's parameters + model_f = Model(func, fjacb=df_dbeta_odr, fjacd=df_dx_odr) + + data = RealData(x_meas, y_meas, sx=std_dev_x, sy=std_dev_y) + + odr_obj = ODR(data, model_f, beta0=0.9 * beta_true, maxit=100) + #odr_obj.set_iprint(init=2, iter=0, iter_step=1, final=1) + odr_obj.set_job(deriv=3) + + odr_out = odr_obj.run() + + # check results + assert_equal(odr_out.info, 1) + assert_array_almost_equal(odr_out.beta, beta_solution) + + def test_multilinear_model(self): + x = np.linspace(0.0, 5.0) + y = 10.0 + 5.0 * x + data = Data(x, y) + odr_obj = ODR(data, multilinear) + output = odr_obj.run() + assert_array_almost_equal(output.beta, [10.0, 5.0]) + + def test_exponential_model(self): + x = np.linspace(0.0, 5.0) + y = -10.0 + np.exp(0.5*x) + data = Data(x, y) + odr_obj = ODR(data, exponential) + output = odr_obj.run() + assert_array_almost_equal(output.beta, [-10.0, 0.5]) + + def test_polynomial_model(self): + x = np.linspace(0.0, 5.0) + y = 1.0 + 2.0 * x + 3.0 * x ** 2 + 4.0 * x ** 3 + poly_model = polynomial(3) + data = Data(x, y) + odr_obj = ODR(data, poly_model) + output = odr_obj.run() + assert_array_almost_equal(output.beta, [1.0, 2.0, 3.0, 4.0]) + + def test_unilinear_model(self): + x = np.linspace(0.0, 5.0) + y = 1.0 * x + 2.0 + 
data = Data(x, y) + odr_obj = ODR(data, unilinear) + output = odr_obj.run() + assert_array_almost_equal(output.beta, [1.0, 2.0]) + + def test_quadratic_model(self): + x = np.linspace(0.0, 5.0) + y = 1.0 * x ** 2 + 2.0 * x + 3.0 + data = Data(x, y) + odr_obj = ODR(data, quadratic) + output = odr_obj.run() + assert_array_almost_equal(output.beta, [1.0, 2.0, 3.0]) + + def test_work_ind(self): + + def func(par, x): + b0, b1 = par + return b0 + b1 * x + + # generate some data + n_data = 4 + x = np.arange(n_data) + y = np.where(x % 2, x + 0.1, x - 0.1) + x_err = np.full(n_data, 0.1) + y_err = np.full(n_data, 0.1) + + # do the fitting + linear_model = Model(func) + real_data = RealData(x, y, sx=x_err, sy=y_err) + odr_obj = ODR(real_data, linear_model, beta0=[0.4, 0.4]) + odr_obj.set_job(fit_type=0) + out = odr_obj.run() + + sd_ind = out.work_ind['sd'] + assert_array_almost_equal(out.sd_beta, + out.work[sd_ind:sd_ind + len(out.sd_beta)]) + + @pytest.mark.skipif(True, reason="Fortran I/O prone to crashing so better " + "not to run this test, see gh-13127") + def test_output_file_overwrite(self): + """ + Verify fix for gh-1892 + """ + def func(b, x): + return b[0] + b[1] * x + + p = Model(func) + data = Data(np.arange(10), 12 * np.arange(10)) + tmp_dir = tempfile.mkdtemp() + error_file_path = os.path.join(tmp_dir, "error.dat") + report_file_path = os.path.join(tmp_dir, "report.dat") + try: + ODR(data, p, beta0=[0.1, 13], errfile=error_file_path, + rptfile=report_file_path).run() + ODR(data, p, beta0=[0.1, 13], errfile=error_file_path, + rptfile=report_file_path, overwrite=True).run() + finally: + # remove output files for clean up + shutil.rmtree(tmp_dir) + + def test_odr_model_default_meta(self): + def func(b, x): + return b[0] + b[1] * x + + p = Model(func) + p.set_meta(name='Sample Model Meta', ref='ODRPACK') + assert_equal(p.meta, {'name': 'Sample Model Meta', 'ref': 'ODRPACK'}) + + def test_work_array_del_init(self): + """ + Verify fix for gh-18739 where del_init=1 
fails. + """ + def func(b, x): + return b[0] + b[1] * x + + # generate some data + n_data = 4 + x = np.arange(n_data) + y = np.where(x % 2, x + 0.1, x - 0.1) + x_err = np.full(n_data, 0.1) + y_err = np.full(n_data, 0.1) + + linear_model = Model(func) + # Try various shapes of the `we` array from various `sy` and `covy` + rd0 = RealData(x, y, sx=x_err, sy=y_err) + rd1 = RealData(x, y, sx=x_err, sy=0.1) + rd2 = RealData(x, y, sx=x_err, sy=[0.1]) + rd3 = RealData(x, y, sx=x_err, sy=np.full((1, n_data), 0.1)) + rd4 = RealData(x, y, sx=x_err, covy=[[0.01]]) + rd5 = RealData(x, y, sx=x_err, covy=np.full((1, 1, n_data), 0.01)) + for rd in [rd0, rd1, rd2, rd3, rd4, rd5]: + odr_obj = ODR(rd, linear_model, beta0=[0.4, 0.4], + delta0=np.full(n_data, -0.1)) + odr_obj.set_job(fit_type=0, del_init=1) + # Just make sure that it runs without raising an exception. + odr_obj.run() + + def test_pickling_data(self): + x = np.linspace(0.0, 5.0) + y = 1.0 * x + 2.0 + data = Data(x, y) + + obj_pickle = pickle.dumps(data) + del data + pickle.loads(obj_pickle) + + def test_pickling_real_data(self): + x = np.linspace(0.0, 5.0) + y = 1.0 * x + 2.0 + data = RealData(x, y) + + obj_pickle = pickle.dumps(data) + del data + pickle.loads(obj_pickle) + + def test_pickling_model(self): + obj_pickle = pickle.dumps(unilinear) + pickle.loads(obj_pickle) + + def test_pickling_odr(self): + x = np.linspace(0.0, 5.0) + y = 1.0 * x + 2.0 + odr_obj = ODR(Data(x, y), unilinear) + + obj_pickle = pickle.dumps(odr_obj) + del odr_obj + pickle.loads(obj_pickle) + + def test_pickling_output(self): + x = np.linspace(0.0, 5.0) + y = 1.0 * x + 2.0 + output = ODR(Data(x, y), unilinear).run + + obj_pickle = pickle.dumps(output) + del output + pickle.loads(obj_pickle) diff --git a/vllm/lib/python3.10/site-packages/sympy/benchmarks/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/sympy/benchmarks/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 
0000000000000000000000000000000000000000..72afd670ce8e460f0b73571370993e304856025d Binary files /dev/null and b/vllm/lib/python3.10/site-packages/sympy/benchmarks/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/sympy/benchmarks/__pycache__/bench_discrete_log.cpython-310.pyc b/vllm/lib/python3.10/site-packages/sympy/benchmarks/__pycache__/bench_discrete_log.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..25faf6b717c07eed7d087fadce18d9ad0df530bd Binary files /dev/null and b/vllm/lib/python3.10/site-packages/sympy/benchmarks/__pycache__/bench_discrete_log.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/sympy/benchmarks/__pycache__/bench_symbench.cpython-310.pyc b/vllm/lib/python3.10/site-packages/sympy/benchmarks/__pycache__/bench_symbench.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..310f6d76e57c230b87423451f731544e536f9265 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/sympy/benchmarks/__pycache__/bench_symbench.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/sympy/sets/__init__.py b/vllm/lib/python3.10/site-packages/sympy/sets/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..8b909c0b5ef03b1e1e76dfbf4288f61860575da7 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/sympy/sets/__init__.py @@ -0,0 +1,36 @@ +from .sets import (Set, Interval, Union, FiniteSet, ProductSet, + Intersection, imageset, Complement, SymmetricDifference, + DisjointUnion) + +from .fancysets import ImageSet, Range, ComplexRegion +from .contains import Contains +from .conditionset import ConditionSet +from .ordinals import Ordinal, OmegaPower, ord0 +from .powerset import PowerSet +from ..core.singleton import S +from .handlers.comparison import _eval_is_eq # noqa:F401 +Complexes = S.Complexes +EmptySet = S.EmptySet +Integers = S.Integers +Naturals = S.Naturals +Naturals0 = S.Naturals0 
+Rationals = S.Rationals +Reals = S.Reals +UniversalSet = S.UniversalSet + +__all__ = [ + 'Set', 'Interval', 'Union', 'EmptySet', 'FiniteSet', 'ProductSet', + 'Intersection', 'imageset', 'Complement', 'SymmetricDifference', 'DisjointUnion', + + 'ImageSet', 'Range', 'ComplexRegion', 'Reals', + + 'Contains', + + 'ConditionSet', + + 'Ordinal', 'OmegaPower', 'ord0', + + 'PowerSet', + + 'Reals', 'Naturals', 'Naturals0', 'UniversalSet', 'Integers', 'Rationals', +] diff --git a/vllm/lib/python3.10/site-packages/sympy/sets/__pycache__/__init__.cpython-310.pyc b/vllm/lib/python3.10/site-packages/sympy/sets/__pycache__/__init__.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..b32e4cb22110a184753f0207b63932df79a98883 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/sympy/sets/__pycache__/__init__.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/sympy/sets/__pycache__/fancysets.cpython-310.pyc b/vllm/lib/python3.10/site-packages/sympy/sets/__pycache__/fancysets.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..164b51255fa9ea8c560c67c2029464d3bca309b9 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/sympy/sets/__pycache__/fancysets.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/sympy/sets/__pycache__/ordinals.cpython-310.pyc b/vllm/lib/python3.10/site-packages/sympy/sets/__pycache__/ordinals.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..83a1a68f3ab8195ee56f5390b3f6269d8cc61660 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/sympy/sets/__pycache__/ordinals.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/sympy/sets/__pycache__/setexpr.cpython-310.pyc b/vllm/lib/python3.10/site-packages/sympy/sets/__pycache__/setexpr.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..99d2210f767dff0b1c4fe62b617bd381ba6e39cf Binary files /dev/null 
and b/vllm/lib/python3.10/site-packages/sympy/sets/__pycache__/setexpr.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/sympy/sets/__pycache__/sets.cpython-310.pyc b/vllm/lib/python3.10/site-packages/sympy/sets/__pycache__/sets.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..c02ac0162fb5bf9f88cec8385dc8b61c990bbf18 Binary files /dev/null and b/vllm/lib/python3.10/site-packages/sympy/sets/__pycache__/sets.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/sympy/sets/conditionset.py b/vllm/lib/python3.10/site-packages/sympy/sets/conditionset.py new file mode 100644 index 0000000000000000000000000000000000000000..e847e60ce97d7e9922ce907042ace941838b0ab1 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/sympy/sets/conditionset.py @@ -0,0 +1,246 @@ +from sympy.core.singleton import S +from sympy.core.basic import Basic +from sympy.core.containers import Tuple +from sympy.core.function import Lambda, BadSignatureError +from sympy.core.logic import fuzzy_bool +from sympy.core.relational import Eq +from sympy.core.symbol import Dummy +from sympy.core.sympify import _sympify +from sympy.logic.boolalg import And, as_Boolean +from sympy.utilities.iterables import sift, flatten, has_dups +from sympy.utilities.exceptions import sympy_deprecation_warning +from .contains import Contains +from .sets import Set, Union, FiniteSet, SetKind + + +adummy = Dummy('conditionset') + + +class ConditionSet(Set): + r""" + Set of elements which satisfies a given condition. + + .. 
math:: \{x \mid \textrm{condition}(x) = \texttt{True}, x \in S\} + + Examples + ======== + + >>> from sympy import Symbol, S, ConditionSet, pi, Eq, sin, Interval + >>> from sympy.abc import x, y, z + + >>> sin_sols = ConditionSet(x, Eq(sin(x), 0), Interval(0, 2*pi)) + >>> 2*pi in sin_sols + True + >>> pi/2 in sin_sols + False + >>> 3*pi in sin_sols + False + >>> 5 in ConditionSet(x, x**2 > 4, S.Reals) + True + + If the value is not in the base set, the result is false: + + >>> 5 in ConditionSet(x, x**2 > 4, Interval(2, 4)) + False + + Notes + ===== + + Symbols with assumptions should be avoided or else the + condition may evaluate without consideration of the set: + + >>> n = Symbol('n', negative=True) + >>> cond = (n > 0); cond + False + >>> ConditionSet(n, cond, S.Integers) + EmptySet + + Only free symbols can be changed by using `subs`: + + >>> c = ConditionSet(x, x < 1, {x, z}) + >>> c.subs(x, y) + ConditionSet(x, x < 1, {y, z}) + + To check if ``pi`` is in ``c`` use: + + >>> pi in c + False + + If no base set is specified, the universal set is implied: + + >>> ConditionSet(x, x < 1).base_set + UniversalSet + + Only symbols or symbol-like expressions can be used: + + >>> ConditionSet(x + 1, x + 1 < 1, S.Integers) + Traceback (most recent call last): + ... 
+ ValueError: non-symbol dummy not recognized in condition + + When the base set is a ConditionSet, the symbols will be + unified if possible with preference for the outermost symbols: + + >>> ConditionSet(x, x < y, ConditionSet(z, z + y < 2, S.Integers)) + ConditionSet(x, (x < y) & (x + y < 2), Integers) + + """ + def __new__(cls, sym, condition, base_set=S.UniversalSet): + sym = _sympify(sym) + flat = flatten([sym]) + if has_dups(flat): + raise BadSignatureError("Duplicate symbols detected") + base_set = _sympify(base_set) + if not isinstance(base_set, Set): + raise TypeError( + 'base set should be a Set object, not %s' % base_set) + condition = _sympify(condition) + + if isinstance(condition, FiniteSet): + condition_orig = condition + temp = (Eq(lhs, 0) for lhs in condition) + condition = And(*temp) + sympy_deprecation_warning( + f""" +Using a set for the condition in ConditionSet is deprecated. Use a boolean +instead. + +In this case, replace + + {condition_orig} + +with + + {condition} +""", + deprecated_since_version='1.5', + active_deprecations_target="deprecated-conditionset-set", + ) + + condition = as_Boolean(condition) + + if condition is S.true: + return base_set + + if condition is S.false: + return S.EmptySet + + if base_set is S.EmptySet: + return S.EmptySet + + # no simple answers, so now check syms + for i in flat: + if not getattr(i, '_diff_wrt', False): + raise ValueError('`%s` is not symbol-like' % i) + + if base_set.contains(sym) is S.false: + raise TypeError('sym `%s` is not in base_set `%s`' % (sym, base_set)) + + know = None + if isinstance(base_set, FiniteSet): + sifted = sift( + base_set, lambda _: fuzzy_bool(condition.subs(sym, _))) + if sifted[None]: + know = FiniteSet(*sifted[True]) + base_set = FiniteSet(*sifted[None]) + else: + return FiniteSet(*sifted[True]) + + if isinstance(base_set, cls): + s, c, b = base_set.args + def sig(s): + return cls(s, Eq(adummy, 0)).as_dummy().sym + sa, sb = map(sig, (sym, s)) + if sa != sb: + raise 
BadSignatureError('sym does not match sym of base set') + reps = dict(zip(flatten([sym]), flatten([s]))) + if s == sym: + condition = And(condition, c) + base_set = b + elif not c.free_symbols & sym.free_symbols: + reps = {v: k for k, v in reps.items()} + condition = And(condition, c.xreplace(reps)) + base_set = b + elif not condition.free_symbols & s.free_symbols: + sym = sym.xreplace(reps) + condition = And(condition.xreplace(reps), c) + base_set = b + + # flatten ConditionSet(Contains(ConditionSet())) expressions + if isinstance(condition, Contains) and (sym == condition.args[0]): + if isinstance(condition.args[1], Set): + return condition.args[1].intersect(base_set) + + rv = Basic.__new__(cls, sym, condition, base_set) + return rv if know is None else Union(know, rv) + + sym = property(lambda self: self.args[0]) + condition = property(lambda self: self.args[1]) + base_set = property(lambda self: self.args[2]) + + @property + def free_symbols(self): + cond_syms = self.condition.free_symbols - self.sym.free_symbols + return cond_syms | self.base_set.free_symbols + + @property + def bound_symbols(self): + return flatten([self.sym]) + + def _contains(self, other): + def ok_sig(a, b): + tuples = [isinstance(i, Tuple) for i in (a, b)] + c = tuples.count(True) + if c == 1: + return False + if c == 0: + return True + return len(a) == len(b) and all( + ok_sig(i, j) for i, j in zip(a, b)) + if not ok_sig(self.sym, other): + return S.false + + # try doing base_cond first and return + # False immediately if it is False + base_cond = Contains(other, self.base_set) + if base_cond is S.false: + return S.false + + # Substitute other into condition. This could raise e.g. 
for + # ConditionSet(x, 1/x >= 0, Reals).contains(0) + lamda = Lambda((self.sym,), self.condition) + try: + lambda_cond = lamda(other) + except TypeError: + return None + else: + return And(base_cond, lambda_cond) + + def as_relational(self, other): + f = Lambda(self.sym, self.condition) + if isinstance(self.sym, Tuple): + f = f(*other) + else: + f = f(other) + return And(f, self.base_set.contains(other)) + + def _eval_subs(self, old, new): + sym, cond, base = self.args + dsym = sym.subs(old, adummy) + insym = dsym.has(adummy) + # prioritize changing a symbol in the base + newbase = base.subs(old, new) + if newbase != base: + if not insym: + cond = cond.subs(old, new) + return self.func(sym, cond, newbase) + if insym: + pass # no change of bound symbols via subs + elif getattr(new, '_diff_wrt', False): + cond = cond.subs(old, new) + else: + pass # let error about the symbol raise from __new__ + return self.func(sym, cond, base) + + def _kind(self): + return SetKind(self.sym.kind) diff --git a/vllm/lib/python3.10/site-packages/sympy/sets/contains.py b/vllm/lib/python3.10/site-packages/sympy/sets/contains.py new file mode 100644 index 0000000000000000000000000000000000000000..403d4875279d718724a898efa5cba41bc7bed6ea --- /dev/null +++ b/vllm/lib/python3.10/site-packages/sympy/sets/contains.py @@ -0,0 +1,63 @@ +from sympy.core import S +from sympy.core.sympify import sympify +from sympy.core.relational import Eq, Ne +from sympy.core.parameters import global_parameters +from sympy.logic.boolalg import Boolean +from sympy.utilities.misc import func_name +from .sets import Set + + +class Contains(Boolean): + """ + Asserts that x is an element of the set S. + + Examples + ======== + + >>> from sympy import Symbol, Integer, S, Contains + >>> Contains(Integer(2), S.Integers) + True + >>> Contains(Integer(-2), S.Naturals) + False + >>> i = Symbol('i', integer=True) + >>> Contains(i, S.Naturals) + Contains(i, Naturals) + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Element_%28mathematics%29 + """ + def __new__(cls, x, s, evaluate=None): + x = sympify(x) + s = sympify(s) + + if evaluate is None: + evaluate = global_parameters.evaluate + + if not isinstance(s, Set): + raise TypeError('expecting Set, not %s' % func_name(s)) + + if evaluate: + # _contains can return symbolic booleans that would be returned by + # s.contains(x) but here for Contains(x, s) we only evaluate to + # true, false or return the unevaluated Contains. + result = s._contains(x) + + if isinstance(result, Boolean): + if result in (S.true, S.false): + return result + elif result is not None: + raise TypeError("_contains() should return Boolean or None") + + return super().__new__(cls, x, s) + + @property + def binary_symbols(self): + return set().union(*[i.binary_symbols + for i in self.args[1].args + if i.is_Boolean or i.is_Symbol or + isinstance(i, (Eq, Ne))]) + + def as_set(self): + return self.args[1] diff --git a/vllm/lib/python3.10/site-packages/sympy/sets/fancysets.py b/vllm/lib/python3.10/site-packages/sympy/sets/fancysets.py new file mode 100644 index 0000000000000000000000000000000000000000..aa06ff6f4d978cc2fbbb5aea326b12402e805cdf --- /dev/null +++ b/vllm/lib/python3.10/site-packages/sympy/sets/fancysets.py @@ -0,0 +1,1523 @@ +from functools import reduce +from itertools import product + +from sympy.core.basic import Basic +from sympy.core.containers import Tuple +from sympy.core.expr import Expr +from sympy.core.function import Lambda +from sympy.core.logic import fuzzy_not, fuzzy_or, fuzzy_and +from sympy.core.mod import Mod +from sympy.core.intfunc import igcd +from sympy.core.numbers import oo, Rational +from sympy.core.relational import Eq, is_eq +from sympy.core.kind import NumberKind +from sympy.core.singleton import Singleton, S +from sympy.core.symbol import Dummy, symbols, Symbol +from sympy.core.sympify import _sympify, sympify, _sympy_converter +from sympy.functions.elementary.integers import ceiling, 
floor +from sympy.functions.elementary.trigonometric import sin, cos +from sympy.logic.boolalg import And, Or +from .sets import tfn, Set, Interval, Union, FiniteSet, ProductSet, SetKind +from sympy.utilities.misc import filldedent + + +class Rationals(Set, metaclass=Singleton): + """ + Represents the rational numbers. This set is also available as + the singleton ``S.Rationals``. + + Examples + ======== + + >>> from sympy import S + >>> S.Half in S.Rationals + True + >>> iterable = iter(S.Rationals) + >>> [next(iterable) for i in range(12)] + [0, 1, -1, 1/2, 2, -1/2, -2, 1/3, 3, -1/3, -3, 2/3] + """ + + is_iterable = True + _inf = S.NegativeInfinity + _sup = S.Infinity + is_empty = False + is_finite_set = False + + def _contains(self, other): + if not isinstance(other, Expr): + return S.false + return tfn[other.is_rational] + + def __iter__(self): + yield S.Zero + yield S.One + yield S.NegativeOne + d = 2 + while True: + for n in range(d): + if igcd(n, d) == 1: + yield Rational(n, d) + yield Rational(d, n) + yield Rational(-n, d) + yield Rational(-d, n) + d += 1 + + @property + def _boundary(self): + return S.Reals + + def _kind(self): + return SetKind(NumberKind) + + +class Naturals(Set, metaclass=Singleton): + """ + Represents the natural numbers (or counting numbers) which are all + positive integers starting from 1. This set is also available as + the singleton ``S.Naturals``. + + Examples + ======== + + >>> from sympy import S, Interval, pprint + >>> 5 in S.Naturals + True + >>> iterable = iter(S.Naturals) + >>> next(iterable) + 1 + >>> next(iterable) + 2 + >>> next(iterable) + 3 + >>> pprint(S.Naturals.intersect(Interval(0, 10))) + {1, 2, ..., 10} + + See Also + ======== + + Naturals0 : non-negative integers (i.e. 
includes 0, too) + Integers : also includes negative integers + """ + + is_iterable = True + _inf = S.One + _sup = S.Infinity + is_empty = False + is_finite_set = False + + def _contains(self, other): + if not isinstance(other, Expr): + return S.false + elif other.is_positive and other.is_integer: + return S.true + elif other.is_integer is False or other.is_positive is False: + return S.false + + def _eval_is_subset(self, other): + return Range(1, oo).is_subset(other) + + def _eval_is_superset(self, other): + return Range(1, oo).is_superset(other) + + def __iter__(self): + i = self._inf + while True: + yield i + i = i + 1 + + @property + def _boundary(self): + return self + + def as_relational(self, x): + return And(Eq(floor(x), x), x >= self.inf, x < oo) + + def _kind(self): + return SetKind(NumberKind) + + +class Naturals0(Naturals): + """Represents the whole numbers which are all the non-negative integers, + inclusive of zero. + + See Also + ======== + + Naturals : positive integers; does not include 0 + Integers : also includes the negative integers + """ + _inf = S.Zero + + def _contains(self, other): + if not isinstance(other, Expr): + return S.false + elif other.is_integer and other.is_nonnegative: + return S.true + elif other.is_integer is False or other.is_nonnegative is False: + return S.false + + def _eval_is_subset(self, other): + return Range(oo).is_subset(other) + + def _eval_is_superset(self, other): + return Range(oo).is_superset(other) + + +class Integers(Set, metaclass=Singleton): + """ + Represents all integers: positive, negative and zero. This set is also + available as the singleton ``S.Integers``. 
+ + Examples + ======== + + >>> from sympy import S, Interval, pprint + >>> 5 in S.Naturals + True + >>> iterable = iter(S.Integers) + >>> next(iterable) + 0 + >>> next(iterable) + 1 + >>> next(iterable) + -1 + >>> next(iterable) + 2 + + >>> pprint(S.Integers.intersect(Interval(-4, 4))) + {-4, -3, ..., 4} + + See Also + ======== + + Naturals0 : non-negative integers + Integers : positive and negative integers and zero + """ + + is_iterable = True + is_empty = False + is_finite_set = False + + def _contains(self, other): + if not isinstance(other, Expr): + return S.false + return tfn[other.is_integer] + + def __iter__(self): + yield S.Zero + i = S.One + while True: + yield i + yield -i + i = i + 1 + + @property + def _inf(self): + return S.NegativeInfinity + + @property + def _sup(self): + return S.Infinity + + @property + def _boundary(self): + return self + + def _kind(self): + return SetKind(NumberKind) + + def as_relational(self, x): + return And(Eq(floor(x), x), -oo < x, x < oo) + + def _eval_is_subset(self, other): + return Range(-oo, oo).is_subset(other) + + def _eval_is_superset(self, other): + return Range(-oo, oo).is_superset(other) + + +class Reals(Interval, metaclass=Singleton): + """ + Represents all real numbers + from negative infinity to positive infinity, + including all integer, rational and irrational numbers. + This set is also available as the singleton ``S.Reals``. 
+ + + Examples + ======== + + >>> from sympy import S, Rational, pi, I + >>> 5 in S.Reals + True + >>> Rational(-1, 2) in S.Reals + True + >>> pi in S.Reals + True + >>> 3*I in S.Reals + False + >>> S.Reals.contains(pi) + True + + + See Also + ======== + + ComplexRegion + """ + @property + def start(self): + return S.NegativeInfinity + + @property + def end(self): + return S.Infinity + + @property + def left_open(self): + return True + + @property + def right_open(self): + return True + + def __eq__(self, other): + return other == Interval(S.NegativeInfinity, S.Infinity) + + def __hash__(self): + return hash(Interval(S.NegativeInfinity, S.Infinity)) + + +class ImageSet(Set): + """ + Image of a set under a mathematical function. The transformation + must be given as a Lambda function which has as many arguments + as the elements of the set upon which it operates, e.g. 1 argument + when acting on the set of integers or 2 arguments when acting on + a complex region. + + This function is not normally called directly, but is called + from ``imageset``. + + + Examples + ======== + + >>> from sympy import Symbol, S, pi, Dummy, Lambda + >>> from sympy import FiniteSet, ImageSet, Interval + + >>> x = Symbol('x') + >>> N = S.Naturals + >>> squares = ImageSet(Lambda(x, x**2), N) # {x**2 for x in N} + >>> 4 in squares + True + >>> 5 in squares + False + + >>> FiniteSet(0, 1, 2, 3, 4, 5, 6, 7, 9, 10).intersect(squares) + {1, 4, 9} + + >>> square_iterable = iter(squares) + >>> for i in range(4): + ... next(square_iterable) + 1 + 4 + 9 + 16 + + If you want to get value for `x` = 2, 1/2 etc. 
(Please check whether the + `x` value is in ``base_set`` or not before passing it as args) + + >>> squares.lamda(2) + 4 + >>> squares.lamda(S(1)/2) + 1/4 + + >>> n = Dummy('n') + >>> solutions = ImageSet(Lambda(n, n*pi), S.Integers) # solutions of sin(x) = 0 + >>> dom = Interval(-1, 1) + >>> dom.intersect(solutions) + {0} + + See Also + ======== + + sympy.sets.sets.imageset + """ + def __new__(cls, flambda, *sets): + if not isinstance(flambda, Lambda): + raise ValueError('First argument must be a Lambda') + + signature = flambda.signature + + if len(signature) != len(sets): + raise ValueError('Incompatible signature') + + sets = [_sympify(s) for s in sets] + + if not all(isinstance(s, Set) for s in sets): + raise TypeError("Set arguments to ImageSet should of type Set") + + if not all(cls._check_sig(sg, st) for sg, st in zip(signature, sets)): + raise ValueError("Signature %s does not match sets %s" % (signature, sets)) + + if flambda is S.IdentityFunction and len(sets) == 1: + return sets[0] + + if not set(flambda.variables) & flambda.expr.free_symbols: + is_empty = fuzzy_or(s.is_empty for s in sets) + if is_empty == True: + return S.EmptySet + elif is_empty == False: + return FiniteSet(flambda.expr) + + return Basic.__new__(cls, flambda, *sets) + + lamda = property(lambda self: self.args[0]) + base_sets = property(lambda self: self.args[1:]) + + @property + def base_set(self): + # XXX: Maybe deprecate this? It is poorly defined in handling + # the multivariate case... 
+ sets = self.base_sets + if len(sets) == 1: + return sets[0] + else: + return ProductSet(*sets).flatten() + + @property + def base_pset(self): + return ProductSet(*self.base_sets) + + @classmethod + def _check_sig(cls, sig_i, set_i): + if sig_i.is_symbol: + return True + elif isinstance(set_i, ProductSet): + sets = set_i.sets + if len(sig_i) != len(sets): + return False + # Recurse through the signature for nested tuples: + return all(cls._check_sig(ts, ps) for ts, ps in zip(sig_i, sets)) + else: + # XXX: Need a better way of checking whether a set is a set of + # Tuples or not. For example a FiniteSet can contain Tuples + # but so can an ImageSet or a ConditionSet. Others like + # Integers, Reals etc can not contain Tuples. We could just + # list the possibilities here... Current code for e.g. + # _contains probably only works for ProductSet. + return True # Give the benefit of the doubt + + def __iter__(self): + already_seen = set() + for i in self.base_pset: + val = self.lamda(*i) + if val in already_seen: + continue + else: + already_seen.add(val) + yield val + + def _is_multivariate(self): + return len(self.lamda.variables) > 1 + + def _contains(self, other): + from sympy.solvers.solveset import _solveset_multi + + def get_symsetmap(signature, base_sets): + '''Attempt to get a map of symbols to base_sets''' + queue = list(zip(signature, base_sets)) + symsetmap = {} + for sig, base_set in queue: + if sig.is_symbol: + symsetmap[sig] = base_set + elif base_set.is_ProductSet: + sets = base_set.sets + if len(sig) != len(sets): + raise ValueError("Incompatible signature") + # Recurse + queue.extend(zip(sig, sets)) + else: + # If we get here then we have something like sig = (x, y) and + # base_set = {(1, 2), (3, 4)}. For now we give up. 
+ return None + + return symsetmap + + def get_equations(expr, candidate): + '''Find the equations relating symbols in expr and candidate.''' + queue = [(expr, candidate)] + for e, c in queue: + if not isinstance(e, Tuple): + yield Eq(e, c) + elif not isinstance(c, Tuple) or len(e) != len(c): + yield False + return + else: + queue.extend(zip(e, c)) + + # Get the basic objects together: + other = _sympify(other) + expr = self.lamda.expr + sig = self.lamda.signature + variables = self.lamda.variables + base_sets = self.base_sets + + # Use dummy symbols for ImageSet parameters so they don't match + # anything in other + rep = {v: Dummy(v.name) for v in variables} + variables = [v.subs(rep) for v in variables] + sig = sig.subs(rep) + expr = expr.subs(rep) + + # Map the parts of other to those in the Lambda expr + equations = [] + for eq in get_equations(expr, other): + # Unsatisfiable equation? + if eq is False: + return S.false + equations.append(eq) + + # Map the symbols in the signature to the corresponding domains + symsetmap = get_symsetmap(sig, base_sets) + if symsetmap is None: + # Can't factor the base sets to a ProductSet + return None + + # Which of the variables in the Lambda signature need to be solved for? 
+ symss = (eq.free_symbols for eq in equations) + variables = set(variables) & reduce(set.union, symss, set()) + + # Use internal multivariate solveset + variables = tuple(variables) + base_sets = [symsetmap[v] for v in variables] + solnset = _solveset_multi(equations, variables, base_sets) + if solnset is None: + return None + return tfn[fuzzy_not(solnset.is_empty)] + + @property + def is_iterable(self): + return all(s.is_iterable for s in self.base_sets) + + def doit(self, **hints): + from sympy.sets.setexpr import SetExpr + f = self.lamda + sig = f.signature + if len(sig) == 1 and sig[0].is_symbol and isinstance(f.expr, Expr): + base_set = self.base_sets[0] + return SetExpr(base_set)._eval_func(f).set + if all(s.is_FiniteSet for s in self.base_sets): + return FiniteSet(*(f(*a) for a in product(*self.base_sets))) + return self + + def _kind(self): + return SetKind(self.lamda.expr.kind) + + +class Range(Set): + """ + Represents a range of integers. Can be called as ``Range(stop)``, + ``Range(start, stop)``, or ``Range(start, stop, step)``; when ``step`` is + not given it defaults to 1. + + ``Range(stop)`` is the same as ``Range(0, stop, 1)`` and the stop value + (just as for Python ranges) is not included in the Range values. + + >>> from sympy import Range + >>> list(Range(3)) + [0, 1, 2] + + The step can also be negative: + + >>> list(Range(10, 0, -2)) + [10, 8, 6, 4, 2] + + The stop value is made canonical so equivalent ranges always + have the same args: + + >>> Range(0, 10, 3) + Range(0, 12, 3) + + Infinite ranges are allowed. ``oo`` and ``-oo`` are never included in the + set (``Range`` is always a subset of ``Integers``). If the starting point + is infinite, then the final value is ``stop - step``. To iterate such a + range, it needs to be reversed: + + >>> from sympy import oo + >>> r = Range(-oo, 1) + >>> r[-1] + 0 + >>> next(iter(r)) + Traceback (most recent call last): + ... 
+ TypeError: Cannot iterate over Range with infinite start + >>> next(iter(r.reversed)) + 0 + + Although ``Range`` is a :class:`Set` (and supports the normal set + operations) it maintains the order of the elements and can + be used in contexts where ``range`` would be used. + + >>> from sympy import Interval + >>> Range(0, 10, 2).intersect(Interval(3, 7)) + Range(4, 8, 2) + >>> list(_) + [4, 6] + + Although slicing of a Range will always return a Range -- possibly + empty -- an empty set will be returned from any intersection that + is empty: + + >>> Range(3)[:0] + Range(0, 0, 1) + >>> Range(3).intersect(Interval(4, oo)) + EmptySet + >>> Range(3).intersect(Range(4, oo)) + EmptySet + + Range will accept symbolic arguments but has very limited support + for doing anything other than displaying the Range: + + >>> from sympy import Symbol, pprint + >>> from sympy.abc import i, j, k + >>> Range(i, j, k).start + i + >>> Range(i, j, k).inf + Traceback (most recent call last): + ... + ValueError: invalid method for symbolic range + + Better success will be had when using integer symbols: + + >>> n = Symbol('n', integer=True) + >>> r = Range(n, n + 20, 3) + >>> r.inf + n + >>> pprint(r) + {n, n + 3, ..., n + 18} + """ + + def __new__(cls, *args): + if len(args) == 1: + if isinstance(args[0], range): + raise TypeError( + 'use sympify(%s) to convert range to Range' % args[0]) + + # expand range + slc = slice(*args) + + if slc.step == 0: + raise ValueError("step cannot be 0") + + start, stop, step = slc.start or 0, slc.stop, slc.step or 1 + try: + ok = [] + for w in (start, stop, step): + w = sympify(w) + if w in [S.NegativeInfinity, S.Infinity] or ( + w.has(Symbol) and w.is_integer != False): + ok.append(w) + elif not w.is_Integer: + if w.is_infinite: + raise ValueError('infinite symbols not allowed') + raise ValueError + else: + ok.append(w) + except ValueError: + raise ValueError(filldedent(''' + Finite arguments to Range must be integers; `imageset` can define + other 
cases, e.g. use `imageset(i, i/10, Range(3))` to give + [0, 1/10, 1/5].''')) + start, stop, step = ok + + null = False + if any(i.has(Symbol) for i in (start, stop, step)): + dif = stop - start + n = dif/step + if n.is_Rational: + if dif == 0: + null = True + else: # (x, x + 5, 2) or (x, 3*x, x) + n = floor(n) + end = start + n*step + if dif.is_Rational: # (x, x + 5, 2) + if (end - stop).is_negative: + end += step + else: # (x, 3*x, x) + if (end/stop - 1).is_negative: + end += step + elif n.is_extended_negative: + null = True + else: + end = stop # other methods like sup and reversed must fail + elif start.is_infinite: + span = step*(stop - start) + if span is S.NaN or span <= 0: + null = True + elif step.is_Integer and stop.is_infinite and abs(step) != 1: + raise ValueError(filldedent(''' + Step size must be %s in this case.''' % (1 if step > 0 else -1))) + else: + end = stop + else: + oostep = step.is_infinite + if oostep: + step = S.One if step > 0 else S.NegativeOne + n = ceiling((stop - start)/step) + if n <= 0: + null = True + elif oostep: + step = S.One # make it canonical + end = start + step + else: + end = start + n*step + if null: + start = end = S.Zero + step = S.One + return Basic.__new__(cls, start, end, step) + + start = property(lambda self: self.args[0]) + stop = property(lambda self: self.args[1]) + step = property(lambda self: self.args[2]) + + @property + def reversed(self): + """Return an equivalent Range in the opposite order. 
+ + Examples + ======== + + >>> from sympy import Range + >>> Range(10).reversed + Range(9, -1, -1) + """ + if self.has(Symbol): + n = (self.stop - self.start)/self.step + if not n.is_extended_positive or not all( + i.is_integer or i.is_infinite for i in self.args): + raise ValueError('invalid method for symbolic range') + if self.start == self.stop: + return self + return self.func( + self.stop - self.step, self.start - self.step, -self.step) + + def _kind(self): + return SetKind(NumberKind) + + def _contains(self, other): + if self.start == self.stop: + return S.false + if other.is_infinite: + return S.false + if not other.is_integer: + return tfn[other.is_integer] + if self.has(Symbol): + n = (self.stop - self.start)/self.step + if not n.is_extended_positive or not all( + i.is_integer or i.is_infinite for i in self.args): + return + else: + n = self.size + if self.start.is_finite: + ref = self.start + elif self.stop.is_finite: + ref = self.stop + else: # both infinite; step is +/- 1 (enforced by __new__) + return S.true + if n == 1: + return Eq(other, self[0]) + res = (ref - other) % self.step + if res == S.Zero: + if self.has(Symbol): + d = Dummy('i') + return self.as_relational(d).subs(d, other) + return And(other >= self.inf, other <= self.sup) + elif res.is_Integer: # off sequence + return S.false + else: # symbolic/unsimplified residue modulo step + return None + + def __iter__(self): + n = self.size # validate + if not (n.has(S.Infinity) or n.has(S.NegativeInfinity) or n.is_Integer): + raise TypeError("Cannot iterate over symbolic Range") + if self.start in [S.NegativeInfinity, S.Infinity]: + raise TypeError("Cannot iterate over Range with infinite start") + elif self.start != self.stop: + i = self.start + if n.is_infinite: + while True: + yield i + i += self.step + else: + for _ in range(n): + yield i + i += self.step + + @property + def is_iterable(self): + # Check that size can be determined, used by __iter__ + dif = self.stop - self.start + n = 
dif/self.step + if not (n.has(S.Infinity) or n.has(S.NegativeInfinity) or n.is_Integer): + return False + if self.start in [S.NegativeInfinity, S.Infinity]: + return False + if not (n.is_extended_nonnegative and all(i.is_integer for i in self.args)): + return False + return True + + def __len__(self): + rv = self.size + if rv is S.Infinity: + raise ValueError('Use .size to get the length of an infinite Range') + return int(rv) + + @property + def size(self): + if self.start == self.stop: + return S.Zero + dif = self.stop - self.start + n = dif/self.step + if n.is_infinite: + return S.Infinity + if n.is_extended_nonnegative and all(i.is_integer for i in self.args): + return abs(floor(n)) + raise ValueError('Invalid method for symbolic Range') + + @property + def is_finite_set(self): + if self.start.is_integer and self.stop.is_integer: + return True + return self.size.is_finite + + @property + def is_empty(self): + try: + return self.size.is_zero + except ValueError: + return None + + def __bool__(self): + # this only distinguishes between definite null range + # and non-null/unknown null; getting True doesn't mean + # that it actually is not null + b = is_eq(self.start, self.stop) + if b is None: + raise ValueError('cannot tell if Range is null or not') + return not bool(b) + + def __getitem__(self, i): + ooslice = "cannot slice from the end with an infinite value" + zerostep = "slice step cannot be zero" + infinite = "slicing not possible on range with infinite start" + # if we had to take every other element in the following + # oo, ..., 6, 4, 2, 0 + # we might get oo, ..., 4, 0 or oo, ..., 6, 2 + ambiguous = "cannot unambiguously re-stride from the end " + \ + "with an infinite value" + if isinstance(i, slice): + if self.size.is_finite: # validates, too + if self.start == self.stop: + return Range(0) + start, stop, step = i.indices(self.size) + n = ceiling((stop - start)/step) + if n <= 0: + return Range(0) + canonical_stop = start + n*step + end = canonical_stop 
- step + ss = step*self.step + return Range(self[start], self[end] + ss, ss) + else: # infinite Range + start = i.start + stop = i.stop + if i.step == 0: + raise ValueError(zerostep) + step = i.step or 1 + ss = step*self.step + #--------------------- + # handle infinite Range + # i.e. Range(-oo, oo) or Range(oo, -oo, -1) + # -------------------- + if self.start.is_infinite and self.stop.is_infinite: + raise ValueError(infinite) + #--------------------- + # handle infinite on right + # e.g. Range(0, oo) or Range(0, -oo, -1) + # -------------------- + if self.stop.is_infinite: + # start and stop are not interdependent -- + # they only depend on step --so we use the + # equivalent reversed values + return self.reversed[ + stop if stop is None else -stop + 1: + start if start is None else -start: + step].reversed + #--------------------- + # handle infinite on the left + # e.g. Range(oo, 0, -1) or Range(-oo, 0) + # -------------------- + # consider combinations of + # start/stop {== None, < 0, == 0, > 0} and + # step {< 0, > 0} + if start is None: + if stop is None: + if step < 0: + return Range(self[-1], self.start, ss) + elif step > 1: + raise ValueError(ambiguous) + else: # == 1 + return self + elif stop < 0: + if step < 0: + return Range(self[-1], self[stop], ss) + else: # > 0 + return Range(self.start, self[stop], ss) + elif stop == 0: + if step > 0: + return Range(0) + else: # < 0 + raise ValueError(ooslice) + elif stop == 1: + if step > 0: + raise ValueError(ooslice) # infinite singleton + else: # < 0 + raise ValueError(ooslice) + else: # > 1 + raise ValueError(ooslice) + elif start < 0: + if stop is None: + if step < 0: + return Range(self[start], self.start, ss) + else: # > 0 + return Range(self[start], self.stop, ss) + elif stop < 0: + return Range(self[start], self[stop], ss) + elif stop == 0: + if step < 0: + raise ValueError(ooslice) + else: # > 0 + return Range(0) + elif stop > 0: + raise ValueError(ooslice) + elif start == 0: + if stop is None: + if step 
< 0: + raise ValueError(ooslice) # infinite singleton + elif step > 1: + raise ValueError(ambiguous) + else: # == 1 + return self + elif stop < 0: + if step > 1: + raise ValueError(ambiguous) + elif step == 1: + return Range(self.start, self[stop], ss) + else: # < 0 + return Range(0) + else: # >= 0 + raise ValueError(ooslice) + elif start > 0: + raise ValueError(ooslice) + else: + if self.start == self.stop: + raise IndexError('Range index out of range') + if not (all(i.is_integer or i.is_infinite + for i in self.args) and ((self.stop - self.start)/ + self.step).is_extended_positive): + raise ValueError('Invalid method for symbolic Range') + if i == 0: + if self.start.is_infinite: + raise ValueError(ooslice) + return self.start + if i == -1: + if self.stop.is_infinite: + raise ValueError(ooslice) + return self.stop - self.step + n = self.size # must be known for any other index + rv = (self.stop if i < 0 else self.start) + i*self.step + if rv.is_infinite: + raise ValueError(ooslice) + val = (rv - self.start)/self.step + rel = fuzzy_or([val.is_infinite, + fuzzy_and([val.is_nonnegative, (n-val).is_nonnegative])]) + if rel: + return rv + if rel is None: + raise ValueError('Invalid method for symbolic Range') + raise IndexError("Range index out of range") + + @property + def _inf(self): + if not self: + return S.EmptySet.inf + if self.has(Symbol): + if all(i.is_integer or i.is_infinite for i in self.args): + dif = self.stop - self.start + if self.step.is_positive and dif.is_positive: + return self.start + elif self.step.is_negative and dif.is_negative: + return self.stop - self.step + raise ValueError('invalid method for symbolic range') + if self.step > 0: + return self.start + else: + return self.stop - self.step + + @property + def _sup(self): + if not self: + return S.EmptySet.sup + if self.has(Symbol): + if all(i.is_integer or i.is_infinite for i in self.args): + dif = self.stop - self.start + if self.step.is_positive and dif.is_positive: + return self.stop - 
self.step + elif self.step.is_negative and dif.is_negative: + return self.start + raise ValueError('invalid method for symbolic range') + if self.step > 0: + return self.stop - self.step + else: + return self.start + + @property + def _boundary(self): + return self + + def as_relational(self, x): + """Rewrite a Range in terms of equalities and logic operators. """ + if self.start.is_infinite: + assert not self.stop.is_infinite # by instantiation + a = self.reversed.start + else: + a = self.start + step = self.step + in_seq = Eq(Mod(x - a, step), 0) + ints = And(Eq(Mod(a, 1), 0), Eq(Mod(step, 1), 0)) + n = (self.stop - self.start)/self.step + if n == 0: + return S.EmptySet.as_relational(x) + if n == 1: + return And(Eq(x, a), ints) + try: + a, b = self.inf, self.sup + except ValueError: + a = None + if a is not None: + range_cond = And( + x > a if a.is_infinite else x >= a, + x < b if b.is_infinite else x <= b) + else: + a, b = self.start, self.stop - self.step + range_cond = Or( + And(self.step >= 1, x > a if a.is_infinite else x >= a, + x < b if b.is_infinite else x <= b), + And(self.step <= -1, x < a if a.is_infinite else x <= a, + x > b if b.is_infinite else x >= b)) + return And(in_seq, ints, range_cond) + + +_sympy_converter[range] = lambda r: Range(r.start, r.stop, r.step) + +def normalize_theta_set(theta): + r""" + Normalize a Real Set `theta` in the interval `[0, 2\pi)`. It returns + a normalized value of theta in the Set. For Interval, a maximum of + one cycle $[0, 2\pi]$, is returned i.e. for theta equal to $[0, 10\pi]$, + returned normalized value would be $[0, 2\pi)$. As of now intervals + with end points as non-multiples of ``pi`` is not supported. + + Raises + ====== + + NotImplementedError + The algorithms for Normalizing theta Set are not yet + implemented. + ValueError + The input is not valid, i.e. the input is not a real set. + RuntimeError + It is a bug, please report to the github issue tracker. 
+ + Examples + ======== + + >>> from sympy.sets.fancysets import normalize_theta_set + >>> from sympy import Interval, FiniteSet, pi + >>> normalize_theta_set(Interval(9*pi/2, 5*pi)) + Interval(pi/2, pi) + >>> normalize_theta_set(Interval(-3*pi/2, pi/2)) + Interval.Ropen(0, 2*pi) + >>> normalize_theta_set(Interval(-pi/2, pi/2)) + Union(Interval(0, pi/2), Interval.Ropen(3*pi/2, 2*pi)) + >>> normalize_theta_set(Interval(-4*pi, 3*pi)) + Interval.Ropen(0, 2*pi) + >>> normalize_theta_set(Interval(-3*pi/2, -pi/2)) + Interval(pi/2, 3*pi/2) + >>> normalize_theta_set(FiniteSet(0, pi, 3*pi)) + {0, pi} + + """ + from sympy.functions.elementary.trigonometric import _pi_coeff + + if theta.is_Interval: + interval_len = theta.measure + # one complete circle + if interval_len >= 2*S.Pi: + if interval_len == 2*S.Pi and theta.left_open and theta.right_open: + k = _pi_coeff(theta.start) + return Union(Interval(0, k*S.Pi, False, True), + Interval(k*S.Pi, 2*S.Pi, True, True)) + return Interval(0, 2*S.Pi, False, True) + + k_start, k_end = _pi_coeff(theta.start), _pi_coeff(theta.end) + + if k_start is None or k_end is None: + raise NotImplementedError("Normalizing theta without pi as coefficient is " + "not yet implemented") + new_start = k_start*S.Pi + new_end = k_end*S.Pi + + if new_start > new_end: + return Union(Interval(S.Zero, new_end, False, theta.right_open), + Interval(new_start, 2*S.Pi, theta.left_open, True)) + else: + return Interval(new_start, new_end, theta.left_open, theta.right_open) + + elif theta.is_FiniteSet: + new_theta = [] + for element in theta: + k = _pi_coeff(element) + if k is None: + raise NotImplementedError('Normalizing theta without pi as ' + 'coefficient, is not Implemented.') + else: + new_theta.append(k*S.Pi) + return FiniteSet(*new_theta) + + elif theta.is_Union: + return Union(*[normalize_theta_set(interval) for interval in theta.args]) + + elif theta.is_subset(S.Reals): + raise NotImplementedError("Normalizing theta when, it is of type %s is not " + 
"implemented" % type(theta)) + else: + raise ValueError(" %s is not a real set" % (theta)) + + +class ComplexRegion(Set): + r""" + Represents the Set of all Complex Numbers. It can represent a + region of Complex Plane in both the standard forms Polar and + Rectangular coordinates. + + * Polar Form + Input is in the form of the ProductSet or Union of ProductSets + of the intervals of ``r`` and ``theta``, and use the flag ``polar=True``. + + .. math:: Z = \{z \in \mathbb{C} \mid z = r\times (\cos(\theta) + I\sin(\theta)), r \in [\texttt{r}], \theta \in [\texttt{theta}]\} + + * Rectangular Form + Input is in the form of the ProductSet or Union of ProductSets + of interval of x and y, the real and imaginary parts of the Complex numbers in a plane. + Default input type is in rectangular form. + + .. math:: Z = \{z \in \mathbb{C} \mid z = x + Iy, x \in [\operatorname{re}(z)], y \in [\operatorname{im}(z)]\} + + Examples + ======== + + >>> from sympy import ComplexRegion, Interval, S, I, Union + >>> a = Interval(2, 3) + >>> b = Interval(4, 6) + >>> c1 = ComplexRegion(a*b) # Rectangular Form + >>> c1 + CartesianComplexRegion(ProductSet(Interval(2, 3), Interval(4, 6))) + + * c1 represents the rectangular region in complex plane + surrounded by the coordinates (2, 4), (3, 4), (3, 6) and + (2, 6), of the four vertices. + + >>> c = Interval(1, 8) + >>> c2 = ComplexRegion(Union(a*b, b*c)) + >>> c2 + CartesianComplexRegion(Union(ProductSet(Interval(2, 3), Interval(4, 6)), ProductSet(Interval(4, 6), Interval(1, 8)))) + + * c2 represents the Union of two rectangular regions in complex + plane. One of them surrounded by the coordinates of c1 and + other surrounded by the coordinates (4, 1), (6, 1), (6, 8) and + (4, 8). 
+ + >>> 2.5 + 4.5*I in c1 + True + >>> 2.5 + 6.5*I in c1 + False + + >>> r = Interval(0, 1) + >>> theta = Interval(0, 2*S.Pi) + >>> c2 = ComplexRegion(r*theta, polar=True) # Polar Form + >>> c2 # unit Disk + PolarComplexRegion(ProductSet(Interval(0, 1), Interval.Ropen(0, 2*pi))) + + * c2 represents the region in complex plane inside the + Unit Disk centered at the origin. + + >>> 0.5 + 0.5*I in c2 + True + >>> 1 + 2*I in c2 + False + + >>> unit_disk = ComplexRegion(Interval(0, 1)*Interval(0, 2*S.Pi), polar=True) + >>> upper_half_unit_disk = ComplexRegion(Interval(0, 1)*Interval(0, S.Pi), polar=True) + >>> intersection = unit_disk.intersect(upper_half_unit_disk) + >>> intersection + PolarComplexRegion(ProductSet(Interval(0, 1), Interval(0, pi))) + >>> intersection == upper_half_unit_disk + True + + See Also + ======== + + CartesianComplexRegion + PolarComplexRegion + Complexes + + """ + is_ComplexRegion = True + + def __new__(cls, sets, polar=False): + if polar is False: + return CartesianComplexRegion(sets) + elif polar is True: + return PolarComplexRegion(sets) + else: + raise ValueError("polar should be either True or False") + + @property + def sets(self): + """ + Return raw input sets to the self. + + Examples + ======== + + >>> from sympy import Interval, ComplexRegion, Union + >>> a = Interval(2, 3) + >>> b = Interval(4, 5) + >>> c = Interval(1, 7) + >>> C1 = ComplexRegion(a*b) + >>> C1.sets + ProductSet(Interval(2, 3), Interval(4, 5)) + >>> C2 = ComplexRegion(Union(a*b, b*c)) + >>> C2.sets + Union(ProductSet(Interval(2, 3), Interval(4, 5)), ProductSet(Interval(4, 5), Interval(1, 7))) + + """ + return self.args[0] + + @property + def psets(self): + """ + Return a tuple of sets (ProductSets) input of the self. 
+ + Examples + ======== + + >>> from sympy import Interval, ComplexRegion, Union + >>> a = Interval(2, 3) + >>> b = Interval(4, 5) + >>> c = Interval(1, 7) + >>> C1 = ComplexRegion(a*b) + >>> C1.psets + (ProductSet(Interval(2, 3), Interval(4, 5)),) + >>> C2 = ComplexRegion(Union(a*b, b*c)) + >>> C2.psets + (ProductSet(Interval(2, 3), Interval(4, 5)), ProductSet(Interval(4, 5), Interval(1, 7))) + + """ + if self.sets.is_ProductSet: + psets = () + psets = psets + (self.sets, ) + else: + psets = self.sets.args + return psets + + @property + def a_interval(self): + """ + Return the union of intervals of `x` when, self is in + rectangular form, or the union of intervals of `r` when + self is in polar form. + + Examples + ======== + + >>> from sympy import Interval, ComplexRegion, Union + >>> a = Interval(2, 3) + >>> b = Interval(4, 5) + >>> c = Interval(1, 7) + >>> C1 = ComplexRegion(a*b) + >>> C1.a_interval + Interval(2, 3) + >>> C2 = ComplexRegion(Union(a*b, b*c)) + >>> C2.a_interval + Union(Interval(2, 3), Interval(4, 5)) + + """ + a_interval = [] + for element in self.psets: + a_interval.append(element.args[0]) + + a_interval = Union(*a_interval) + return a_interval + + @property + def b_interval(self): + """ + Return the union of intervals of `y` when, self is in + rectangular form, or the union of intervals of `theta` + when self is in polar form. + + Examples + ======== + + >>> from sympy import Interval, ComplexRegion, Union + >>> a = Interval(2, 3) + >>> b = Interval(4, 5) + >>> c = Interval(1, 7) + >>> C1 = ComplexRegion(a*b) + >>> C1.b_interval + Interval(4, 5) + >>> C2 = ComplexRegion(Union(a*b, b*c)) + >>> C2.b_interval + Interval(1, 7) + + """ + b_interval = [] + for element in self.psets: + b_interval.append(element.args[1]) + + b_interval = Union(*b_interval) + return b_interval + + @property + def _measure(self): + """ + The measure of self.sets. 
+ + Examples + ======== + + >>> from sympy import Interval, ComplexRegion, S + >>> a, b = Interval(2, 5), Interval(4, 8) + >>> c = Interval(0, 2*S.Pi) + >>> c1 = ComplexRegion(a*b) + >>> c1.measure + 12 + >>> c2 = ComplexRegion(a*c, polar=True) + >>> c2.measure + 6*pi + + """ + return self.sets._measure + + def _kind(self): + return self.args[0].kind + + @classmethod + def from_real(cls, sets): + """ + Converts given subset of real numbers to a complex region. + + Examples + ======== + + >>> from sympy import Interval, ComplexRegion + >>> unit = Interval(0,1) + >>> ComplexRegion.from_real(unit) + CartesianComplexRegion(ProductSet(Interval(0, 1), {0})) + + """ + if not sets.is_subset(S.Reals): + raise ValueError("sets must be a subset of the real line") + + return CartesianComplexRegion(sets * FiniteSet(0)) + + def _contains(self, other): + from sympy.functions import arg, Abs + + isTuple = isinstance(other, Tuple) + if isTuple and len(other) != 2: + raise ValueError('expecting Tuple of length 2') + + # If the other is not an Expression, and neither a Tuple + if not isinstance(other, (Expr, Tuple)): + return S.false + + # self in rectangular form + if not self.polar: + re, im = other if isTuple else other.as_real_imag() + return tfn[fuzzy_or(fuzzy_and([ + pset.args[0]._contains(re), + pset.args[1]._contains(im)]) + for pset in self.psets)] + + # self in polar form + elif self.polar: + if other.is_zero: + # ignore undefined complex argument + return tfn[fuzzy_or(pset.args[0]._contains(S.Zero) + for pset in self.psets)] + if isTuple: + r, theta = other + else: + r, theta = Abs(other), arg(other) + if theta.is_real and theta.is_number: + # angles in psets are normalized to [0, 2pi) + theta %= 2*S.Pi + return tfn[fuzzy_or(fuzzy_and([ + pset.args[0]._contains(r), + pset.args[1]._contains(theta)]) + for pset in self.psets)] + + +class CartesianComplexRegion(ComplexRegion): + r""" + Set representing a square region of the complex plane. + + .. 
math:: Z = \{z \in \mathbb{C} \mid z = x + Iy, x \in [\operatorname{re}(z)], y \in [\operatorname{im}(z)]\} + + Examples + ======== + + >>> from sympy import ComplexRegion, I, Interval + >>> region = ComplexRegion(Interval(1, 3) * Interval(4, 6)) + >>> 2 + 5*I in region + True + >>> 5*I in region + False + + See also + ======== + + ComplexRegion + PolarComplexRegion + Complexes + """ + + polar = False + variables = symbols('x, y', cls=Dummy) + + def __new__(cls, sets): + + if sets == S.Reals*S.Reals: + return S.Complexes + + if all(_a.is_FiniteSet for _a in sets.args) and (len(sets.args) == 2): + + # ** ProductSet of FiniteSets in the Complex Plane. ** + # For Cases like ComplexRegion({2, 4}*{3}), It + # would return {2 + 3*I, 4 + 3*I} + + # FIXME: This should probably be handled with something like: + # return ImageSet(Lambda((x, y), x+I*y), sets).rewrite(FiniteSet) + complex_num = [] + for x in sets.args[0]: + for y in sets.args[1]: + complex_num.append(x + S.ImaginaryUnit*y) + return FiniteSet(*complex_num) + else: + return Set.__new__(cls, sets) + + @property + def expr(self): + x, y = self.variables + return x + S.ImaginaryUnit*y + + +class PolarComplexRegion(ComplexRegion): + r""" + Set representing a polar region of the complex plane. + + .. 
math:: Z = \{z \in \mathbb{C} \mid z = r\times (\cos(\theta) + I\sin(\theta)), r \in [\texttt{r}], \theta \in [\texttt{theta}]\} + + Examples + ======== + + >>> from sympy import ComplexRegion, Interval, oo, pi, I + >>> rset = Interval(0, oo) + >>> thetaset = Interval(0, pi) + >>> upper_half_plane = ComplexRegion(rset * thetaset, polar=True) + >>> 1 + I in upper_half_plane + True + >>> 1 - I in upper_half_plane + False + + See also + ======== + + ComplexRegion + CartesianComplexRegion + Complexes + + """ + + polar = True + variables = symbols('r, theta', cls=Dummy) + + def __new__(cls, sets): + + new_sets = [] + # sets is Union of ProductSets + if not sets.is_ProductSet: + for k in sets.args: + new_sets.append(k) + # sets is ProductSets + else: + new_sets.append(sets) + # Normalize input theta + for k, v in enumerate(new_sets): + new_sets[k] = ProductSet(v.args[0], + normalize_theta_set(v.args[1])) + sets = Union(*new_sets) + return Set.__new__(cls, sets) + + @property + def expr(self): + r, theta = self.variables + return r*(cos(theta) + S.ImaginaryUnit*sin(theta)) + + +class Complexes(CartesianComplexRegion, metaclass=Singleton): + """ + The :class:`Set` of all complex numbers + + Examples + ======== + + >>> from sympy import S, I + >>> S.Complexes + Complexes + >>> 1 + I in S.Complexes + True + + See also + ======== + + Reals + ComplexRegion + + """ + + is_empty = False + is_finite_set = False + + # Override property from superclass since Complexes has no args + @property + def sets(self): + return ProductSet(S.Reals, S.Reals) + + def __new__(cls): + return Set.__new__(cls) diff --git a/vllm/lib/python3.10/site-packages/sympy/sets/handlers/intersection.py b/vllm/lib/python3.10/site-packages/sympy/sets/handlers/intersection.py new file mode 100644 index 0000000000000000000000000000000000000000..fcb9309ef3e9d2722ab1bfe664f1d1644f17da5d --- /dev/null +++ b/vllm/lib/python3.10/site-packages/sympy/sets/handlers/intersection.py @@ -0,0 +1,533 @@ +from sympy.core.basic 
import _aresame +from sympy.core.function import Lambda, expand_complex +from sympy.core.mul import Mul +from sympy.core.numbers import ilcm, Float +from sympy.core.relational import Eq +from sympy.core.singleton import S +from sympy.core.symbol import (Dummy, symbols) +from sympy.core.sorting import ordered +from sympy.functions.elementary.complexes import sign +from sympy.functions.elementary.integers import floor, ceiling +from sympy.sets.fancysets import ComplexRegion +from sympy.sets.sets import (FiniteSet, Intersection, Interval, Set, Union) +from sympy.multipledispatch import Dispatcher +from sympy.sets.conditionset import ConditionSet +from sympy.sets.fancysets import (Integers, Naturals, Reals, Range, + ImageSet, Rationals) +from sympy.sets.sets import EmptySet, UniversalSet, imageset, ProductSet +from sympy.simplify.radsimp import numer + + +intersection_sets = Dispatcher('intersection_sets') + + +@intersection_sets.register(ConditionSet, ConditionSet) +def _(a, b): + return None + +@intersection_sets.register(ConditionSet, Set) +def _(a, b): + return ConditionSet(a.sym, a.condition, Intersection(a.base_set, b)) + +@intersection_sets.register(Naturals, Integers) +def _(a, b): + return a + +@intersection_sets.register(Naturals, Naturals) +def _(a, b): + return a if a is S.Naturals else b + +@intersection_sets.register(Interval, Naturals) +def _(a, b): + return intersection_sets(b, a) + +@intersection_sets.register(ComplexRegion, Set) +def _(self, other): + if other.is_ComplexRegion: + # self in rectangular form + if (not self.polar) and (not other.polar): + return ComplexRegion(Intersection(self.sets, other.sets)) + + # self in polar form + elif self.polar and other.polar: + r1, theta1 = self.a_interval, self.b_interval + r2, theta2 = other.a_interval, other.b_interval + new_r_interval = Intersection(r1, r2) + new_theta_interval = Intersection(theta1, theta2) + + # 0 and 2*Pi means the same + if ((2*S.Pi in theta1 and S.Zero in theta2) or + (2*S.Pi in 
theta2 and S.Zero in theta1)): + new_theta_interval = Union(new_theta_interval, + FiniteSet(0)) + return ComplexRegion(new_r_interval*new_theta_interval, + polar=True) + + + if other.is_subset(S.Reals): + new_interval = [] + x = symbols("x", cls=Dummy, real=True) + + # self in rectangular form + if not self.polar: + for element in self.psets: + if S.Zero in element.args[1]: + new_interval.append(element.args[0]) + new_interval = Union(*new_interval) + return Intersection(new_interval, other) + + # self in polar form + elif self.polar: + for element in self.psets: + if S.Zero in element.args[1]: + new_interval.append(element.args[0]) + if S.Pi in element.args[1]: + new_interval.append(ImageSet(Lambda(x, -x), element.args[0])) + if S.Zero in element.args[0]: + new_interval.append(FiniteSet(0)) + new_interval = Union(*new_interval) + return Intersection(new_interval, other) + +@intersection_sets.register(Integers, Reals) +def _(a, b): + return a + +@intersection_sets.register(Range, Interval) +def _(a, b): + # Check that there are no symbolic arguments + if not all(i.is_number for i in a.args + b.args[:2]): + return + + # In case of null Range, return an EmptySet. + if a.size == 0: + return S.EmptySet + + # trim down to self's size, and represent + # as a Range with step 1. 
+ start = ceiling(max(b.inf, a.inf)) + if start not in b: + start += 1 + end = floor(min(b.sup, a.sup)) + if end not in b: + end -= 1 + return intersection_sets(a, Range(start, end + 1)) + +@intersection_sets.register(Range, Naturals) +def _(a, b): + return intersection_sets(a, Interval(b.inf, S.Infinity)) + +@intersection_sets.register(Range, Range) +def _(a, b): + # Check that there are no symbolic range arguments + if not all(all(v.is_number for v in r.args) for r in [a, b]): + return None + + # non-overlap quick exits + if not b: + return S.EmptySet + if not a: + return S.EmptySet + if b.sup < a.inf: + return S.EmptySet + if b.inf > a.sup: + return S.EmptySet + + # work with finite end at the start + r1 = a + if r1.start.is_infinite: + r1 = r1.reversed + r2 = b + if r2.start.is_infinite: + r2 = r2.reversed + + # If both ends are infinite then it means that one Range is just the set + # of all integers (the step must be 1). + if r1.start.is_infinite: + return b + if r2.start.is_infinite: + return a + + from sympy.solvers.diophantine.diophantine import diop_linear + + # this equation represents the values of the Range; + # it's a linear equation + eq = lambda r, i: r.start + i*r.step + + # we want to know when the two equations might + # have integer solutions so we use the diophantine + # solver + va, vb = diop_linear(eq(r1, Dummy('a')) - eq(r2, Dummy('b'))) + + # check for no solution + no_solution = va is None and vb is None + if no_solution: + return S.EmptySet + + # there is a solution + # ------------------- + + # find the coincident point, c + a0 = va.as_coeff_Add()[0] + c = eq(r1, a0) + + # find the first point, if possible, in each range + # since c may not be that point + def _first_finite_point(r1, c): + if c == r1.start: + return c + # st is the signed step we need to take to + # get from c to r1.start + st = sign(r1.start - c)*step + # use Range to calculate the first point: + # we want to get as close as possible to + # r1.start; the Range will not 
be null since + # it will at least contain c + s1 = Range(c, r1.start + st, st)[-1] + if s1 == r1.start: + pass + else: + # if we didn't hit r1.start then, if the + # sign of st didn't match the sign of r1.step + # we are off by one and s1 is not in r1 + if sign(r1.step) != sign(st): + s1 -= st + if s1 not in r1: + return + return s1 + + # calculate the step size of the new Range + step = abs(ilcm(r1.step, r2.step)) + s1 = _first_finite_point(r1, c) + if s1 is None: + return S.EmptySet + s2 = _first_finite_point(r2, c) + if s2 is None: + return S.EmptySet + + # replace the corresponding start or stop in + # the original Ranges with these points; the + # result must have at least one point since + # we know that s1 and s2 are in the Ranges + def _updated_range(r, first): + st = sign(r.step)*step + if r.start.is_finite: + rv = Range(first, r.stop, st) + else: + rv = Range(r.start, first + st, st) + return rv + r1 = _updated_range(a, s1) + r2 = _updated_range(b, s2) + + # work with them both in the increasing direction + if sign(r1.step) < 0: + r1 = r1.reversed + if sign(r2.step) < 0: + r2 = r2.reversed + + # return clipped Range with positive step; it + # can't be empty at this point + start = max(r1.start, r2.start) + stop = min(r1.stop, r2.stop) + return Range(start, stop, step) + + +@intersection_sets.register(Range, Integers) +def _(a, b): + return a + + +@intersection_sets.register(Range, Rationals) +def _(a, b): + return a + + +@intersection_sets.register(ImageSet, Set) +def _(self, other): + from sympy.solvers.diophantine import diophantine + + # Only handle the straight-forward univariate case + if (len(self.lamda.variables) > 1 + or self.lamda.signature != self.lamda.variables): + return None + base_set = self.base_sets[0] + + # Intersection between ImageSets with Integers as base set + # For {f(n) : n in Integers} & {g(m) : m in Integers} we solve the + # diophantine equations f(n)=g(m). 
+ # If the solutions for n are {h(t) : t in Integers} then we return + # {f(h(t)) : t in integers}. + # If the solutions for n are {n_1, n_2, ..., n_k} then we return + # {f(n_i) : 1 <= i <= k}. + if base_set is S.Integers: + gm = None + if isinstance(other, ImageSet) and other.base_sets == (S.Integers,): + gm = other.lamda.expr + var = other.lamda.variables[0] + # Symbol of second ImageSet lambda must be distinct from first + m = Dummy('m') + gm = gm.subs(var, m) + elif other is S.Integers: + m = gm = Dummy('m') + if gm is not None: + fn = self.lamda.expr + n = self.lamda.variables[0] + try: + solns = list(diophantine(fn - gm, syms=(n, m), permute=True)) + except (TypeError, NotImplementedError): + # TypeError if equation not polynomial with rational coeff. + # NotImplementedError if correct format but no solver. + return + # 3 cases are possible for solns: + # - empty set, + # - one or more parametric (infinite) solutions, + # - a finite number of (non-parametric) solution couples. + # Among those, there is one type of solution set that is + # not helpful here: multiple parametric solutions. 
+ if len(solns) == 0: + return S.EmptySet + elif any(s.free_symbols for tupl in solns for s in tupl): + if len(solns) == 1: + soln, solm = solns[0] + (t,) = soln.free_symbols + expr = fn.subs(n, soln.subs(t, n)).expand() + return imageset(Lambda(n, expr), S.Integers) + else: + return + else: + return FiniteSet(*(fn.subs(n, s[0]) for s in solns)) + + if other == S.Reals: + from sympy.solvers.solvers import denoms, solve_linear + + def _solution_union(exprs, sym): + # return a union of linear solutions to i in expr; + # if i cannot be solved, use a ConditionSet for solution + sols = [] + for i in exprs: + x, xis = solve_linear(i, 0, [sym]) + if x == sym: + sols.append(FiniteSet(xis)) + else: + sols.append(ConditionSet(sym, Eq(i, 0))) + return Union(*sols) + + f = self.lamda.expr + n = self.lamda.variables[0] + + n_ = Dummy(n.name, real=True) + f_ = f.subs(n, n_) + + re, im = f_.as_real_imag() + im = expand_complex(im) + + re = re.subs(n_, n) + im = im.subs(n_, n) + ifree = im.free_symbols + lam = Lambda(n, re) + if im.is_zero: + # allow re-evaluation + # of self in this case to make + # the result canonical + pass + elif im.is_zero is False: + return S.EmptySet + elif ifree != {n}: + return None + else: + # univarite imaginary part in same variable; + # use numer instead of as_numer_denom to keep + # this as fast as possible while still handling + # simple cases + base_set &= _solution_union( + Mul.make_args(numer(im)), n) + # exclude values that make denominators 0 + base_set -= _solution_union(denoms(f), n) + return imageset(lam, base_set) + + elif isinstance(other, Interval): + from sympy.solvers.solveset import (invert_real, invert_complex, + solveset) + + f = self.lamda.expr + n = self.lamda.variables[0] + new_inf, new_sup = None, None + new_lopen, new_ropen = other.left_open, other.right_open + + if f.is_real: + inverter = invert_real + else: + inverter = invert_complex + + g1, h1 = inverter(f, other.inf, n) + g2, h2 = inverter(f, other.sup, n) + + if 
all(isinstance(i, FiniteSet) for i in (h1, h2)): + if g1 == n: + if len(h1) == 1: + new_inf = h1.args[0] + if g2 == n: + if len(h2) == 1: + new_sup = h2.args[0] + # TODO: Design a technique to handle multiple-inverse + # functions + + # Any of the new boundary values cannot be determined + if any(i is None for i in (new_sup, new_inf)): + return + + + range_set = S.EmptySet + + if all(i.is_real for i in (new_sup, new_inf)): + # this assumes continuity of underlying function + # however fixes the case when it is decreasing + if new_inf > new_sup: + new_inf, new_sup = new_sup, new_inf + new_interval = Interval(new_inf, new_sup, new_lopen, new_ropen) + range_set = base_set.intersect(new_interval) + else: + if other.is_subset(S.Reals): + solutions = solveset(f, n, S.Reals) + if not isinstance(range_set, (ImageSet, ConditionSet)): + range_set = solutions.intersect(other) + else: + return + + if range_set is S.EmptySet: + return S.EmptySet + elif isinstance(range_set, Range) and range_set.size is not S.Infinity: + range_set = FiniteSet(*list(range_set)) + + if range_set is not None: + return imageset(Lambda(n, f), range_set) + return + else: + return + + +@intersection_sets.register(ProductSet, ProductSet) +def _(a, b): + if len(b.args) != len(a.args): + return S.EmptySet + return ProductSet(*(i.intersect(j) for i, j in zip(a.sets, b.sets))) + + +@intersection_sets.register(Interval, Interval) +def _(a, b): + # handle (-oo, oo) + infty = S.NegativeInfinity, S.Infinity + if a == Interval(*infty): + l, r = a.left, a.right + if l.is_real or l in infty or r.is_real or r in infty: + return b + + # We can't intersect [0,3] with [x,6] -- we don't know if x>0 or x<0 + if not a._is_comparable(b): + return None + + empty = False + + if a.start <= b.end and b.start <= a.end: + # Get topology right. 
+ if a.start < b.start: + start = b.start + left_open = b.left_open + elif a.start > b.start: + start = a.start + left_open = a.left_open + else: + start = a.start + if not _aresame(a.start, b.start): + # For example Integer(2) != Float(2) + # Prefer the Float boundary because Floats should be + # contagious in calculations. + if b.start.has(Float) and not a.start.has(Float): + start = b.start + elif a.start.has(Float) and not b.start.has(Float): + start = a.start + else: + #this is to ensure that if Eq(a.start, b.start) but + #type(a.start) != type(b.start) the order of a and b + #does not matter for the result + start = list(ordered([a,b]))[0].start + left_open = a.left_open or b.left_open + + if a.end < b.end: + end = a.end + right_open = a.right_open + elif a.end > b.end: + end = b.end + right_open = b.right_open + else: + # see above for logic with start + end = a.end + if not _aresame(a.end, b.end): + if b.end.has(Float) and not a.end.has(Float): + end = b.end + elif a.end.has(Float) and not b.end.has(Float): + end = a.end + else: + end = list(ordered([a,b]))[0].end + right_open = a.right_open or b.right_open + + if end - start == 0 and (left_open or right_open): + empty = True + else: + empty = True + + if empty: + return S.EmptySet + + return Interval(start, end, left_open, right_open) + +@intersection_sets.register(EmptySet, Set) +def _(a, b): + return S.EmptySet + +@intersection_sets.register(UniversalSet, Set) +def _(a, b): + return b + +@intersection_sets.register(FiniteSet, FiniteSet) +def _(a, b): + return FiniteSet(*(a._elements & b._elements)) + +@intersection_sets.register(FiniteSet, Set) +def _(a, b): + try: + return FiniteSet(*[el for el in a if el in b]) + except TypeError: + return None # could not evaluate `el in b` due to symbolic ranges. 
+ +@intersection_sets.register(Set, Set) +def _(a, b): + return None + +@intersection_sets.register(Integers, Rationals) +def _(a, b): + return a + +@intersection_sets.register(Naturals, Rationals) +def _(a, b): + return a + +@intersection_sets.register(Rationals, Reals) +def _(a, b): + return a + +def _intlike_interval(a, b): + try: + if b._inf is S.NegativeInfinity and b._sup is S.Infinity: + return a + s = Range(max(a.inf, ceiling(b.left)), floor(b.right) + 1) + return intersection_sets(s, b) # take out endpoints if open interval + except ValueError: + return None + +@intersection_sets.register(Integers, Interval) +def _(a, b): + return _intlike_interval(a, b) + +@intersection_sets.register(Naturals, Interval) +def _(a, b): + return _intlike_interval(a, b) diff --git a/vllm/lib/python3.10/site-packages/sympy/sets/handlers/issubset.py b/vllm/lib/python3.10/site-packages/sympy/sets/handlers/issubset.py new file mode 100644 index 0000000000000000000000000000000000000000..cc23e8bf56f1743cd7f08452dd09a0acf981f5da --- /dev/null +++ b/vllm/lib/python3.10/site-packages/sympy/sets/handlers/issubset.py @@ -0,0 +1,144 @@ +from sympy.core.singleton import S +from sympy.core.symbol import Symbol +from sympy.core.logic import fuzzy_and, fuzzy_bool, fuzzy_not, fuzzy_or +from sympy.core.relational import Eq +from sympy.sets.sets import FiniteSet, Interval, Set, Union, ProductSet +from sympy.sets.fancysets import Complexes, Reals, Range, Rationals +from sympy.multipledispatch import Dispatcher + + +_inf_sets = [S.Naturals, S.Naturals0, S.Integers, S.Rationals, S.Reals, S.Complexes] + + +is_subset_sets = Dispatcher('is_subset_sets') + + +@is_subset_sets.register(Set, Set) +def _(a, b): + return None + +@is_subset_sets.register(Interval, Interval) +def _(a, b): + # This is correct but can be made more comprehensive... 
+ if fuzzy_bool(a.start < b.start): + return False + if fuzzy_bool(a.end > b.end): + return False + if (b.left_open and not a.left_open and fuzzy_bool(Eq(a.start, b.start))): + return False + if (b.right_open and not a.right_open and fuzzy_bool(Eq(a.end, b.end))): + return False + +@is_subset_sets.register(Interval, FiniteSet) +def _(a_interval, b_fs): + # An Interval can only be a subset of a finite set if it is finite + # which can only happen if it has zero measure. + if fuzzy_not(a_interval.measure.is_zero): + return False + +@is_subset_sets.register(Interval, Union) +def _(a_interval, b_u): + if all(isinstance(s, (Interval, FiniteSet)) for s in b_u.args): + intervals = [s for s in b_u.args if isinstance(s, Interval)] + if all(fuzzy_bool(a_interval.start < s.start) for s in intervals): + return False + if all(fuzzy_bool(a_interval.end > s.end) for s in intervals): + return False + if a_interval.measure.is_nonzero: + no_overlap = lambda s1, s2: fuzzy_or([ + fuzzy_bool(s1.end <= s2.start), + fuzzy_bool(s1.start >= s2.end), + ]) + if all(no_overlap(s, a_interval) for s in intervals): + return False + +@is_subset_sets.register(Range, Range) +def _(a, b): + if a.step == b.step == 1: + return fuzzy_and([fuzzy_bool(a.start >= b.start), + fuzzy_bool(a.stop <= b.stop)]) + +@is_subset_sets.register(Range, Interval) +def _(a_range, b_interval): + if a_range.step.is_positive: + if b_interval.left_open and a_range.inf.is_finite: + cond_left = a_range.inf > b_interval.left + else: + cond_left = a_range.inf >= b_interval.left + if b_interval.right_open and a_range.sup.is_finite: + cond_right = a_range.sup < b_interval.right + else: + cond_right = a_range.sup <= b_interval.right + return fuzzy_and([cond_left, cond_right]) + +@is_subset_sets.register(Range, FiniteSet) +def _(a_range, b_finiteset): + try: + a_size = a_range.size + except ValueError: + # symbolic Range of unknown size + return None + if a_size > len(b_finiteset): + return False + elif any(arg.has(Symbol) for arg 
in a_range.args): + return fuzzy_and(b_finiteset.contains(x) for x in a_range) + else: + # Checking A \ B == EmptySet is more efficient than repeated naive + # membership checks on an arbitrary FiniteSet. + a_set = set(a_range) + b_remaining = len(b_finiteset) + # Symbolic expressions and numbers of unknown type (integer or not) are + # all counted as "candidates", i.e. *potentially* matching some a in + # a_range. + cnt_candidate = 0 + for b in b_finiteset: + if b.is_Integer: + a_set.discard(b) + elif fuzzy_not(b.is_integer): + pass + else: + cnt_candidate += 1 + b_remaining -= 1 + if len(a_set) > b_remaining + cnt_candidate: + return False + if len(a_set) == 0: + return True + return None + +@is_subset_sets.register(Interval, Range) +def _(a_interval, b_range): + if a_interval.measure.is_extended_nonzero: + return False + +@is_subset_sets.register(Interval, Rationals) +def _(a_interval, b_rationals): + if a_interval.measure.is_extended_nonzero: + return False + +@is_subset_sets.register(Range, Complexes) +def _(a, b): + return True + +@is_subset_sets.register(Complexes, Interval) +def _(a, b): + return False + +@is_subset_sets.register(Complexes, Range) +def _(a, b): + return False + +@is_subset_sets.register(Complexes, Rationals) +def _(a, b): + return False + +@is_subset_sets.register(Rationals, Reals) +def _(a, b): + return True + +@is_subset_sets.register(Rationals, Range) +def _(a, b): + return False + +@is_subset_sets.register(ProductSet, FiniteSet) +def _(a_ps, b_fs): + return fuzzy_and(b_fs.contains(x) for x in a_ps) diff --git a/vllm/lib/python3.10/site-packages/sympy/sets/handlers/mul.py b/vllm/lib/python3.10/site-packages/sympy/sets/handlers/mul.py new file mode 100644 index 0000000000000000000000000000000000000000..0dedc8068b7973fd4cb6fbf2854e5fa671d188de --- /dev/null +++ b/vllm/lib/python3.10/site-packages/sympy/sets/handlers/mul.py @@ -0,0 +1,79 @@ +from sympy.core import Basic, Expr +from sympy.core.numbers import oo +from sympy.core.symbol import 
symbols +from sympy.multipledispatch import Dispatcher +from sympy.sets.setexpr import set_mul +from sympy.sets.sets import Interval, Set + + +_x, _y = symbols("x y") + + +_set_mul = Dispatcher('_set_mul') +_set_div = Dispatcher('_set_div') + + +@_set_mul.register(Basic, Basic) +def _(x, y): + return None + +@_set_mul.register(Set, Set) +def _(x, y): + return None + +@_set_mul.register(Expr, Expr) +def _(x, y): + return x*y + +@_set_mul.register(Interval, Interval) +def _(x, y): + """ + Multiplications in interval arithmetic + https://en.wikipedia.org/wiki/Interval_arithmetic + """ + # TODO: some intervals containing 0 and oo will fail as 0*oo returns nan. + comvals = ( + (x.start * y.start, bool(x.left_open or y.left_open)), + (x.start * y.end, bool(x.left_open or y.right_open)), + (x.end * y.start, bool(x.right_open or y.left_open)), + (x.end * y.end, bool(x.right_open or y.right_open)), + ) + # TODO: handle symbolic intervals + minval, minopen = min(comvals) + maxval, maxopen = max(comvals) + return Interval( + minval, + maxval, + minopen, + maxopen + ) + +@_set_div.register(Basic, Basic) +def _(x, y): + return None + +@_set_div.register(Expr, Expr) +def _(x, y): + return x/y + +@_set_div.register(Set, Set) +def _(x, y): + return None + +@_set_div.register(Interval, Interval) +def _(x, y): + """ + Divisions in interval arithmetic + https://en.wikipedia.org/wiki/Interval_arithmetic + """ + if (y.start*y.end).is_negative: + return Interval(-oo, oo) + if y.start == 0: + s2 = oo + else: + s2 = 1/y.start + if y.end == 0: + s1 = -oo + else: + s1 = 1/y.end + return set_mul(x, Interval(s1, s2, y.right_open, y.left_open)) diff --git a/vllm/lib/python3.10/site-packages/sympy/sets/ordinals.py b/vllm/lib/python3.10/site-packages/sympy/sets/ordinals.py new file mode 100644 index 0000000000000000000000000000000000000000..cfe062354cfe58a4747998e51fa0d261e67576cc --- /dev/null +++ b/vllm/lib/python3.10/site-packages/sympy/sets/ordinals.py @@ -0,0 +1,282 @@ +from sympy.core 
import Basic, Integer +import operator + + +class OmegaPower(Basic): + """ + Represents ordinal exponential and multiplication terms one of the + building blocks of the :class:`Ordinal` class. + In ``OmegaPower(a, b)``, ``a`` represents exponent and ``b`` represents multiplicity. + """ + def __new__(cls, a, b): + if isinstance(b, int): + b = Integer(b) + if not isinstance(b, Integer) or b <= 0: + raise TypeError("multiplicity must be a positive integer") + + if not isinstance(a, Ordinal): + a = Ordinal.convert(a) + + return Basic.__new__(cls, a, b) + + @property + def exp(self): + return self.args[0] + + @property + def mult(self): + return self.args[1] + + def _compare_term(self, other, op): + if self.exp == other.exp: + return op(self.mult, other.mult) + else: + return op(self.exp, other.exp) + + def __eq__(self, other): + if not isinstance(other, OmegaPower): + try: + other = OmegaPower(0, other) + except TypeError: + return NotImplemented + return self.args == other.args + + def __hash__(self): + return Basic.__hash__(self) + + def __lt__(self, other): + if not isinstance(other, OmegaPower): + try: + other = OmegaPower(0, other) + except TypeError: + return NotImplemented + return self._compare_term(other, operator.lt) + + +class Ordinal(Basic): + """ + Represents ordinals in Cantor normal form. + + Internally, this class is just a list of instances of OmegaPower. + + Examples + ======== + >>> from sympy import Ordinal, OmegaPower + >>> from sympy.sets.ordinals import omega + >>> w = omega + >>> w.is_limit_ordinal + True + >>> Ordinal(OmegaPower(w + 1, 1), OmegaPower(3, 2)) + w**(w + 1) + w**3*2 + >>> 3 + w + w + >>> (w + 1) * w + w**2 + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Ordinal_arithmetic + """ + def __new__(cls, *terms): + obj = super().__new__(cls, *terms) + powers = [i.exp for i in obj.args] + if not all(powers[i] >= powers[i+1] for i in range(len(powers) - 1)): + raise ValueError("powers must be in decreasing order") + return obj + + @property + def terms(self): + return self.args + + @property + def leading_term(self): + if self == ord0: + raise ValueError("ordinal zero has no leading term") + return self.terms[0] + + @property + def trailing_term(self): + if self == ord0: + raise ValueError("ordinal zero has no trailing term") + return self.terms[-1] + + @property + def is_successor_ordinal(self): + try: + return self.trailing_term.exp == ord0 + except ValueError: + return False + + @property + def is_limit_ordinal(self): + try: + return not self.trailing_term.exp == ord0 + except ValueError: + return False + + @property + def degree(self): + return self.leading_term.exp + + @classmethod + def convert(cls, integer_value): + if integer_value == 0: + return ord0 + return Ordinal(OmegaPower(0, integer_value)) + + def __eq__(self, other): + if not isinstance(other, Ordinal): + try: + other = Ordinal.convert(other) + except TypeError: + return NotImplemented + return self.terms == other.terms + + def __hash__(self): + return hash(self.args) + + def __lt__(self, other): + if not isinstance(other, Ordinal): + try: + other = Ordinal.convert(other) + except TypeError: + return NotImplemented + for term_self, term_other in zip(self.terms, other.terms): + if term_self != term_other: + return term_self < term_other + return len(self.terms) < len(other.terms) + + def __le__(self, other): + return (self == other or self < other) + + def __gt__(self, other): + return not self <= other + + def __ge__(self, other): + return not self < other + + def __str__(self): + net_str = "" + plus_count = 0 + if self == ord0: + return 'ord0' + for i in self.terms: + if plus_count: + net_str += " + " + + if i.exp == ord0: 
+ net_str += str(i.mult) + elif i.exp == 1: + net_str += 'w' + elif len(i.exp.terms) > 1 or i.exp.is_limit_ordinal: + net_str += 'w**(%s)'%i.exp + else: + net_str += 'w**%s'%i.exp + + if not i.mult == 1 and not i.exp == ord0: + net_str += '*%s'%i.mult + + plus_count += 1 + return(net_str) + + __repr__ = __str__ + + def __add__(self, other): + if not isinstance(other, Ordinal): + try: + other = Ordinal.convert(other) + except TypeError: + return NotImplemented + if other == ord0: + return self + a_terms = list(self.terms) + b_terms = list(other.terms) + r = len(a_terms) - 1 + b_exp = other.degree + while r >= 0 and a_terms[r].exp < b_exp: + r -= 1 + if r < 0: + terms = b_terms + elif a_terms[r].exp == b_exp: + sum_term = OmegaPower(b_exp, a_terms[r].mult + other.leading_term.mult) + terms = a_terms[:r] + [sum_term] + b_terms[1:] + else: + terms = a_terms[:r+1] + b_terms + return Ordinal(*terms) + + def __radd__(self, other): + if not isinstance(other, Ordinal): + try: + other = Ordinal.convert(other) + except TypeError: + return NotImplemented + return other + self + + def __mul__(self, other): + if not isinstance(other, Ordinal): + try: + other = Ordinal.convert(other) + except TypeError: + return NotImplemented + if ord0 in (self, other): + return ord0 + a_exp = self.degree + a_mult = self.leading_term.mult + summation = [] + if other.is_limit_ordinal: + for arg in other.terms: + summation.append(OmegaPower(a_exp + arg.exp, arg.mult)) + + else: + for arg in other.terms[:-1]: + summation.append(OmegaPower(a_exp + arg.exp, arg.mult)) + b_mult = other.trailing_term.mult + summation.append(OmegaPower(a_exp, a_mult*b_mult)) + summation += list(self.terms[1:]) + return Ordinal(*summation) + + def __rmul__(self, other): + if not isinstance(other, Ordinal): + try: + other = Ordinal.convert(other) + except TypeError: + return NotImplemented + return other * self + + def __pow__(self, other): + if not self == omega: + return NotImplemented + return Ordinal(OmegaPower(other, 
1)) + + +class OrdinalZero(Ordinal): + """The ordinal zero. + + OrdinalZero can be imported as ``ord0``. + """ + pass + + +class OrdinalOmega(Ordinal): + """The ordinal omega which forms the base of all ordinals in cantor normal form. + + OrdinalOmega can be imported as ``omega``. + + Examples + ======== + + >>> from sympy.sets.ordinals import omega + >>> omega + omega + w*2 + """ + def __new__(cls): + return Ordinal.__new__(cls) + + @property + def terms(self): + return (OmegaPower(1, 1),) + + +ord0 = OrdinalZero() +omega = OrdinalOmega() diff --git a/vllm/lib/python3.10/site-packages/sympy/sets/powerset.py b/vllm/lib/python3.10/site-packages/sympy/sets/powerset.py new file mode 100644 index 0000000000000000000000000000000000000000..2eb3b41b9859281480bc9517a1cad0abe7a5683f --- /dev/null +++ b/vllm/lib/python3.10/site-packages/sympy/sets/powerset.py @@ -0,0 +1,119 @@ +from sympy.core.decorators import _sympifyit +from sympy.core.parameters import global_parameters +from sympy.core.logic import fuzzy_bool +from sympy.core.singleton import S +from sympy.core.sympify import _sympify + +from .sets import Set, FiniteSet, SetKind + + +class PowerSet(Set): + r"""A symbolic object representing a power set. + + Parameters + ========== + + arg : Set + The set to take power of. + + evaluate : bool + The flag to control evaluation. + + If the evaluation is disabled for finite sets, it can take + advantage of using subset test as a membership test. + + Notes + ===== + + Power set `\mathcal{P}(S)` is defined as a set containing all the + subsets of `S`. + + If the set `S` is a finite set, its power set would have + `2^{\left| S \right|}` elements, where `\left| S \right|` denotes + the cardinality of `S`. 
+ + Examples + ======== + + >>> from sympy import PowerSet, S, FiniteSet + + A power set of a finite set: + + >>> PowerSet(FiniteSet(1, 2, 3)) + PowerSet({1, 2, 3}) + + A power set of an empty set: + + >>> PowerSet(S.EmptySet) + PowerSet(EmptySet) + >>> PowerSet(PowerSet(S.EmptySet)) + PowerSet(PowerSet(EmptySet)) + + A power set of an infinite set: + + >>> PowerSet(S.Reals) + PowerSet(Reals) + + Evaluating the power set of a finite set to its explicit form: + + >>> PowerSet(FiniteSet(1, 2, 3)).rewrite(FiniteSet) + FiniteSet(EmptySet, {1}, {2}, {3}, {1, 2}, {1, 3}, {2, 3}, {1, 2, 3}) + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Power_set + + .. [2] https://en.wikipedia.org/wiki/Axiom_of_power_set + """ + def __new__(cls, arg, evaluate=None): + if evaluate is None: + evaluate=global_parameters.evaluate + + arg = _sympify(arg) + + if not isinstance(arg, Set): + raise ValueError('{} must be a set.'.format(arg)) + + return super().__new__(cls, arg) + + @property + def arg(self): + return self.args[0] + + def _eval_rewrite_as_FiniteSet(self, *args, **kwargs): + arg = self.arg + if arg.is_FiniteSet: + return arg.powerset() + return None + + @_sympifyit('other', NotImplemented) + def _contains(self, other): + if not isinstance(other, Set): + return None + + return fuzzy_bool(self.arg.is_superset(other)) + + def _eval_is_subset(self, other): + if isinstance(other, PowerSet): + return self.arg.is_subset(other.arg) + + def __len__(self): + return 2 ** len(self.arg) + + def __iter__(self): + found = [S.EmptySet] + yield S.EmptySet + + for x in self.arg: + temp = [] + x = FiniteSet(x) + for y in found: + new = x + y + yield new + temp.append(new) + found.extend(temp) + + @property + def kind(self): + return SetKind(self.arg.kind) diff --git a/vllm/lib/python3.10/site-packages/sympy/sets/setexpr.py b/vllm/lib/python3.10/site-packages/sympy/sets/setexpr.py new file mode 100644 index 
0000000000000000000000000000000000000000..94d77d5293617a620b70a945888987ce6cc61157 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/sympy/sets/setexpr.py @@ -0,0 +1,97 @@ +from sympy.core import Expr +from sympy.core.decorators import call_highest_priority, _sympifyit +from .fancysets import ImageSet +from .sets import set_add, set_sub, set_mul, set_div, set_pow, set_function + + +class SetExpr(Expr): + """An expression that can take on values of a set. + + Examples + ======== + + >>> from sympy import Interval, FiniteSet + >>> from sympy.sets.setexpr import SetExpr + + >>> a = SetExpr(Interval(0, 5)) + >>> b = SetExpr(FiniteSet(1, 10)) + >>> (a + b).set + Union(Interval(1, 6), Interval(10, 15)) + >>> (2*a + b).set + Interval(1, 20) + """ + _op_priority = 11.0 + + def __new__(cls, setarg): + return Expr.__new__(cls, setarg) + + set = property(lambda self: self.args[0]) + + def _latex(self, printer): + return r"SetExpr\left({}\right)".format(printer._print(self.set)) + + @_sympifyit('other', NotImplemented) + @call_highest_priority('__radd__') + def __add__(self, other): + return _setexpr_apply_operation(set_add, self, other) + + @_sympifyit('other', NotImplemented) + @call_highest_priority('__add__') + def __radd__(self, other): + return _setexpr_apply_operation(set_add, other, self) + + @_sympifyit('other', NotImplemented) + @call_highest_priority('__rmul__') + def __mul__(self, other): + return _setexpr_apply_operation(set_mul, self, other) + + @_sympifyit('other', NotImplemented) + @call_highest_priority('__mul__') + def __rmul__(self, other): + return _setexpr_apply_operation(set_mul, other, self) + + @_sympifyit('other', NotImplemented) + @call_highest_priority('__rsub__') + def __sub__(self, other): + return _setexpr_apply_operation(set_sub, self, other) + + @_sympifyit('other', NotImplemented) + @call_highest_priority('__sub__') + def __rsub__(self, other): + return _setexpr_apply_operation(set_sub, other, self) + + @_sympifyit('other', NotImplemented) 
+ @call_highest_priority('__rpow__') + def __pow__(self, other): + return _setexpr_apply_operation(set_pow, self, other) + + @_sympifyit('other', NotImplemented) + @call_highest_priority('__pow__') + def __rpow__(self, other): + return _setexpr_apply_operation(set_pow, other, self) + + @_sympifyit('other', NotImplemented) + @call_highest_priority('__rtruediv__') + def __truediv__(self, other): + return _setexpr_apply_operation(set_div, self, other) + + @_sympifyit('other', NotImplemented) + @call_highest_priority('__truediv__') + def __rtruediv__(self, other): + return _setexpr_apply_operation(set_div, other, self) + + def _eval_func(self, func): + # TODO: this could be implemented straight into `imageset`: + res = set_function(func, self.set) + if res is None: + return SetExpr(ImageSet(func, self.set)) + return SetExpr(res) + + +def _setexpr_apply_operation(op, x, y): + if isinstance(x, SetExpr): + x = x.set + if isinstance(y, SetExpr): + y = y.set + out = op(x, y) + return SetExpr(out) diff --git a/vllm/lib/python3.10/site-packages/sympy/sets/sets.py b/vllm/lib/python3.10/site-packages/sympy/sets/sets.py new file mode 100644 index 0000000000000000000000000000000000000000..391150f213002d7b997181cb7363e50e567da580 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/sympy/sets/sets.py @@ -0,0 +1,2769 @@ +from typing import Any, Callable +from functools import reduce +from collections import defaultdict +import inspect + +from sympy.core.kind import Kind, UndefinedKind, NumberKind +from sympy.core.basic import Basic +from sympy.core.containers import Tuple, TupleKind +from sympy.core.decorators import sympify_method_args, sympify_return +from sympy.core.evalf import EvalfMixin +from sympy.core.expr import Expr +from sympy.core.function import Lambda +from sympy.core.logic import (FuzzyBool, fuzzy_bool, fuzzy_or, fuzzy_and, + fuzzy_not) +from sympy.core.numbers import Float, Integer +from sympy.core.operations import LatticeOp +from sympy.core.parameters import 
global_parameters +from sympy.core.relational import Eq, Ne, is_lt +from sympy.core.singleton import Singleton, S +from sympy.core.sorting import ordered +from sympy.core.symbol import symbols, Symbol, Dummy, uniquely_named_symbol +from sympy.core.sympify import _sympify, sympify, _sympy_converter +from sympy.functions.elementary.exponential import exp, log +from sympy.functions.elementary.miscellaneous import Max, Min +from sympy.logic.boolalg import And, Or, Not, Xor, true, false +from sympy.utilities.decorator import deprecated +from sympy.utilities.exceptions import sympy_deprecation_warning +from sympy.utilities.iterables import (iproduct, sift, roundrobin, iterable, + subsets) +from sympy.utilities.misc import func_name, filldedent + +from mpmath import mpi, mpf + +from mpmath.libmp.libmpf import prec_to_dps + + +tfn = defaultdict(lambda: None, { + True: S.true, + S.true: S.true, + False: S.false, + S.false: S.false}) + + +@sympify_method_args +class Set(Basic, EvalfMixin): + """ + The base class for any kind of set. + + Explanation + =========== + + This is not meant to be used directly as a container of items. It does not + behave like the builtin ``set``; see :class:`FiniteSet` for that. + + Real intervals are represented by the :class:`Interval` class and unions of + sets by the :class:`Union` class. The empty set is represented by the + :class:`EmptySet` class and available as a singleton as ``S.EmptySet``. + """ + + __slots__ = () + + is_number = False + is_iterable = False + is_interval = False + + is_FiniteSet = False + is_Interval = False + is_ProductSet = False + is_Union = False + is_Intersection: FuzzyBool = None + is_UniversalSet: FuzzyBool = None + is_Complement: FuzzyBool = None + is_ComplexRegion = False + + is_empty: FuzzyBool = None + is_finite_set: FuzzyBool = None + + @property # type: ignore + @deprecated( + """ + The is_EmptySet attribute of Set objects is deprecated. + Use 's is S.EmptySet" or 's.is_empty' instead. 
+ """, + deprecated_since_version="1.5", + active_deprecations_target="deprecated-is-emptyset", + ) + def is_EmptySet(self): + return None + + @staticmethod + def _infimum_key(expr): + """ + Return infimum (if possible) else S.Infinity. + """ + try: + infimum = expr.inf + assert infimum.is_comparable + infimum = infimum.evalf() # issue #18505 + except (NotImplementedError, + AttributeError, AssertionError, ValueError): + infimum = S.Infinity + return infimum + + def union(self, other): + """ + Returns the union of ``self`` and ``other``. + + Examples + ======== + + As a shortcut it is possible to use the ``+`` operator: + + >>> from sympy import Interval, FiniteSet + >>> Interval(0, 1).union(Interval(2, 3)) + Union(Interval(0, 1), Interval(2, 3)) + >>> Interval(0, 1) + Interval(2, 3) + Union(Interval(0, 1), Interval(2, 3)) + >>> Interval(1, 2, True, True) + FiniteSet(2, 3) + Union({3}, Interval.Lopen(1, 2)) + + Similarly it is possible to use the ``-`` operator for set differences: + + >>> Interval(0, 2) - Interval(0, 1) + Interval.Lopen(1, 2) + >>> Interval(1, 3) - FiniteSet(2) + Union(Interval.Ropen(1, 2), Interval.Lopen(2, 3)) + + """ + return Union(self, other) + + def intersect(self, other): + """ + Returns the intersection of 'self' and 'other'. + + Examples + ======== + + >>> from sympy import Interval + + >>> Interval(1, 3).intersect(Interval(1, 2)) + Interval(1, 2) + + >>> from sympy import imageset, Lambda, symbols, S + >>> n, m = symbols('n m') + >>> a = imageset(Lambda(n, 2*n), S.Integers) + >>> a.intersect(imageset(Lambda(m, 2*m + 1), S.Integers)) + EmptySet + + """ + return Intersection(self, other) + + def intersection(self, other): + """ + Alias for :meth:`intersect()` + """ + return self.intersect(other) + + def is_disjoint(self, other): + """ + Returns True if ``self`` and ``other`` are disjoint. 
+ + Examples + ======== + + >>> from sympy import Interval + >>> Interval(0, 2).is_disjoint(Interval(1, 2)) + False + >>> Interval(0, 2).is_disjoint(Interval(3, 4)) + True + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Disjoint_sets + """ + return self.intersect(other) == S.EmptySet + + def isdisjoint(self, other): + """ + Alias for :meth:`is_disjoint()` + """ + return self.is_disjoint(other) + + def complement(self, universe): + r""" + The complement of 'self' w.r.t the given universe. + + Examples + ======== + + >>> from sympy import Interval, S + >>> Interval(0, 1).complement(S.Reals) + Union(Interval.open(-oo, 0), Interval.open(1, oo)) + + >>> Interval(0, 1).complement(S.UniversalSet) + Complement(UniversalSet, Interval(0, 1)) + + """ + return Complement(universe, self) + + def _complement(self, other): + # this behaves as other - self + if isinstance(self, ProductSet) and isinstance(other, ProductSet): + # If self and other are disjoint then other - self == self + if len(self.sets) != len(other.sets): + return other + + # There can be other ways to represent this but this gives: + # (A x B) - (C x D) = ((A - C) x B) U (A x (B - D)) + overlaps = [] + pairs = list(zip(self.sets, other.sets)) + for n in range(len(pairs)): + sets = (o if i != n else o-s for i, (s, o) in enumerate(pairs)) + overlaps.append(ProductSet(*sets)) + return Union(*overlaps) + + elif isinstance(other, Interval): + if isinstance(self, (Interval, FiniteSet)): + return Intersection(other, self.complement(S.Reals)) + + elif isinstance(other, Union): + return Union(*(o - self for o in other.args)) + + elif isinstance(other, Complement): + return Complement(other.args[0], Union(other.args[1], self), evaluate=False) + + elif other is S.EmptySet: + return S.EmptySet + + elif isinstance(other, FiniteSet): + sifted = sift(other, lambda x: fuzzy_bool(self.contains(x))) + # ignore those that are contained in self + return Union(FiniteSet(*(sifted[False])), + 
Complement(FiniteSet(*(sifted[None])), self, evaluate=False) + if sifted[None] else S.EmptySet) + + def symmetric_difference(self, other): + """ + Returns symmetric difference of ``self`` and ``other``. + + Examples + ======== + + >>> from sympy import Interval, S + >>> Interval(1, 3).symmetric_difference(S.Reals) + Union(Interval.open(-oo, 1), Interval.open(3, oo)) + >>> Interval(1, 10).symmetric_difference(S.Reals) + Union(Interval.open(-oo, 1), Interval.open(10, oo)) + + >>> from sympy import S, EmptySet + >>> S.Reals.symmetric_difference(EmptySet) + Reals + + References + ========== + .. [1] https://en.wikipedia.org/wiki/Symmetric_difference + + """ + return SymmetricDifference(self, other) + + def _symmetric_difference(self, other): + return Union(Complement(self, other), Complement(other, self)) + + @property + def inf(self): + """ + The infimum of ``self``. + + Examples + ======== + + >>> from sympy import Interval, Union + >>> Interval(0, 1).inf + 0 + >>> Union(Interval(0, 1), Interval(2, 3)).inf + 0 + + """ + return self._inf + + @property + def _inf(self): + raise NotImplementedError("(%s)._inf" % self) + + @property + def sup(self): + """ + The supremum of ``self``. + + Examples + ======== + + >>> from sympy import Interval, Union + >>> Interval(0, 1).sup + 1 + >>> Union(Interval(0, 1), Interval(2, 3)).sup + 3 + + """ + return self._sup + + @property + def _sup(self): + raise NotImplementedError("(%s)._sup" % self) + + def contains(self, other): + """ + Returns a SymPy value indicating whether ``other`` is contained + in ``self``: ``true`` if it is, ``false`` if it is not, else + an unevaluated ``Contains`` expression (or, as in the case of + ConditionSet and a union of FiniteSet/Intervals, an expression + indicating the conditions for containment). 
+ + Examples + ======== + + >>> from sympy import Interval, S + >>> from sympy.abc import x + + >>> Interval(0, 1).contains(0.5) + True + + As a shortcut it is possible to use the ``in`` operator, but that + will raise an error unless an affirmative true or false is not + obtained. + + >>> Interval(0, 1).contains(x) + (0 <= x) & (x <= 1) + >>> x in Interval(0, 1) + Traceback (most recent call last): + ... + TypeError: did not evaluate to a bool: None + + The result of 'in' is a bool, not a SymPy value + + >>> 1 in Interval(0, 2) + True + >>> _ is S.true + False + """ + from .contains import Contains + other = sympify(other, strict=True) + + c = self._contains(other) + if isinstance(c, Contains): + return c + if c is None: + return Contains(other, self, evaluate=False) + b = tfn[c] + if b is None: + return c + return b + + def _contains(self, other): + """Test if ``other`` is an element of the set ``self``. + + This is an internal method that is expected to be overridden by + subclasses of ``Set`` and will be called by the public + :func:`Set.contains` method or the :class:`Contains` expression. + + Parameters + ========== + + other: Sympified :class:`Basic` instance + The object whose membership in ``self`` is to be tested. + + Returns + ======= + + Symbolic :class:`Boolean` or ``None``. + + A return value of ``None`` indicates that it is unknown whether + ``other`` is contained in ``self``. Returning ``None`` from here + ensures that ``self.contains(other)`` or ``Contains(self, other)`` will + return an unevaluated :class:`Contains` expression. + + If not ``None`` then the returned value is a :class:`Boolean` that is + logically equivalent to the statement that ``other`` is an element of + ``self``. Usually this would be either ``S.true`` or ``S.false`` but + not always. + """ + raise NotImplementedError(f"{type(self).__name__}._contains") + + def is_subset(self, other): + """ + Returns True if ``self`` is a subset of ``other``. 
+ + Examples + ======== + + >>> from sympy import Interval + >>> Interval(0, 0.5).is_subset(Interval(0, 1)) + True + >>> Interval(0, 1).is_subset(Interval(0, 1, left_open=True)) + False + + """ + if not isinstance(other, Set): + raise ValueError("Unknown argument '%s'" % other) + + # Handle the trivial cases + if self == other: + return True + is_empty = self.is_empty + if is_empty is True: + return True + elif fuzzy_not(is_empty) and other.is_empty: + return False + if self.is_finite_set is False and other.is_finite_set: + return False + + # Dispatch on subclass rules + ret = self._eval_is_subset(other) + if ret is not None: + return ret + ret = other._eval_is_superset(self) + if ret is not None: + return ret + + # Use pairwise rules from multiple dispatch + from sympy.sets.handlers.issubset import is_subset_sets + ret = is_subset_sets(self, other) + if ret is not None: + return ret + + # Fall back on computing the intersection + # XXX: We shouldn't do this. A query like this should be handled + # without evaluating new Set objects. It should be the other way round + # so that the intersect method uses is_subset for evaluation. + if self.intersect(other) == self: + return True + + def _eval_is_subset(self, other): + '''Returns a fuzzy bool for whether self is a subset of other.''' + return None + + def _eval_is_superset(self, other): + '''Returns a fuzzy bool for whether self is a subset of other.''' + return None + + # This should be deprecated: + def issubset(self, other): + """ + Alias for :meth:`is_subset()` + """ + return self.is_subset(other) + + def is_proper_subset(self, other): + """ + Returns True if ``self`` is a proper subset of ``other``. 
+ + Examples + ======== + + >>> from sympy import Interval + >>> Interval(0, 0.5).is_proper_subset(Interval(0, 1)) + True + >>> Interval(0, 1).is_proper_subset(Interval(0, 1)) + False + + """ + if isinstance(other, Set): + return self != other and self.is_subset(other) + else: + raise ValueError("Unknown argument '%s'" % other) + + def is_superset(self, other): + """ + Returns True if ``self`` is a superset of ``other``. + + Examples + ======== + + >>> from sympy import Interval + >>> Interval(0, 0.5).is_superset(Interval(0, 1)) + False + >>> Interval(0, 1).is_superset(Interval(0, 1, left_open=True)) + True + + """ + if isinstance(other, Set): + return other.is_subset(self) + else: + raise ValueError("Unknown argument '%s'" % other) + + # This should be deprecated: + def issuperset(self, other): + """ + Alias for :meth:`is_superset()` + """ + return self.is_superset(other) + + def is_proper_superset(self, other): + """ + Returns True if ``self`` is a proper superset of ``other``. + + Examples + ======== + + >>> from sympy import Interval + >>> Interval(0, 1).is_proper_superset(Interval(0, 0.5)) + True + >>> Interval(0, 1).is_proper_superset(Interval(0, 1)) + False + + """ + if isinstance(other, Set): + return self != other and self.is_superset(other) + else: + raise ValueError("Unknown argument '%s'" % other) + + def _eval_powerset(self): + from .powerset import PowerSet + return PowerSet(self) + + def powerset(self): + """ + Find the Power set of ``self``. + + Examples + ======== + + >>> from sympy import EmptySet, FiniteSet, Interval + + A power set of an empty set: + + >>> A = EmptySet + >>> A.powerset() + {EmptySet} + + A power set of a finite set: + + >>> A = FiniteSet(1, 2) + >>> a, b, c = FiniteSet(1), FiniteSet(2), FiniteSet(1, 2) + >>> A.powerset() == FiniteSet(a, b, c, EmptySet) + True + + A power set of an interval: + + >>> Interval(1, 2).powerset() + PowerSet(Interval(1, 2)) + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Power_set + + """ + return self._eval_powerset() + + @property + def measure(self): + """ + The (Lebesgue) measure of ``self``. + + Examples + ======== + + >>> from sympy import Interval, Union + >>> Interval(0, 1).measure + 1 + >>> Union(Interval(0, 1), Interval(2, 3)).measure + 2 + + """ + return self._measure + + @property + def kind(self): + """ + The kind of a Set + + Explanation + =========== + + Any :class:`Set` will have kind :class:`SetKind` which is + parametrised by the kind of the elements of the set. For example + most sets are sets of numbers and will have kind + ``SetKind(NumberKind)``. If elements of sets are different in kind than + their kind will ``SetKind(UndefinedKind)``. See + :class:`sympy.core.kind.Kind` for an explanation of the kind system. + + Examples + ======== + + >>> from sympy import Interval, Matrix, FiniteSet, EmptySet, ProductSet, PowerSet + + >>> FiniteSet(Matrix([1, 2])).kind + SetKind(MatrixKind(NumberKind)) + + >>> Interval(1, 2).kind + SetKind(NumberKind) + + >>> EmptySet.kind + SetKind() + + A :class:`sympy.sets.powerset.PowerSet` is a set of sets: + + >>> PowerSet({1, 2, 3}).kind + SetKind(SetKind(NumberKind)) + + A :class:`ProductSet` represents the set of tuples of elements of + other sets. 
Its kind is :class:`sympy.core.containers.TupleKind` + parametrised by the kinds of the elements of those sets: + + >>> p = ProductSet(FiniteSet(1, 2), FiniteSet(3, 4)) + >>> list(p) + [(1, 3), (2, 3), (1, 4), (2, 4)] + >>> p.kind + SetKind(TupleKind(NumberKind, NumberKind)) + + When all elements of the set do not have same kind, the kind + will be returned as ``SetKind(UndefinedKind)``: + + >>> FiniteSet(0, Matrix([1, 2])).kind + SetKind(UndefinedKind) + + The kind of the elements of a set are given by the ``element_kind`` + attribute of ``SetKind``: + + >>> Interval(1, 2).kind.element_kind + NumberKind + + See Also + ======== + + NumberKind + sympy.core.kind.UndefinedKind + sympy.core.containers.TupleKind + MatrixKind + sympy.matrices.expressions.sets.MatrixSet + sympy.sets.conditionset.ConditionSet + Rationals + Naturals + Integers + sympy.sets.fancysets.ImageSet + sympy.sets.fancysets.Range + sympy.sets.fancysets.ComplexRegion + sympy.sets.powerset.PowerSet + sympy.sets.sets.ProductSet + sympy.sets.sets.Interval + sympy.sets.sets.Union + sympy.sets.sets.Intersection + sympy.sets.sets.Complement + sympy.sets.sets.EmptySet + sympy.sets.sets.UniversalSet + sympy.sets.sets.FiniteSet + sympy.sets.sets.SymmetricDifference + sympy.sets.sets.DisjointUnion + """ + return self._kind() + + @property + def boundary(self): + """ + The boundary or frontier of a set. + + Explanation + =========== + + A point x is on the boundary of a set S if + + 1. x is in the closure of S. + I.e. Every neighborhood of x contains a point in S. + 2. x is not in the interior of S. + I.e. There does not exist an open set centered on x contained + entirely within S. + + There are the points on the outer rim of S. If S is open then these + points need not actually be contained within S. + + For example, the boundary of an interval is its start and end points. + This is true regardless of whether or not the interval is open. 
+ + Examples + ======== + + >>> from sympy import Interval + >>> Interval(0, 1).boundary + {0, 1} + >>> Interval(0, 1, True, False).boundary + {0, 1} + """ + return self._boundary + + @property + def is_open(self): + """ + Property method to check whether a set is open. + + Explanation + =========== + + A set is open if and only if it has an empty intersection with its + boundary. In particular, a subset A of the reals is open if and only + if each one of its points is contained in an open interval that is a + subset of A. + + Examples + ======== + >>> from sympy import S + >>> S.Reals.is_open + True + >>> S.Rationals.is_open + False + """ + return Intersection(self, self.boundary).is_empty + + @property + def is_closed(self): + """ + A property method to check whether a set is closed. + + Explanation + =========== + + A set is closed if its complement is an open set. The closedness of a + subset of the reals is determined with respect to R and its standard + topology. + + Examples + ======== + >>> from sympy import Interval + >>> Interval(0, 1).is_closed + True + """ + return self.boundary.is_subset(self) + + @property + def closure(self): + """ + Property method which returns the closure of a set. + The closure is defined as the union of the set itself and its + boundary. + + Examples + ======== + >>> from sympy import S, Interval + >>> S.Reals.closure + Reals + >>> Interval(0, 1).closure + Interval(0, 1) + """ + return self + self.boundary + + @property + def interior(self): + """ + Property method which returns the interior of a set. + The interior of a set S consists all points of S that do not + belong to the boundary of S. 
+ + Examples + ======== + >>> from sympy import Interval + >>> Interval(0, 1).interior + Interval.open(0, 1) + >>> Interval(0, 1).boundary.interior + EmptySet + """ + return self - self.boundary + + @property + def _boundary(self): + raise NotImplementedError() + + @property + def _measure(self): + raise NotImplementedError("(%s)._measure" % self) + + def _kind(self): + return SetKind(UndefinedKind) + + def _eval_evalf(self, prec): + dps = prec_to_dps(prec) + return self.func(*[arg.evalf(n=dps) for arg in self.args]) + + @sympify_return([('other', 'Set')], NotImplemented) + def __add__(self, other): + return self.union(other) + + @sympify_return([('other', 'Set')], NotImplemented) + def __or__(self, other): + return self.union(other) + + @sympify_return([('other', 'Set')], NotImplemented) + def __and__(self, other): + return self.intersect(other) + + @sympify_return([('other', 'Set')], NotImplemented) + def __mul__(self, other): + return ProductSet(self, other) + + @sympify_return([('other', 'Set')], NotImplemented) + def __xor__(self, other): + return SymmetricDifference(self, other) + + @sympify_return([('exp', Expr)], NotImplemented) + def __pow__(self, exp): + if not (exp.is_Integer and exp >= 0): + raise ValueError("%s: Exponent must be a positive Integer" % exp) + return ProductSet(*[self]*exp) + + @sympify_return([('other', 'Set')], NotImplemented) + def __sub__(self, other): + return Complement(self, other) + + def __contains__(self, other): + other = _sympify(other) + c = self._contains(other) + b = tfn[c] + if b is None: + # x in y must evaluate to T or F; to entertain a None + # result with Set use y.contains(x) + raise TypeError('did not evaluate to a bool: %r' % c) + return b + + +class ProductSet(Set): + """ + Represents a Cartesian Product of Sets. + + Explanation + =========== + + Returns a Cartesian product given several sets as either an iterable + or individual arguments. + + Can use ``*`` operator on any sets for convenient shorthand. 
+ + Examples + ======== + + >>> from sympy import Interval, FiniteSet, ProductSet + >>> I = Interval(0, 5); S = FiniteSet(1, 2, 3) + >>> ProductSet(I, S) + ProductSet(Interval(0, 5), {1, 2, 3}) + + >>> (2, 2) in ProductSet(I, S) + True + + >>> Interval(0, 1) * Interval(0, 1) # The unit square + ProductSet(Interval(0, 1), Interval(0, 1)) + + >>> coin = FiniteSet('H', 'T') + >>> set(coin**2) + {(H, H), (H, T), (T, H), (T, T)} + + The Cartesian product is not commutative or associative e.g.: + + >>> I*S == S*I + False + >>> (I*I)*I == I*(I*I) + False + + Notes + ===== + + - Passes most operations down to the argument sets + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Cartesian_product + """ + is_ProductSet = True + + def __new__(cls, *sets, **assumptions): + if len(sets) == 1 and iterable(sets[0]) and not isinstance(sets[0], (Set, set)): + sympy_deprecation_warning( + """ +ProductSet(iterable) is deprecated. Use ProductSet(*iterable) instead. + """, + deprecated_since_version="1.5", + active_deprecations_target="deprecated-productset-iterable", + ) + sets = tuple(sets[0]) + + sets = [sympify(s) for s in sets] + + if not all(isinstance(s, Set) for s in sets): + raise TypeError("Arguments to ProductSet should be of type Set") + + # Nullary product of sets is *not* the empty set + if len(sets) == 0: + return FiniteSet(()) + + if S.EmptySet in sets: + return S.EmptySet + + return Basic.__new__(cls, *sets, **assumptions) + + @property + def sets(self): + return self.args + + def flatten(self): + def _flatten(sets): + for s in sets: + if s.is_ProductSet: + yield from _flatten(s.sets) + else: + yield s + return ProductSet(*_flatten(self.sets)) + + + + def _contains(self, element): + """ + ``in`` operator for ProductSets. 
+ + Examples + ======== + + >>> from sympy import Interval + >>> (2, 3) in Interval(0, 5) * Interval(0, 5) + True + + >>> (10, 10) in Interval(0, 5) * Interval(0, 5) + False + + Passes operation on to constituent sets + """ + if element.is_Symbol: + return None + + if not isinstance(element, Tuple) or len(element) != len(self.sets): + return S.false + + return And(*[s.contains(e) for s, e in zip(self.sets, element)]) + + def as_relational(self, *symbols): + symbols = [_sympify(s) for s in symbols] + if len(symbols) != len(self.sets) or not all( + i.is_Symbol for i in symbols): + raise ValueError( + 'number of symbols must match the number of sets') + return And(*[s.as_relational(i) for s, i in zip(self.sets, symbols)]) + + @property + def _boundary(self): + return Union(*(ProductSet(*(b + b.boundary if i != j else b.boundary + for j, b in enumerate(self.sets))) + for i, a in enumerate(self.sets))) + + @property + def is_iterable(self): + """ + A property method which tests whether a set is iterable or not. + Returns True if set is iterable, otherwise returns False. + + Examples + ======== + + >>> from sympy import FiniteSet, Interval + >>> I = Interval(0, 1) + >>> A = FiniteSet(1, 2, 3, 4, 5) + >>> I.is_iterable + False + >>> A.is_iterable + True + + """ + return all(set.is_iterable for set in self.sets) + + def __iter__(self): + """ + A method which implements is_iterable property method. + If self.is_iterable returns True (both constituent sets are iterable), + then return the Cartesian Product. Otherwise, raise TypeError. 
+ """ + return iproduct(*self.sets) + + @property + def is_empty(self): + return fuzzy_or(s.is_empty for s in self.sets) + + @property + def is_finite_set(self): + all_finite = fuzzy_and(s.is_finite_set for s in self.sets) + return fuzzy_or([self.is_empty, all_finite]) + + @property + def _measure(self): + measure = 1 + for s in self.sets: + measure *= s.measure + return measure + + def _kind(self): + return SetKind(TupleKind(*(i.kind.element_kind for i in self.args))) + + def __len__(self): + return reduce(lambda a, b: a*b, (len(s) for s in self.args)) + + def __bool__(self): + return all(self.sets) + + +class Interval(Set): + """ + Represents a real interval as a Set. + + Usage: + Returns an interval with end points ``start`` and ``end``. + + For ``left_open=True`` (default ``left_open`` is ``False``) the interval + will be open on the left. Similarly, for ``right_open=True`` the interval + will be open on the right. + + Examples + ======== + + >>> from sympy import Symbol, Interval + >>> Interval(0, 1) + Interval(0, 1) + >>> Interval.Ropen(0, 1) + Interval.Ropen(0, 1) + >>> Interval.Ropen(0, 1) + Interval.Ropen(0, 1) + >>> Interval.Lopen(0, 1) + Interval.Lopen(0, 1) + >>> Interval.open(0, 1) + Interval.open(0, 1) + + >>> a = Symbol('a', real=True) + >>> Interval(0, a) + Interval(0, a) + + Notes + ===== + - Only real end points are supported + - ``Interval(a, b)`` with $a > b$ will return the empty set + - Use the ``evalf()`` method to turn an Interval into an mpmath + ``mpi`` interval instance + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Interval_%28mathematics%29 + """ + is_Interval = True + + def __new__(cls, start, end, left_open=False, right_open=False): + + start = _sympify(start) + end = _sympify(end) + left_open = _sympify(left_open) + right_open = _sympify(right_open) + + if not all(isinstance(a, (type(true), type(false))) + for a in [left_open, right_open]): + raise NotImplementedError( + "left_open and right_open can have only true/false values, " + "got %s and %s" % (left_open, right_open)) + + # Only allow real intervals + if fuzzy_not(fuzzy_and(i.is_extended_real for i in (start, end, end-start))): + raise ValueError("Non-real intervals are not supported") + + # evaluate if possible + if is_lt(end, start): + return S.EmptySet + elif (end - start).is_negative: + return S.EmptySet + + if end == start and (left_open or right_open): + return S.EmptySet + if end == start and not (left_open or right_open): + if start is S.Infinity or start is S.NegativeInfinity: + return S.EmptySet + return FiniteSet(end) + + # Make sure infinite interval end points are open. + if start is S.NegativeInfinity: + left_open = true + if end is S.Infinity: + right_open = true + if start == S.Infinity or end == S.NegativeInfinity: + return S.EmptySet + + return Basic.__new__(cls, start, end, left_open, right_open) + + @property + def start(self): + """ + The left end point of the interval. + + This property takes the same value as the ``inf`` property. + + Examples + ======== + + >>> from sympy import Interval + >>> Interval(0, 1).start + 0 + + """ + return self._args[0] + + @property + def end(self): + """ + The right end point of the interval. + + This property takes the same value as the ``sup`` property. + + Examples + ======== + + >>> from sympy import Interval + >>> Interval(0, 1).end + 1 + + """ + return self._args[1] + + @property + def left_open(self): + """ + True if interval is left-open. 
+ + Examples + ======== + + >>> from sympy import Interval + >>> Interval(0, 1, left_open=True).left_open + True + >>> Interval(0, 1, left_open=False).left_open + False + + """ + return self._args[2] + + @property + def right_open(self): + """ + True if interval is right-open. + + Examples + ======== + + >>> from sympy import Interval + >>> Interval(0, 1, right_open=True).right_open + True + >>> Interval(0, 1, right_open=False).right_open + False + + """ + return self._args[3] + + @classmethod + def open(cls, a, b): + """Return an interval including neither boundary.""" + return cls(a, b, True, True) + + @classmethod + def Lopen(cls, a, b): + """Return an interval not including the left boundary.""" + return cls(a, b, True, False) + + @classmethod + def Ropen(cls, a, b): + """Return an interval not including the right boundary.""" + return cls(a, b, False, True) + + @property + def _inf(self): + return self.start + + @property + def _sup(self): + return self.end + + @property + def left(self): + return self.start + + @property + def right(self): + return self.end + + @property + def is_empty(self): + if self.left_open or self.right_open: + cond = self.start >= self.end # One/both bounds open + else: + cond = self.start > self.end # Both bounds closed + return fuzzy_bool(cond) + + @property + def is_finite_set(self): + return self.measure.is_zero + + def _complement(self, other): + if other == S.Reals: + a = Interval(S.NegativeInfinity, self.start, + True, not self.left_open) + b = Interval(self.end, S.Infinity, not self.right_open, True) + return Union(a, b) + + if isinstance(other, FiniteSet): + nums = [m for m in other.args if m.is_number] + if nums == []: + return None + + return Set._complement(self, other) + + @property + def _boundary(self): + finite_points = [p for p in (self.start, self.end) + if abs(p) != S.Infinity] + return FiniteSet(*finite_points) + + def _contains(self, other): + if (not isinstance(other, Expr) or other is S.NaN + or other.is_real is 
False or other.has(S.ComplexInfinity)): + # if an expression has zoo it will be zoo or nan + # and neither of those is real + return false + + if self.start is S.NegativeInfinity and self.end is S.Infinity: + if other.is_real is not None: + return tfn[other.is_real] + + d = Dummy() + return self.as_relational(d).subs(d, other) + + def as_relational(self, x): + """Rewrite an interval in terms of inequalities and logic operators.""" + x = sympify(x) + if self.right_open: + right = x < self.end + else: + right = x <= self.end + if self.left_open: + left = self.start < x + else: + left = self.start <= x + return And(left, right) + + @property + def _measure(self): + return self.end - self.start + + def _kind(self): + return SetKind(NumberKind) + + def to_mpi(self, prec=53): + return mpi(mpf(self.start._eval_evalf(prec)), + mpf(self.end._eval_evalf(prec))) + + def _eval_evalf(self, prec): + return Interval(self.left._evalf(prec), self.right._evalf(prec), + left_open=self.left_open, right_open=self.right_open) + + def _is_comparable(self, other): + is_comparable = self.start.is_comparable + is_comparable &= self.end.is_comparable + is_comparable &= other.start.is_comparable + is_comparable &= other.end.is_comparable + + return is_comparable + + @property + def is_left_unbounded(self): + """Return ``True`` if the left endpoint is negative infinity. """ + return self.left is S.NegativeInfinity or self.left == Float("-inf") + + @property + def is_right_unbounded(self): + """Return ``True`` if the right endpoint is positive infinity. """ + return self.right is S.Infinity or self.right == Float("+inf") + + def _eval_Eq(self, other): + if not isinstance(other, Interval): + if isinstance(other, FiniteSet): + return false + elif isinstance(other, Set): + return None + return false + + +class Union(Set, LatticeOp): + """ + Represents a union of sets as a :class:`Set`. 
+ + Examples + ======== + + >>> from sympy import Union, Interval + >>> Union(Interval(1, 2), Interval(3, 4)) + Union(Interval(1, 2), Interval(3, 4)) + + The Union constructor will always try to merge overlapping intervals, + if possible. For example: + + >>> Union(Interval(1, 2), Interval(2, 3)) + Interval(1, 3) + + See Also + ======== + + Intersection + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Union_%28set_theory%29 + """ + is_Union = True + + @property + def identity(self): + return S.EmptySet + + @property + def zero(self): + return S.UniversalSet + + def __new__(cls, *args, **kwargs): + evaluate = kwargs.get('evaluate', global_parameters.evaluate) + + # flatten inputs to merge intersections and iterables + args = _sympify(args) + + # Reduce sets using known rules + if evaluate: + args = list(cls._new_args_filter(args)) + return simplify_union(args) + + args = list(ordered(args, Set._infimum_key)) + + obj = Basic.__new__(cls, *args) + obj._argset = frozenset(args) + return obj + + @property + def args(self): + return self._args + + def _complement(self, universe): + # DeMorgan's Law + return Intersection(s.complement(universe) for s in self.args) + + @property + def _inf(self): + # We use Min so that sup is meaningful in combination with symbolic + # interval end points. + return Min(*[set.inf for set in self.args]) + + @property + def _sup(self): + # We use Max so that sup is meaningful in combination with symbolic + # end points. + return Max(*[set.sup for set in self.args]) + + @property + def is_empty(self): + return fuzzy_and(set.is_empty for set in self.args) + + @property + def is_finite_set(self): + return fuzzy_and(set.is_finite_set for set in self.args) + + @property + def _measure(self): + # Measure of a union is the sum of the measures of the sets minus + # the sum of their pairwise intersections plus the sum of their + # triple-wise intersections minus ... etc... 
+ + # Sets is a collection of intersections and a set of elementary + # sets which made up those intersections (called "sos" for set of sets) + # An example element might of this list might be: + # ( {A,B,C}, A.intersect(B).intersect(C) ) + + # Start with just elementary sets ( ({A}, A), ({B}, B), ... ) + # Then get and subtract ( ({A,B}, (A int B), ... ) while non-zero + sets = [(FiniteSet(s), s) for s in self.args] + measure = 0 + parity = 1 + while sets: + # Add up the measure of these sets and add or subtract it to total + measure += parity * sum(inter.measure for sos, inter in sets) + + # For each intersection in sets, compute the intersection with every + # other set not already part of the intersection. + sets = ((sos + FiniteSet(newset), newset.intersect(intersection)) + for sos, intersection in sets for newset in self.args + if newset not in sos) + + # Clear out sets with no measure + sets = [(sos, inter) for sos, inter in sets if inter.measure != 0] + + # Clear out duplicates + sos_list = [] + sets_list = [] + for _set in sets: + if _set[0] in sos_list: + continue + else: + sos_list.append(_set[0]) + sets_list.append(_set) + sets = sets_list + + # Flip Parity - next time subtract/add if we added/subtracted here + parity *= -1 + return measure + + def _kind(self): + kinds = tuple(arg.kind for arg in self.args if arg is not S.EmptySet) + if not kinds: + return SetKind() + elif all(i == kinds[0] for i in kinds): + return kinds[0] + else: + return SetKind(UndefinedKind) + + @property + def _boundary(self): + def boundary_of_set(i): + """ The boundary of set i minus interior of all other sets """ + b = self.args[i].boundary + for j, a in enumerate(self.args): + if j != i: + b = b - a.interior + return b + return Union(*map(boundary_of_set, range(len(self.args)))) + + def _contains(self, other): + return Or(*[s.contains(other) for s in self.args]) + + def is_subset(self, other): + return fuzzy_and(s.is_subset(other) for s in self.args) + + def 
as_relational(self, symbol): + """Rewrite a Union in terms of equalities and logic operators. """ + if (len(self.args) == 2 and + all(isinstance(i, Interval) for i in self.args)): + # optimization to give 3 args as (x > 1) & (x < 5) & Ne(x, 3) + # instead of as 4, ((1 <= x) & (x < 3)) | ((x <= 5) & (3 < x)) + # XXX: This should be ideally be improved to handle any number of + # intervals and also not to assume that the intervals are in any + # particular sorted order. + a, b = self.args + if a.sup == b.inf and a.right_open and b.left_open: + mincond = symbol > a.inf if a.left_open else symbol >= a.inf + maxcond = symbol < b.sup if b.right_open else symbol <= b.sup + necond = Ne(symbol, a.sup) + return And(necond, mincond, maxcond) + return Or(*[i.as_relational(symbol) for i in self.args]) + + @property + def is_iterable(self): + return all(arg.is_iterable for arg in self.args) + + def __iter__(self): + return roundrobin(*(iter(arg) for arg in self.args)) + + +class Intersection(Set, LatticeOp): + """ + Represents an intersection of sets as a :class:`Set`. + + Examples + ======== + + >>> from sympy import Intersection, Interval + >>> Intersection(Interval(1, 3), Interval(2, 4)) + Interval(2, 3) + + We often use the .intersect method + + >>> Interval(1,3).intersect(Interval(2,4)) + Interval(2, 3) + + See Also + ======== + + Union + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Intersection_%28set_theory%29 + """ + is_Intersection = True + + @property + def identity(self): + return S.UniversalSet + + @property + def zero(self): + return S.EmptySet + + def __new__(cls, *args , evaluate=None): + if evaluate is None: + evaluate = global_parameters.evaluate + + # flatten inputs to merge intersections and iterables + args = list(ordered(set(_sympify(args)))) + + # Reduce sets using known rules + if evaluate: + args = list(cls._new_args_filter(args)) + return simplify_intersection(args) + + args = list(ordered(args, Set._infimum_key)) + + obj = Basic.__new__(cls, *args) + obj._argset = frozenset(args) + return obj + + @property + def args(self): + return self._args + + @property + def is_iterable(self): + return any(arg.is_iterable for arg in self.args) + + @property + def is_finite_set(self): + if fuzzy_or(arg.is_finite_set for arg in self.args): + return True + + def _kind(self): + kinds = tuple(arg.kind for arg in self.args if arg is not S.UniversalSet) + if not kinds: + return SetKind(UndefinedKind) + elif all(i == kinds[0] for i in kinds): + return kinds[0] + else: + return SetKind() + + @property + def _inf(self): + raise NotImplementedError() + + @property + def _sup(self): + raise NotImplementedError() + + def _contains(self, other): + return And(*[set.contains(other) for set in self.args]) + + def __iter__(self): + sets_sift = sift(self.args, lambda x: x.is_iterable) + + completed = False + candidates = sets_sift[True] + sets_sift[None] + + finite_candidates, others = [], [] + for candidate in candidates: + length = None + try: + length = len(candidate) + except TypeError: + others.append(candidate) + + if length is not None: + finite_candidates.append(candidate) + finite_candidates.sort(key=len) + + for s in finite_candidates + others: + other_sets = set(self.args) - {s} + other = Intersection(*other_sets, evaluate=False) + completed = True + for x in s: + try: + if x in other: + yield x + except 
TypeError: + completed = False + if completed: + return + + if not completed: + if not candidates: + raise TypeError("None of the constituent sets are iterable") + raise TypeError( + "The computation had not completed because of the " + "undecidable set membership is found in every candidates.") + + @staticmethod + def _handle_finite_sets(args): + '''Simplify intersection of one or more FiniteSets and other sets''' + + # First separate the FiniteSets from the others + fs_args, others = sift(args, lambda x: x.is_FiniteSet, binary=True) + + # Let the caller handle intersection of non-FiniteSets + if not fs_args: + return + + # Convert to Python sets and build the set of all elements + fs_sets = [set(fs) for fs in fs_args] + all_elements = reduce(lambda a, b: a | b, fs_sets, set()) + + # Extract elements that are definitely in or definitely not in the + # intersection. Here we check contains for all of args. + definite = set() + for e in all_elements: + inall = fuzzy_and(s.contains(e) for s in args) + if inall is True: + definite.add(e) + if inall is not None: + for s in fs_sets: + s.discard(e) + + # At this point all elements in all of fs_sets are possibly in the + # intersection. In some cases this is because they are definitely in + # the intersection of the finite sets but it's not clear if they are + # members of others. We might have {m, n}, {m}, and Reals where we + # don't know if m or n is real. We want to remove n here but it is + # possibly in because it might be equal to m. So what we do now is + # extract the elements that are definitely in the remaining finite + # sets iteratively until we end up with {n}, {}. At that point if we + # get any empty set all remaining elements are discarded. 
+ + fs_elements = reduce(lambda a, b: a | b, fs_sets, set()) + + # Need fuzzy containment testing + fs_symsets = [FiniteSet(*s) for s in fs_sets] + + while fs_elements: + for e in fs_elements: + infs = fuzzy_and(s.contains(e) for s in fs_symsets) + if infs is True: + definite.add(e) + if infs is not None: + for n, s in enumerate(fs_sets): + # Update Python set and FiniteSet + if e in s: + s.remove(e) + fs_symsets[n] = FiniteSet(*s) + fs_elements.remove(e) + break + # If we completed the for loop without removing anything we are + # done so quit the outer while loop + else: + break + + # If any of the sets of remainder elements is empty then we discard + # all of them for the intersection. + if not all(fs_sets): + fs_sets = [set()] + + # Here we fold back the definitely included elements into each fs. + # Since they are definitely included they must have been members of + # each FiniteSet to begin with. We could instead fold these in with a + # Union at the end to get e.g. {3}|({x}&{y}) rather than {3,x}&{3,y}. + if definite: + fs_sets = [fs | definite for fs in fs_sets] + + if fs_sets == [set()]: + return S.EmptySet + + sets = [FiniteSet(*s) for s in fs_sets] + + # Any set in others is redundant if it contains all the elements that + # are in the finite sets so we don't need it in the Intersection + all_elements = reduce(lambda a, b: a | b, fs_sets, set()) + is_redundant = lambda o: all(fuzzy_bool(o.contains(e)) for e in all_elements) + others = [o for o in others if not is_redundant(o)] + + if others: + rest = Intersection(*others) + # XXX: Maybe this shortcut should be at the beginning. For large + # FiniteSets it could much more efficient to process the other + # sets first... 
+ if rest is S.EmptySet: + return S.EmptySet + # Flatten the Intersection + if rest.is_Intersection: + sets.extend(rest.args) + else: + sets.append(rest) + + if len(sets) == 1: + return sets[0] + else: + return Intersection(*sets, evaluate=False) + + def as_relational(self, symbol): + """Rewrite an Intersection in terms of equalities and logic operators""" + return And(*[set.as_relational(symbol) for set in self.args]) + + +class Complement(Set): + r"""Represents the set difference or relative complement of a set with + another set. + + $$A - B = \{x \in A \mid x \notin B\}$$ + + + Examples + ======== + + >>> from sympy import Complement, FiniteSet + >>> Complement(FiniteSet(0, 1, 2), FiniteSet(1)) + {0, 2} + + See Also + ========= + + Intersection, Union + + References + ========== + + .. [1] https://mathworld.wolfram.com/ComplementSet.html + """ + + is_Complement = True + + def __new__(cls, a, b, evaluate=True): + a, b = map(_sympify, (a, b)) + if evaluate: + return Complement.reduce(a, b) + + return Basic.__new__(cls, a, b) + + @staticmethod + def reduce(A, B): + """ + Simplify a :class:`Complement`. 
+ + """ + if B == S.UniversalSet or A.is_subset(B): + return S.EmptySet + + if isinstance(B, Union): + return Intersection(*(s.complement(A) for s in B.args)) + + result = B._complement(A) + if result is not None: + return result + else: + return Complement(A, B, evaluate=False) + + def _contains(self, other): + A = self.args[0] + B = self.args[1] + return And(A.contains(other), Not(B.contains(other))) + + def as_relational(self, symbol): + """Rewrite a complement in terms of equalities and logic + operators""" + A, B = self.args + + A_rel = A.as_relational(symbol) + B_rel = Not(B.as_relational(symbol)) + + return And(A_rel, B_rel) + + def _kind(self): + return self.args[0].kind + + @property + def is_iterable(self): + if self.args[0].is_iterable: + return True + + @property + def is_finite_set(self): + A, B = self.args + a_finite = A.is_finite_set + if a_finite is True: + return True + elif a_finite is False and B.is_finite_set: + return False + + def __iter__(self): + A, B = self.args + for a in A: + if a not in B: + yield a + else: + continue + + +class EmptySet(Set, metaclass=Singleton): + """ + Represents the empty set. The empty set is available as a singleton + as ``S.EmptySet``. + + Examples + ======== + + >>> from sympy import S, Interval + >>> S.EmptySet + EmptySet + + >>> Interval(1, 2).intersect(S.EmptySet) + EmptySet + + See Also + ======== + + UniversalSet + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Empty_set + """ + is_empty = True + is_finite_set = True + is_FiniteSet = True + + @property # type: ignore + @deprecated( + """ + The is_EmptySet attribute of Set objects is deprecated. + Use 's is S.EmptySet" or 's.is_empty' instead. 
+ """, + deprecated_since_version="1.5", + active_deprecations_target="deprecated-is-emptyset", + ) + def is_EmptySet(self): + return True + + @property + def _measure(self): + return 0 + + def _contains(self, other): + return false + + def as_relational(self, symbol): + return false + + def __len__(self): + return 0 + + def __iter__(self): + return iter([]) + + def _eval_powerset(self): + return FiniteSet(self) + + @property + def _boundary(self): + return self + + def _complement(self, other): + return other + + def _kind(self): + return SetKind() + + def _symmetric_difference(self, other): + return other + + +class UniversalSet(Set, metaclass=Singleton): + """ + Represents the set of all things. + The universal set is available as a singleton as ``S.UniversalSet``. + + Examples + ======== + + >>> from sympy import S, Interval + >>> S.UniversalSet + UniversalSet + + >>> Interval(1, 2).intersect(S.UniversalSet) + Interval(1, 2) + + See Also + ======== + + EmptySet + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Universal_set + """ + + is_UniversalSet = True + is_empty = False + is_finite_set = False + + def _complement(self, other): + return S.EmptySet + + def _symmetric_difference(self, other): + return other + + @property + def _measure(self): + return S.Infinity + + def _kind(self): + return SetKind(UndefinedKind) + + def _contains(self, other): + return true + + def as_relational(self, symbol): + return true + + @property + def _boundary(self): + return S.EmptySet + + +class FiniteSet(Set): + """ + Represents a finite set of Sympy expressions. 
+ + Examples + ======== + + >>> from sympy import FiniteSet, Symbol, Interval, Naturals0 + >>> FiniteSet(1, 2, 3, 4) + {1, 2, 3, 4} + >>> 3 in FiniteSet(1, 2, 3, 4) + True + >>> FiniteSet(1, (1, 2), Symbol('x')) + {1, x, (1, 2)} + >>> FiniteSet(Interval(1, 2), Naturals0, {1, 2}) + FiniteSet({1, 2}, Interval(1, 2), Naturals0) + >>> members = [1, 2, 3, 4] + >>> f = FiniteSet(*members) + >>> f + {1, 2, 3, 4} + >>> f - FiniteSet(2) + {1, 3, 4} + >>> f + FiniteSet(2, 5) + {1, 2, 3, 4, 5} + + References + ========== + + .. [1] https://en.wikipedia.org/wiki/Finite_set + """ + is_FiniteSet = True + is_iterable = True + is_empty = False + is_finite_set = True + + def __new__(cls, *args, **kwargs): + evaluate = kwargs.get('evaluate', global_parameters.evaluate) + if evaluate: + args = list(map(sympify, args)) + + if len(args) == 0: + return S.EmptySet + else: + args = list(map(sympify, args)) + + # keep the form of the first canonical arg + dargs = {} + for i in reversed(list(ordered(args))): + if i.is_Symbol: + dargs[i] = i + else: + try: + dargs[i.as_dummy()] = i + except TypeError: + # e.g. i = class without args like `Interval` + dargs[i] = i + _args_set = set(dargs.values()) + args = list(ordered(_args_set, Set._infimum_key)) + obj = Basic.__new__(cls, *args) + obj._args_set = _args_set + return obj + + + def __iter__(self): + return iter(self.args) + + def _complement(self, other): + if isinstance(other, Interval): + # Splitting in sub-intervals is only done for S.Reals; + # other cases that need splitting will first pass through + # Set._complement(). 
+ nums, syms = [], [] + for m in self.args: + if m.is_number and m.is_real: + nums.append(m) + elif m.is_real == False: + pass # drop non-reals + else: + syms.append(m) # various symbolic expressions + if other == S.Reals and nums != []: + nums.sort() + intervals = [] # Build up a list of intervals between the elements + intervals += [Interval(S.NegativeInfinity, nums[0], True, True)] + for a, b in zip(nums[:-1], nums[1:]): + intervals.append(Interval(a, b, True, True)) # both open + intervals.append(Interval(nums[-1], S.Infinity, True, True)) + if syms != []: + return Complement(Union(*intervals, evaluate=False), + FiniteSet(*syms), evaluate=False) + else: + return Union(*intervals, evaluate=False) + elif nums == []: # no splitting necessary or possible: + if syms: + return Complement(other, FiniteSet(*syms), evaluate=False) + else: + return other + + elif isinstance(other, FiniteSet): + unk = [] + for i in self: + c = sympify(other.contains(i)) + if c is not S.true and c is not S.false: + unk.append(i) + unk = FiniteSet(*unk) + if unk == self: + return + not_true = [] + for i in other: + c = sympify(self.contains(i)) + if c is not S.true: + not_true.append(i) + return Complement(FiniteSet(*not_true), unk) + + return Set._complement(self, other) + + def _contains(self, other): + """ + Tests whether an element, other, is in the set. + + Explanation + =========== + + The actual test is for mathematical equality (as opposed to + syntactical equality). In the worst case all elements of the + set must be checked. 
+ + Examples + ======== + + >>> from sympy import FiniteSet + >>> 1 in FiniteSet(1, 2) + True + >>> 5 in FiniteSet(1, 2) + False + + """ + if other in self._args_set: + return S.true + else: + # evaluate=True is needed to override evaluate=False context; + # we need Eq to do the evaluation + return Or(*[Eq(e, other, evaluate=True) for e in self.args]) + + def _eval_is_subset(self, other): + return fuzzy_and(other._contains(e) for e in self.args) + + @property + def _boundary(self): + return self + + @property + def _inf(self): + return Min(*self) + + @property + def _sup(self): + return Max(*self) + + @property + def measure(self): + return 0 + + def _kind(self): + if not self.args: + return SetKind() + elif all(i.kind == self.args[0].kind for i in self.args): + return SetKind(self.args[0].kind) + else: + return SetKind(UndefinedKind) + + def __len__(self): + return len(self.args) + + def as_relational(self, symbol): + """Rewrite a FiniteSet in terms of equalities and logic operators. """ + return Or(*[Eq(symbol, elem) for elem in self]) + + def compare(self, other): + return (hash(self) - hash(other)) + + def _eval_evalf(self, prec): + dps = prec_to_dps(prec) + return FiniteSet(*[elem.evalf(n=dps) for elem in self]) + + def _eval_simplify(self, **kwargs): + from sympy.simplify import simplify + return FiniteSet(*[simplify(elem, **kwargs) for elem in self]) + + @property + def _sorted_args(self): + return self.args + + def _eval_powerset(self): + return self.func(*[self.func(*s) for s in subsets(self.args)]) + + def _eval_rewrite_as_PowerSet(self, *args, **kwargs): + """Rewriting method for a finite set to a power set.""" + from .powerset import PowerSet + + is2pow = lambda n: bool(n and not n & (n - 1)) + if not is2pow(len(self)): + return None + + fs_test = lambda arg: isinstance(arg, Set) and arg.is_FiniteSet + if not all(fs_test(arg) for arg in args): + return None + + biggest = max(args, key=len) + for arg in subsets(biggest.args): + arg_set = FiniteSet(*arg) 
+ if arg_set not in args: + return None + return PowerSet(biggest) + + def __ge__(self, other): + if not isinstance(other, Set): + raise TypeError("Invalid comparison of set with %s" % func_name(other)) + return other.is_subset(self) + + def __gt__(self, other): + if not isinstance(other, Set): + raise TypeError("Invalid comparison of set with %s" % func_name(other)) + return self.is_proper_superset(other) + + def __le__(self, other): + if not isinstance(other, Set): + raise TypeError("Invalid comparison of set with %s" % func_name(other)) + return self.is_subset(other) + + def __lt__(self, other): + if not isinstance(other, Set): + raise TypeError("Invalid comparison of set with %s" % func_name(other)) + return self.is_proper_subset(other) + + def __eq__(self, other): + if isinstance(other, (set, frozenset)): + return self._args_set == other + return super().__eq__(other) + + __hash__ : Callable[[Basic], Any] = Basic.__hash__ + +_sympy_converter[set] = lambda x: FiniteSet(*x) +_sympy_converter[frozenset] = lambda x: FiniteSet(*x) + + +class SymmetricDifference(Set): + """Represents the set of elements which are in either of the + sets and not in their intersection. + + Examples + ======== + + >>> from sympy import SymmetricDifference, FiniteSet + >>> SymmetricDifference(FiniteSet(1, 2, 3), FiniteSet(3, 4, 5)) + {1, 2, 4, 5} + + See Also + ======== + + Complement, Union + + References + ========== + + .. 
[1] https://en.wikipedia.org/wiki/Symmetric_difference + """ + + is_SymmetricDifference = True + + def __new__(cls, a, b, evaluate=True): + if evaluate: + return SymmetricDifference.reduce(a, b) + + return Basic.__new__(cls, a, b) + + @staticmethod + def reduce(A, B): + result = B._symmetric_difference(A) + if result is not None: + return result + else: + return SymmetricDifference(A, B, evaluate=False) + + def as_relational(self, symbol): + """Rewrite a symmetric_difference in terms of equalities and + logic operators""" + A, B = self.args + + A_rel = A.as_relational(symbol) + B_rel = B.as_relational(symbol) + + return Xor(A_rel, B_rel) + + @property + def is_iterable(self): + if all(arg.is_iterable for arg in self.args): + return True + + def __iter__(self): + + args = self.args + union = roundrobin(*(iter(arg) for arg in args)) + + for item in union: + count = 0 + for s in args: + if item in s: + count += 1 + + if count % 2 == 1: + yield item + + + +class DisjointUnion(Set): + """ Represents the disjoint union (also known as the external disjoint union) + of a finite number of sets. 
+ + Examples + ======== + + >>> from sympy import DisjointUnion, FiniteSet, Interval, Union, Symbol + >>> A = FiniteSet(1, 2, 3) + >>> B = Interval(0, 5) + >>> DisjointUnion(A, B) + DisjointUnion({1, 2, 3}, Interval(0, 5)) + >>> DisjointUnion(A, B).rewrite(Union) + Union(ProductSet({1, 2, 3}, {0}), ProductSet(Interval(0, 5), {1})) + >>> C = FiniteSet(Symbol('x'), Symbol('y'), Symbol('z')) + >>> DisjointUnion(C, C) + DisjointUnion({x, y, z}, {x, y, z}) + >>> DisjointUnion(C, C).rewrite(Union) + ProductSet({x, y, z}, {0, 1}) + + References + ========== + + https://en.wikipedia.org/wiki/Disjoint_union + """ + + def __new__(cls, *sets): + dj_collection = [] + for set_i in sets: + if isinstance(set_i, Set): + dj_collection.append(set_i) + else: + raise TypeError("Invalid input: '%s', input args \ + to DisjointUnion must be Sets" % set_i) + obj = Basic.__new__(cls, *dj_collection) + return obj + + @property + def sets(self): + return self.args + + @property + def is_empty(self): + return fuzzy_and(s.is_empty for s in self.sets) + + @property + def is_finite_set(self): + all_finite = fuzzy_and(s.is_finite_set for s in self.sets) + return fuzzy_or([self.is_empty, all_finite]) + + @property + def is_iterable(self): + if self.is_empty: + return False + iter_flag = True + for set_i in self.sets: + if not set_i.is_empty: + iter_flag = iter_flag and set_i.is_iterable + return iter_flag + + def _eval_rewrite_as_Union(self, *sets, **kwargs): + """ + Rewrites the disjoint union as the union of (``set`` x {``i``}) + where ``set`` is the element in ``sets`` at index = ``i`` + """ + + dj_union = S.EmptySet + index = 0 + for set_i in sets: + if isinstance(set_i, Set): + cross = ProductSet(set_i, FiniteSet(index)) + dj_union = Union(dj_union, cross) + index = index + 1 + return dj_union + + def _contains(self, element): + """ + ``in`` operator for DisjointUnion + + Examples + ======== + + >>> from sympy import Interval, DisjointUnion + >>> D = DisjointUnion(Interval(0, 1), Interval(0, 
2)) + >>> (0.5, 0) in D + True + >>> (0.5, 1) in D + True + >>> (1.5, 0) in D + False + >>> (1.5, 1) in D + True + + Passes operation on to constituent sets + """ + if not isinstance(element, Tuple) or len(element) != 2: + return S.false + + if not element[1].is_Integer: + return S.false + + if element[1] >= len(self.sets) or element[1] < 0: + return S.false + + return self.sets[element[1]]._contains(element[0]) + + def _kind(self): + if not self.args: + return SetKind() + elif all(i.kind == self.args[0].kind for i in self.args): + return self.args[0].kind + else: + return SetKind(UndefinedKind) + + def __iter__(self): + if self.is_iterable: + + iters = [] + for i, s in enumerate(self.sets): + iters.append(iproduct(s, {Integer(i)})) + + return iter(roundrobin(*iters)) + else: + raise ValueError("'%s' is not iterable." % self) + + def __len__(self): + """ + Returns the length of the disjoint union, i.e., the number of elements in the set. + + Examples + ======== + + >>> from sympy import FiniteSet, DisjointUnion, EmptySet + >>> D1 = DisjointUnion(FiniteSet(1, 2, 3, 4), EmptySet, FiniteSet(3, 4, 5)) + >>> len(D1) + 7 + >>> D2 = DisjointUnion(FiniteSet(3, 5, 7), EmptySet, FiniteSet(3, 5, 7)) + >>> len(D2) + 6 + >>> D3 = DisjointUnion(EmptySet, EmptySet) + >>> len(D3) + 0 + + Adds up the lengths of the constituent sets. + """ + + if self.is_finite_set: + size = 0 + for set in self.sets: + size += len(set) + return size + else: + raise ValueError("'%s' is not a finite set." % self) + + +def imageset(*args): + r""" + Return an image of the set under transformation ``f``. + + Explanation + =========== + + If this function cannot compute the image, it returns an + unevaluated ImageSet object. + + .. 
math:: + \{ f(x) \mid x \in \mathrm{self} \} + + Examples + ======== + + >>> from sympy import S, Interval, imageset, sin, Lambda + >>> from sympy.abc import x + + >>> imageset(x, 2*x, Interval(0, 2)) + Interval(0, 4) + + >>> imageset(lambda x: 2*x, Interval(0, 2)) + Interval(0, 4) + + >>> imageset(Lambda(x, sin(x)), Interval(-2, 1)) + ImageSet(Lambda(x, sin(x)), Interval(-2, 1)) + + >>> imageset(sin, Interval(-2, 1)) + ImageSet(Lambda(x, sin(x)), Interval(-2, 1)) + >>> imageset(lambda y: x + y, Interval(-2, 1)) + ImageSet(Lambda(y, x + y), Interval(-2, 1)) + + Expressions applied to the set of Integers are simplified + to show as few negatives as possible and linear expressions + are converted to a canonical form. If this is not desirable + then the unevaluated ImageSet should be used. + + >>> imageset(x, -2*x + 5, S.Integers) + ImageSet(Lambda(x, 2*x + 1), Integers) + + See Also + ======== + + sympy.sets.fancysets.ImageSet + + """ + from .fancysets import ImageSet + from .setexpr import set_function + + if len(args) < 2: + raise ValueError('imageset expects at least 2 args, got: %s' % len(args)) + + if isinstance(args[0], (Symbol, tuple)) and len(args) > 2: + f = Lambda(args[0], args[1]) + set_list = args[2:] + else: + f = args[0] + set_list = args[1:] + + if isinstance(f, Lambda): + pass + elif callable(f): + nargs = getattr(f, 'nargs', {}) + if nargs: + if len(nargs) != 1: + raise NotImplementedError(filldedent(''' + This function can take more than 1 arg + but the potentially complicated set input + has not been analyzed at this point to + know its dimensions. 
TODO + ''')) + N = nargs.args[0] + if N == 1: + s = 'x' + else: + s = [Symbol('x%i' % i) for i in range(1, N + 1)] + else: + s = inspect.signature(f).parameters + + dexpr = _sympify(f(*[Dummy() for i in s])) + var = tuple(uniquely_named_symbol( + Symbol(i), dexpr) for i in s) + f = Lambda(var, f(*var)) + else: + raise TypeError(filldedent(''' + expecting lambda, Lambda, or FunctionClass, + not \'%s\'.''' % func_name(f))) + + if any(not isinstance(s, Set) for s in set_list): + name = [func_name(s) for s in set_list] + raise ValueError( + 'arguments after mapping should be sets, not %s' % name) + + if len(set_list) == 1: + set = set_list[0] + try: + # TypeError if arg count != set dimensions + r = set_function(f, set) + if r is None: + raise TypeError + if not r: + return r + except TypeError: + r = ImageSet(f, set) + if isinstance(r, ImageSet): + f, set = r.args + + if f.variables[0] == f.expr: + return set + + if isinstance(set, ImageSet): + # XXX: Maybe this should just be: + # f2 = set.lambda + # fun = Lambda(f2.signature, f(*f2.expr)) + # return imageset(fun, *set.base_sets) + if len(set.lamda.variables) == 1 and len(f.variables) == 1: + x = set.lamda.variables[0] + y = f.variables[0] + return imageset( + Lambda(x, f.expr.subs(y, set.lamda.expr)), *set.base_sets) + + if r is not None: + return r + + return ImageSet(f, *set_list) + + +def is_function_invertible_in_set(func, setv): + """ + Checks whether function ``func`` is invertible when the domain is + restricted to set ``setv``. + """ + # Functions known to always be invertible: + if func in (exp, log): + return True + u = Dummy("u") + fdiff = func(u).diff(u) + # monotonous functions: + # TODO: check subsets (`func` in `setv`) + if (fdiff > 0) == True or (fdiff < 0) == True: + return True + # TODO: support more + return None + + +def simplify_union(args): + """ + Simplify a :class:`Union` using known rules. 
+ + Explanation + =========== + + We first start with global rules like 'Merge all FiniteSets' + + Then we iterate through all pairs and ask the constituent sets if they + can simplify themselves with any other constituent. This process depends + on ``union_sets(a, b)`` functions. + """ + from sympy.sets.handlers.union import union_sets + + # ===== Global Rules ===== + if not args: + return S.EmptySet + + for arg in args: + if not isinstance(arg, Set): + raise TypeError("Input args to Union must be Sets") + + # Merge all finite sets + finite_sets = [x for x in args if x.is_FiniteSet] + if len(finite_sets) > 1: + a = (x for set in finite_sets for x in set) + finite_set = FiniteSet(*a) + args = [finite_set] + [x for x in args if not x.is_FiniteSet] + + # ===== Pair-wise Rules ===== + # Here we depend on rules built into the constituent sets + args = set(args) + new_args = True + while new_args: + for s in args: + new_args = False + for t in args - {s}: + new_set = union_sets(s, t) + # This returns None if s does not know how to intersect + # with t. Returns the newly intersected set otherwise + if new_set is not None: + if not isinstance(new_set, set): + new_set = {new_set} + new_args = (args - {s, t}).union(new_set) + break + if new_args: + args = new_args + break + + if len(args) == 1: + return args.pop() + else: + return Union(*args, evaluate=False) + + +def simplify_intersection(args): + """ + Simplify an intersection using known rules. 
+ + Explanation + =========== + + We first start with global rules like + 'if any empty sets return empty set' and 'distribute any unions' + + Then we iterate through all pairs and ask the constituent sets if they + can simplify themselves with any other constituent + """ + + # ===== Global Rules ===== + if not args: + return S.UniversalSet + + for arg in args: + if not isinstance(arg, Set): + raise TypeError("Input args to Union must be Sets") + + # If any EmptySets return EmptySet + if S.EmptySet in args: + return S.EmptySet + + # Handle Finite sets + rv = Intersection._handle_finite_sets(args) + + if rv is not None: + return rv + + # If any of the sets are unions, return a Union of Intersections + for s in args: + if s.is_Union: + other_sets = set(args) - {s} + if len(other_sets) > 0: + other = Intersection(*other_sets) + return Union(*(Intersection(arg, other) for arg in s.args)) + else: + return Union(*s.args) + + for s in args: + if s.is_Complement: + args.remove(s) + other_sets = args + [s.args[0]] + return Complement(Intersection(*other_sets), s.args[1]) + + from sympy.sets.handlers.intersection import intersection_sets + + # At this stage we are guaranteed not to have any + # EmptySets, FiniteSets, or Unions in the intersection + + # ===== Pair-wise Rules ===== + # Here we depend on rules built into the constituent sets + args = set(args) + new_args = True + while new_args: + for s in args: + new_args = False + for t in args - {s}: + new_set = intersection_sets(s, t) + # This returns None if s does not know how to intersect + # with t. 
Returns the newly intersected set otherwise + + if new_set is not None: + new_args = (args - {s, t}).union({new_set}) + break + if new_args: + args = new_args + break + + if len(args) == 1: + return args.pop() + else: + return Intersection(*args, evaluate=False) + + +def _handle_finite_sets(op, x, y, commutative): + # Handle finite sets: + fs_args, other = sift([x, y], lambda x: isinstance(x, FiniteSet), binary=True) + if len(fs_args) == 2: + return FiniteSet(*[op(i, j) for i in fs_args[0] for j in fs_args[1]]) + elif len(fs_args) == 1: + sets = [_apply_operation(op, other[0], i, commutative) for i in fs_args[0]] + return Union(*sets) + else: + return None + + +def _apply_operation(op, x, y, commutative): + from .fancysets import ImageSet + d = Dummy('d') + + out = _handle_finite_sets(op, x, y, commutative) + if out is None: + out = op(x, y) + + if out is None and commutative: + out = op(y, x) + if out is None: + _x, _y = symbols("x y") + if isinstance(x, Set) and not isinstance(y, Set): + out = ImageSet(Lambda(d, op(d, y)), x).doit() + elif not isinstance(x, Set) and isinstance(y, Set): + out = ImageSet(Lambda(d, op(x, d)), y).doit() + else: + out = ImageSet(Lambda((_x, _y), op(_x, _y)), x, y) + return out + + +def set_add(x, y): + from sympy.sets.handlers.add import _set_add + return _apply_operation(_set_add, x, y, commutative=True) + + +def set_sub(x, y): + from sympy.sets.handlers.add import _set_sub + return _apply_operation(_set_sub, x, y, commutative=False) + + +def set_mul(x, y): + from sympy.sets.handlers.mul import _set_mul + return _apply_operation(_set_mul, x, y, commutative=True) + + +def set_div(x, y): + from sympy.sets.handlers.mul import _set_div + return _apply_operation(_set_div, x, y, commutative=False) + + +def set_pow(x, y): + from sympy.sets.handlers.power import _set_pow + return _apply_operation(_set_pow, x, y, commutative=False) + + +def set_function(f, x): + from sympy.sets.handlers.functions import _set_function + return _set_function(f, 
x) + + +class SetKind(Kind): + """ + SetKind is kind for all Sets + + Every instance of Set will have kind ``SetKind`` parametrised by the kind + of the elements of the ``Set``. The kind of the elements might be + ``NumberKind``, or ``TupleKind`` or something else. When not all elements + have the same kind then the kind of the elements will be given as + ``UndefinedKind``. + + Parameters + ========== + + element_kind: Kind (optional) + The kind of the elements of the set. In a well defined set all elements + will have the same kind. Otherwise the kind should + :class:`sympy.core.kind.UndefinedKind`. The ``element_kind`` argument is optional but + should only be omitted in the case of ``EmptySet`` whose kind is simply + ``SetKind()`` + + Examples + ======== + + >>> from sympy import Interval + >>> Interval(1, 2).kind + SetKind(NumberKind) + >>> Interval(1,2).kind.element_kind + NumberKind + + See Also + ======== + + sympy.core.kind.NumberKind + sympy.matrices.kind.MatrixKind + sympy.core.containers.TupleKind + """ + def __new__(cls, element_kind=None): + obj = super().__new__(cls, element_kind) + obj.element_kind = element_kind + return obj + + def __repr__(self): + if not self.element_kind: + return "SetKind()" + else: + return "SetKind(%s)" % self.element_kind diff --git a/vllm/lib/python3.10/site-packages/sympy/sets/tests/__init__.py b/vllm/lib/python3.10/site-packages/sympy/sets/tests/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 diff --git a/vllm/lib/python3.10/site-packages/sympy/sets/tests/__pycache__/test_contains.cpython-310.pyc b/vllm/lib/python3.10/site-packages/sympy/sets/tests/__pycache__/test_contains.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0e13facf082d86849ca08672e6af9097fbc467dd Binary files /dev/null and b/vllm/lib/python3.10/site-packages/sympy/sets/tests/__pycache__/test_contains.cpython-310.pyc differ diff --git 
a/vllm/lib/python3.10/site-packages/sympy/sets/tests/__pycache__/test_fancysets.cpython-310.pyc b/vllm/lib/python3.10/site-packages/sympy/sets/tests/__pycache__/test_fancysets.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4991bb8d9bc2354cecee9205494e153ff5e4dabf Binary files /dev/null and b/vllm/lib/python3.10/site-packages/sympy/sets/tests/__pycache__/test_fancysets.cpython-310.pyc differ diff --git a/vllm/lib/python3.10/site-packages/sympy/sets/tests/test_contains.py b/vllm/lib/python3.10/site-packages/sympy/sets/tests/test_contains.py new file mode 100644 index 0000000000000000000000000000000000000000..bb6b98940946f98bf377aad6810f5b32eb6dd069 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/sympy/sets/tests/test_contains.py @@ -0,0 +1,52 @@ +from sympy.core.expr import unchanged +from sympy.core.numbers import oo +from sympy.core.relational import Eq +from sympy.core.singleton import S +from sympy.core.symbol import Symbol +from sympy.sets.contains import Contains +from sympy.sets.sets import (FiniteSet, Interval) +from sympy.testing.pytest import raises + + +def test_contains_basic(): + raises(TypeError, lambda: Contains(S.Integers, 1)) + assert Contains(2, S.Integers) is S.true + assert Contains(-2, S.Naturals) is S.false + + i = Symbol('i', integer=True) + assert Contains(i, S.Naturals) == Contains(i, S.Naturals, evaluate=False) + + +def test_issue_6194(): + x = Symbol('x') + assert unchanged(Contains, x, Interval(0, 1)) + assert Interval(0, 1).contains(x) == (S.Zero <= x) & (x <= 1) + assert Contains(x, FiniteSet(0)) != S.false + assert Contains(x, Interval(1, 1)) != S.false + assert Contains(x, S.Integers) != S.false + + +def test_issue_10326(): + assert Contains(oo, Interval(-oo, oo)) == False + assert Contains(-oo, Interval(-oo, oo)) == False + + +def test_binary_symbols(): + x = Symbol('x') + y = Symbol('y') + z = Symbol('z') + assert Contains(x, FiniteSet(y, Eq(z, True)) + ).binary_symbols == {y, z} + + +def 
test_as_set(): + x = Symbol('x') + y = Symbol('y') + assert Contains(x, FiniteSet(y)).as_set() == FiniteSet(y) + assert Contains(x, S.Integers).as_set() == S.Integers + assert Contains(x, S.Reals).as_set() == S.Reals + + +def test_type_error(): + # Pass in a parameter not of type "set" + raises(TypeError, lambda: Contains(2, None)) diff --git a/vllm/lib/python3.10/site-packages/sympy/sets/tests/test_fancysets.py b/vllm/lib/python3.10/site-packages/sympy/sets/tests/test_fancysets.py new file mode 100644 index 0000000000000000000000000000000000000000..b23c2a99fce0af5bfe7c667185465ee417de19ce --- /dev/null +++ b/vllm/lib/python3.10/site-packages/sympy/sets/tests/test_fancysets.py @@ -0,0 +1,1313 @@ + +from sympy.core.expr import unchanged +from sympy.sets.contains import Contains +from sympy.sets.fancysets import (ImageSet, Range, normalize_theta_set, + ComplexRegion) +from sympy.sets.sets import (FiniteSet, Interval, Union, imageset, + Intersection, ProductSet, SetKind) +from sympy.sets.conditionset import ConditionSet +from sympy.simplify.simplify import simplify +from sympy.core.basic import Basic +from sympy.core.containers import Tuple, TupleKind +from sympy.core.function import Lambda +from sympy.core.kind import NumberKind +from sympy.core.numbers import (I, Rational, oo, pi) +from sympy.core.relational import Eq +from sympy.core.singleton import S +from sympy.core.symbol import (Dummy, Symbol, symbols) +from sympy.functions.elementary.complexes import Abs +from sympy.functions.elementary.exponential import (exp, log) +from sympy.functions.elementary.integers import floor +from sympy.functions.elementary.miscellaneous import sqrt +from sympy.functions.elementary.trigonometric import (cos, sin, tan) +from sympy.logic.boolalg import And +from sympy.matrices.dense import eye +from sympy.testing.pytest import XFAIL, raises +from sympy.abc import x, y, t, z +from sympy.core.mod import Mod + +import itertools + + +def test_naturals(): + N = S.Naturals + assert 5 in N 
+ assert -5 not in N + assert 5.5 not in N + ni = iter(N) + a, b, c, d = next(ni), next(ni), next(ni), next(ni) + assert (a, b, c, d) == (1, 2, 3, 4) + assert isinstance(a, Basic) + + assert N.intersect(Interval(-5, 5)) == Range(1, 6) + assert N.intersect(Interval(-5, 5, True, True)) == Range(1, 5) + + assert N.boundary == N + assert N.is_open == False + assert N.is_closed == True + + assert N.inf == 1 + assert N.sup is oo + assert not N.contains(oo) + for s in (S.Naturals0, S.Naturals): + assert s.intersection(S.Reals) is s + assert s.is_subset(S.Reals) + + assert N.as_relational(x) == And(Eq(floor(x), x), x >= 1, x < oo) + + +def test_naturals0(): + N = S.Naturals0 + assert 0 in N + assert -1 not in N + assert next(iter(N)) == 0 + assert not N.contains(oo) + assert N.contains(sin(x)) == Contains(sin(x), N) + + +def test_integers(): + Z = S.Integers + assert 5 in Z + assert -5 in Z + assert 5.5 not in Z + assert not Z.contains(oo) + assert not Z.contains(-oo) + + zi = iter(Z) + a, b, c, d = next(zi), next(zi), next(zi), next(zi) + assert (a, b, c, d) == (0, 1, -1, 2) + assert isinstance(a, Basic) + + assert Z.intersect(Interval(-5, 5)) == Range(-5, 6) + assert Z.intersect(Interval(-5, 5, True, True)) == Range(-4, 5) + assert Z.intersect(Interval(5, S.Infinity)) == Range(5, S.Infinity) + assert Z.intersect(Interval.Lopen(5, S.Infinity)) == Range(6, S.Infinity) + + assert Z.inf is -oo + assert Z.sup is oo + + assert Z.boundary == Z + assert Z.is_open == False + assert Z.is_closed == True + + assert Z.as_relational(x) == And(Eq(floor(x), x), -oo < x, x < oo) + + +def test_ImageSet(): + raises(ValueError, lambda: ImageSet(x, S.Integers)) + assert ImageSet(Lambda(x, 1), S.Integers) == FiniteSet(1) + assert ImageSet(Lambda(x, y), S.Integers) == {y} + assert ImageSet(Lambda(x, 1), S.EmptySet) == S.EmptySet + empty = Intersection(FiniteSet(log(2)/pi), S.Integers) + assert unchanged(ImageSet, Lambda(x, 1), empty) # issue #17471 + squares = ImageSet(Lambda(x, x**2), 
S.Naturals) + assert 4 in squares + assert 5 not in squares + assert FiniteSet(*range(10)).intersect(squares) == FiniteSet(1, 4, 9) + + assert 16 not in squares.intersect(Interval(0, 10)) + + si = iter(squares) + a, b, c, d = next(si), next(si), next(si), next(si) + assert (a, b, c, d) == (1, 4, 9, 16) + + harmonics = ImageSet(Lambda(x, 1/x), S.Naturals) + assert Rational(1, 5) in harmonics + assert Rational(.25) in harmonics + assert harmonics.contains(.25) == Contains( + 0.25, ImageSet(Lambda(x, 1/x), S.Naturals), evaluate=False) + assert Rational(.3) not in harmonics + assert (1, 2) not in harmonics + + assert harmonics.is_iterable + + assert imageset(x, -x, Interval(0, 1)) == Interval(-1, 0) + + assert ImageSet(Lambda(x, x**2), Interval(0, 2)).doit() == Interval(0, 4) + assert ImageSet(Lambda((x, y), 2*x), {4}, {3}).doit() == FiniteSet(8) + assert (ImageSet(Lambda((x, y), x+y), {1, 2, 3}, {10, 20, 30}).doit() == + FiniteSet(11, 12, 13, 21, 22, 23, 31, 32, 33)) + + c = Interval(1, 3) * Interval(1, 3) + assert Tuple(2, 6) in ImageSet(Lambda(((x, y),), (x, 2*y)), c) + assert Tuple(2, S.Half) in ImageSet(Lambda(((x, y),), (x, 1/y)), c) + assert Tuple(2, -2) not in ImageSet(Lambda(((x, y),), (x, y**2)), c) + assert Tuple(2, -2) in ImageSet(Lambda(((x, y),), (x, -2)), c) + c3 = ProductSet(Interval(3, 7), Interval(8, 11), Interval(5, 9)) + assert Tuple(8, 3, 9) in ImageSet(Lambda(((t, y, x),), (y, t, x)), c3) + assert Tuple(Rational(1, 8), 3, 9) in ImageSet(Lambda(((t, y, x),), (1/y, t, x)), c3) + assert 2/pi not in ImageSet(Lambda(((x, y),), 2/x), c) + assert 2/S(100) not in ImageSet(Lambda(((x, y),), 2/x), c) + assert Rational(2, 3) in ImageSet(Lambda(((x, y),), 2/x), c) + + S1 = imageset(lambda x, y: x + y, S.Integers, S.Naturals) + assert S1.base_pset == ProductSet(S.Integers, S.Naturals) + assert S1.base_sets == (S.Integers, S.Naturals) + + # Passing a set instead of a FiniteSet shouldn't raise + assert unchanged(ImageSet, Lambda(x, x**2), {1, 2, 3}) + + S2 = 
ImageSet(Lambda(((x, y),), x+y), {(1, 2), (3, 4)}) + assert 3 in S2.doit() + # FIXME: This doesn't yet work: + #assert 3 in S2 + assert S2._contains(3) is None + + raises(TypeError, lambda: ImageSet(Lambda(x, x**2), 1)) + + +def test_image_is_ImageSet(): + assert isinstance(imageset(x, sqrt(sin(x)), Range(5)), ImageSet) + + +def test_halfcircle(): + r, th = symbols('r, theta', real=True) + L = Lambda(((r, th),), (r*cos(th), r*sin(th))) + halfcircle = ImageSet(L, Interval(0, 1)*Interval(0, pi)) + + assert (1, 0) in halfcircle + assert (0, -1) not in halfcircle + assert (0, 0) in halfcircle + assert halfcircle._contains((r, 0)) is None + assert not halfcircle.is_iterable + + +@XFAIL +def test_halfcircle_fail(): + r, th = symbols('r, theta', real=True) + L = Lambda(((r, th),), (r*cos(th), r*sin(th))) + halfcircle = ImageSet(L, Interval(0, 1)*Interval(0, pi)) + assert (r, 2*pi) not in halfcircle + + +def test_ImageSet_iterator_not_injective(): + L = Lambda(x, x - x % 2) # produces 0, 2, 2, 4, 4, 6, 6, ... 
+ evens = ImageSet(L, S.Naturals) + i = iter(evens) + # No repeats here + assert (next(i), next(i), next(i), next(i)) == (0, 2, 4, 6) + + +def test_inf_Range_len(): + raises(ValueError, lambda: len(Range(0, oo, 2))) + assert Range(0, oo, 2).size is S.Infinity + assert Range(0, -oo, -2).size is S.Infinity + assert Range(oo, 0, -2).size is S.Infinity + assert Range(-oo, 0, 2).size is S.Infinity + + +def test_Range_set(): + empty = Range(0) + + assert Range(5) == Range(0, 5) == Range(0, 5, 1) + + r = Range(10, 20, 2) + assert 12 in r + assert 8 not in r + assert 11 not in r + assert 30 not in r + + assert list(Range(0, 5)) == list(range(5)) + assert list(Range(5, 0, -1)) == list(range(5, 0, -1)) + + + assert Range(5, 15).sup == 14 + assert Range(5, 15).inf == 5 + assert Range(15, 5, -1).sup == 15 + assert Range(15, 5, -1).inf == 6 + assert Range(10, 67, 10).sup == 60 + assert Range(60, 7, -10).inf == 10 + + assert len(Range(10, 38, 10)) == 3 + + assert Range(0, 0, 5) == empty + assert Range(oo, oo, 1) == empty + assert Range(oo, 1, 1) == empty + assert Range(-oo, 1, -1) == empty + assert Range(1, oo, -1) == empty + assert Range(1, -oo, 1) == empty + assert Range(1, -4, oo) == empty + ip = symbols('ip', positive=True) + assert Range(0, ip, -1) == empty + assert Range(0, -ip, 1) == empty + assert Range(1, -4, -oo) == Range(1, 2) + assert Range(1, 4, oo) == Range(1, 2) + assert Range(-oo, oo).size == oo + assert Range(oo, -oo, -1).size == oo + raises(ValueError, lambda: Range(-oo, oo, 2)) + raises(ValueError, lambda: Range(x, pi, y)) + raises(ValueError, lambda: Range(x, y, 0)) + + assert 5 in Range(0, oo, 5) + assert -5 in Range(-oo, 0, 5) + assert oo not in Range(0, oo) + ni = symbols('ni', integer=False) + assert ni not in Range(oo) + u = symbols('u', integer=None) + assert Range(oo).contains(u) is not False + inf = symbols('inf', infinite=True) + assert inf not in Range(-oo, oo) + raises(ValueError, lambda: Range(0, oo, 2)[-1]) + raises(ValueError, lambda: Range(0, 
-oo, -2)[-1]) + assert Range(-oo, 1, 1)[-1] is S.Zero + assert Range(oo, 1, -1)[-1] == 2 + assert inf not in Range(oo) + assert Range(1, 10, 1)[-1] == 9 + assert all(i.is_Integer for i in Range(0, -1, 1)) + it = iter(Range(-oo, 0, 2)) + raises(TypeError, lambda: next(it)) + + assert empty.intersect(S.Integers) == empty + assert Range(-1, 10, 1).intersect(S.Complexes) == Range(-1, 10, 1) + assert Range(-1, 10, 1).intersect(S.Reals) == Range(-1, 10, 1) + assert Range(-1, 10, 1).intersect(S.Rationals) == Range(-1, 10, 1) + assert Range(-1, 10, 1).intersect(S.Integers) == Range(-1, 10, 1) + assert Range(-1, 10, 1).intersect(S.Naturals) == Range(1, 10, 1) + assert Range(-1, 10, 1).intersect(S.Naturals0) == Range(0, 10, 1) + + # test slicing + assert Range(1, 10, 1)[5] == 6 + assert Range(1, 12, 2)[5] == 11 + assert Range(1, 10, 1)[-1] == 9 + assert Range(1, 10, 3)[-1] == 7 + raises(ValueError, lambda: Range(oo,0,-1)[1:3:0]) + raises(ValueError, lambda: Range(oo,0,-1)[:1]) + raises(ValueError, lambda: Range(1, oo)[-2]) + raises(ValueError, lambda: Range(-oo, 1)[2]) + raises(IndexError, lambda: Range(10)[-20]) + raises(IndexError, lambda: Range(10)[20]) + raises(ValueError, lambda: Range(2, -oo, -2)[2:2:0]) + assert Range(2, -oo, -2)[2:2:2] == empty + assert Range(2, -oo, -2)[:2:2] == Range(2, -2, -4) + raises(ValueError, lambda: Range(-oo, 4, 2)[:2:2]) + assert Range(-oo, 4, 2)[::-2] == Range(2, -oo, -4) + raises(ValueError, lambda: Range(-oo, 4, 2)[::2]) + assert Range(oo, 2, -2)[::] == Range(oo, 2, -2) + assert Range(-oo, 4, 2)[:-2:-2] == Range(2, 0, -4) + assert Range(-oo, 4, 2)[:-2:2] == Range(-oo, 0, 4) + raises(ValueError, lambda: Range(-oo, 4, 2)[:0:-2]) + raises(ValueError, lambda: Range(-oo, 4, 2)[:2:-2]) + assert Range(-oo, 4, 2)[-2::-2] == Range(0, -oo, -4) + raises(ValueError, lambda: Range(-oo, 4, 2)[-2:0:-2]) + raises(ValueError, lambda: Range(-oo, 4, 2)[0::2]) + assert Range(oo, 2, -2)[0::] == Range(oo, 2, -2) + raises(ValueError, lambda: Range(-oo, 4, 
2)[0:-2:2]) + assert Range(oo, 2, -2)[0:-2:] == Range(oo, 6, -2) + raises(ValueError, lambda: Range(oo, 2, -2)[0:2:]) + raises(ValueError, lambda: Range(-oo, 4, 2)[2::-1]) + assert Range(-oo, 4, 2)[-2::2] == Range(0, 4, 4) + assert Range(oo, 0, -2)[-10:0:2] == empty + raises(ValueError, lambda: Range(oo, 0, -2)[0]) + raises(ValueError, lambda: Range(oo, 0, -2)[-10:10:2]) + raises(ValueError, lambda: Range(oo, 0, -2)[0::-2]) + assert Range(oo, 0, -2)[0:-4:-2] == empty + assert Range(oo, 0, -2)[:0:2] == empty + raises(ValueError, lambda: Range(oo, 0, -2)[:1:-1]) + + # test empty Range + assert Range(x, x, y) == empty + assert empty.reversed == empty + assert 0 not in empty + assert list(empty) == [] + assert len(empty) == 0 + assert empty.size is S.Zero + assert empty.intersect(FiniteSet(0)) is S.EmptySet + assert bool(empty) is False + raises(IndexError, lambda: empty[0]) + assert empty[:0] == empty + raises(NotImplementedError, lambda: empty.inf) + raises(NotImplementedError, lambda: empty.sup) + assert empty.as_relational(x) is S.false + + AB = [None] + list(range(12)) + for R in [ + Range(1, 10), + Range(1, 10, 2), + ]: + r = list(R) + for a, b, c in itertools.product(AB, AB, [-3, -1, None, 1, 3]): + for reverse in range(2): + r = list(reversed(r)) + R = R.reversed + result = list(R[a:b:c]) + ans = r[a:b:c] + txt = ('\n%s[%s:%s:%s] = %s -> %s' % ( + R, a, b, c, result, ans)) + check = ans == result + assert check, txt + + assert Range(1, 10, 1).boundary == Range(1, 10, 1) + + for r in (Range(1, 10, 2), Range(1, oo, 2)): + rev = r.reversed + assert r.inf == rev.inf and r.sup == rev.sup + assert r.step == -rev.step + + builtin_range = range + + raises(TypeError, lambda: Range(builtin_range(1))) + assert S(builtin_range(10)) == Range(10) + assert S(builtin_range(1000000000000)) == Range(1000000000000) + + # test Range.as_relational + assert Range(1, 4).as_relational(x) == (x >= 1) & (x <= 3) & Eq(Mod(x, 1), 0) + assert Range(oo, 1, -2).as_relational(x) == (x >= 3) & 
(x < oo) & Eq(Mod(x + 1, -2), 0) + + +def test_Range_symbolic(): + # symbolic Range + xr = Range(x, x + 4, 5) + sr = Range(x, y, t) + i = Symbol('i', integer=True) + ip = Symbol('i', integer=True, positive=True) + ipr = Range(ip) + inr = Range(0, -ip, -1) + ir = Range(i, i + 19, 2) + ir2 = Range(i, i*8, 3*i) + i = Symbol('i', integer=True) + inf = symbols('inf', infinite=True) + raises(ValueError, lambda: Range(inf)) + raises(ValueError, lambda: Range(inf, 0, -1)) + raises(ValueError, lambda: Range(inf, inf, 1)) + raises(ValueError, lambda: Range(1, 1, inf)) + # args + assert xr.args == (x, x + 5, 5) + assert sr.args == (x, y, t) + assert ir.args == (i, i + 20, 2) + assert ir2.args == (i, 10*i, 3*i) + # reversed + raises(ValueError, lambda: xr.reversed) + raises(ValueError, lambda: sr.reversed) + assert ipr.reversed.args == (ip - 1, -1, -1) + assert inr.reversed.args == (-ip + 1, 1, 1) + assert ir.reversed.args == (i + 18, i - 2, -2) + assert ir2.reversed.args == (7*i, -2*i, -3*i) + # contains + assert inf not in sr + assert inf not in ir + assert 0 in ipr + assert 0 in inr + raises(TypeError, lambda: 1 in ipr) + raises(TypeError, lambda: -1 in inr) + assert .1 not in sr + assert .1 not in ir + assert i + 1 not in ir + assert i + 2 in ir + raises(TypeError, lambda: x in xr) # XXX is this what contains is supposed to do? + raises(TypeError, lambda: 1 in sr) # XXX is this what contains is supposed to do? 
+ # iter + raises(ValueError, lambda: next(iter(xr))) + raises(ValueError, lambda: next(iter(sr))) + assert next(iter(ir)) == i + assert next(iter(ir2)) == i + assert sr.intersect(S.Integers) == sr + assert sr.intersect(FiniteSet(x)) == Intersection({x}, sr) + raises(ValueError, lambda: sr[:2]) + raises(ValueError, lambda: xr[0]) + raises(ValueError, lambda: sr[0]) + # len + assert len(ir) == ir.size == 10 + assert len(ir2) == ir2.size == 3 + raises(ValueError, lambda: len(xr)) + raises(ValueError, lambda: xr.size) + raises(ValueError, lambda: len(sr)) + raises(ValueError, lambda: sr.size) + # bool + assert bool(Range(0)) == False + assert bool(xr) + assert bool(ir) + assert bool(ipr) + assert bool(inr) + raises(ValueError, lambda: bool(sr)) + raises(ValueError, lambda: bool(ir2)) + # inf + raises(ValueError, lambda: xr.inf) + raises(ValueError, lambda: sr.inf) + assert ipr.inf == 0 + assert inr.inf == -ip + 1 + assert ir.inf == i + raises(ValueError, lambda: ir2.inf) + # sup + raises(ValueError, lambda: xr.sup) + raises(ValueError, lambda: sr.sup) + assert ipr.sup == ip - 1 + assert inr.sup == 0 + assert ir.inf == i + raises(ValueError, lambda: ir2.sup) + # getitem + raises(ValueError, lambda: xr[0]) + raises(ValueError, lambda: sr[0]) + raises(ValueError, lambda: sr[-1]) + raises(ValueError, lambda: sr[:2]) + assert ir[:2] == Range(i, i + 4, 2) + assert ir[0] == i + assert ir[-2] == i + 16 + assert ir[-1] == i + 18 + assert ir2[:2] == Range(i, 7*i, 3*i) + assert ir2[0] == i + assert ir2[-2] == 4*i + assert ir2[-1] == 7*i + raises(ValueError, lambda: Range(i)[-1]) + assert ipr[0] == ipr.inf == 0 + assert ipr[-1] == ipr.sup == ip - 1 + assert inr[0] == inr.sup == 0 + assert inr[-1] == inr.inf == -ip + 1 + raises(ValueError, lambda: ipr[-2]) + assert ir.inf == i + assert ir.sup == i + 18 + raises(ValueError, lambda: Range(i).inf) + # as_relational + assert ir.as_relational(x) == ((x >= i) & (x <= i + 18) & + Eq(Mod(-i + x, 2), 0)) + assert ir2.as_relational(x) == 
Eq( + Mod(-i + x, 3*i), 0) & (((x >= i) & (x <= 7*i) & (3*i >= 1)) | + ((x <= i) & (x >= 7*i) & (3*i <= -1))) + assert Range(i, i + 1).as_relational(x) == Eq(x, i) + assert sr.as_relational(z) == Eq( + Mod(t, 1), 0) & Eq(Mod(x, 1), 0) & Eq(Mod(-x + z, t), 0 + ) & (((z >= x) & (z <= -t + y) & (t >= 1)) | + ((z <= x) & (z >= -t + y) & (t <= -1))) + assert xr.as_relational(z) == Eq(z, x) & Eq(Mod(x, 1), 0) + # symbols can clash if user wants (but it must be integer) + assert xr.as_relational(x) == Eq(Mod(x, 1), 0) + # contains() for symbolic values (issue #18146) + e = Symbol('e', integer=True, even=True) + o = Symbol('o', integer=True, odd=True) + assert Range(5).contains(i) == And(i >= 0, i <= 4) + assert Range(1).contains(i) == Eq(i, 0) + assert Range(-oo, 5, 1).contains(i) == (i <= 4) + assert Range(-oo, oo).contains(i) == True + assert Range(0, 8, 2).contains(i) == Contains(i, Range(0, 8, 2)) + assert Range(0, 8, 2).contains(e) == And(e >= 0, e <= 6) + assert Range(0, 8, 2).contains(2*i) == And(2*i >= 0, 2*i <= 6) + assert Range(0, 8, 2).contains(o) == False + assert Range(1, 9, 2).contains(e) == False + assert Range(1, 9, 2).contains(o) == And(o >= 1, o <= 7) + assert Range(8, 0, -2).contains(o) == False + assert Range(9, 1, -2).contains(o) == And(o >= 3, o <= 9) + assert Range(-oo, 8, 2).contains(i) == Contains(i, Range(-oo, 8, 2)) + + +def test_range_range_intersection(): + for a, b, r in [ + (Range(0), Range(1), S.EmptySet), + (Range(3), Range(4, oo), S.EmptySet), + (Range(3), Range(-3, -1), S.EmptySet), + (Range(1, 3), Range(0, 3), Range(1, 3)), + (Range(1, 3), Range(1, 4), Range(1, 3)), + (Range(1, oo, 2), Range(2, oo, 2), S.EmptySet), + (Range(0, oo, 2), Range(oo), Range(0, oo, 2)), + (Range(0, oo, 2), Range(100), Range(0, 100, 2)), + (Range(2, oo, 2), Range(oo), Range(2, oo, 2)), + (Range(0, oo, 2), Range(5, 6), S.EmptySet), + (Range(2, 80, 1), Range(55, 71, 4), Range(55, 71, 4)), + (Range(0, 6, 3), Range(-oo, 5, 3), S.EmptySet), + (Range(0, oo, 2), 
Range(5, oo, 3), Range(8, oo, 6)), + (Range(4, 6, 2), Range(2, 16, 7), S.EmptySet),]: + assert a.intersect(b) == r + assert a.intersect(b.reversed) == r + assert a.reversed.intersect(b) == r + assert a.reversed.intersect(b.reversed) == r + a, b = b, a + assert a.intersect(b) == r + assert a.intersect(b.reversed) == r + assert a.reversed.intersect(b) == r + assert a.reversed.intersect(b.reversed) == r + + +def test_range_interval_intersection(): + p = symbols('p', positive=True) + assert isinstance(Range(3).intersect(Interval(p, p + 2)), Intersection) + assert Range(4).intersect(Interval(0, 3)) == Range(4) + assert Range(4).intersect(Interval(-oo, oo)) == Range(4) + assert Range(4).intersect(Interval(1, oo)) == Range(1, 4) + assert Range(4).intersect(Interval(1.1, oo)) == Range(2, 4) + assert Range(4).intersect(Interval(0.1, 3)) == Range(1, 4) + assert Range(4).intersect(Interval(0.1, 3.1)) == Range(1, 4) + assert Range(4).intersect(Interval.open(0, 3)) == Range(1, 3) + assert Range(4).intersect(Interval.open(0.1, 0.5)) is S.EmptySet + assert Interval(-1, 5).intersect(S.Complexes) == Interval(-1, 5) + assert Interval(-1, 5).intersect(S.Reals) == Interval(-1, 5) + assert Interval(-1, 5).intersect(S.Integers) == Range(-1, 6) + assert Interval(-1, 5).intersect(S.Naturals) == Range(1, 6) + assert Interval(-1, 5).intersect(S.Naturals0) == Range(0, 6) + + # Null Range intersections + assert Range(0).intersect(Interval(0.2, 0.8)) is S.EmptySet + assert Range(0).intersect(Interval(-oo, oo)) is S.EmptySet + + +def test_range_is_finite_set(): + assert Range(-100, 100).is_finite_set is True + assert Range(2, oo).is_finite_set is False + assert Range(-oo, 50).is_finite_set is False + assert Range(-oo, oo).is_finite_set is False + assert Range(oo, -oo).is_finite_set is True + assert Range(0, 0).is_finite_set is True + assert Range(oo, oo).is_finite_set is True + assert Range(-oo, -oo).is_finite_set is True + n = Symbol('n', integer=True) + m = Symbol('m', integer=True) + assert 
Range(n, n + 49).is_finite_set is True + assert Range(n, 0).is_finite_set is True + assert Range(-3, n + 7).is_finite_set is True + assert Range(n, m).is_finite_set is True + assert Range(n + m, m - n).is_finite_set is True + assert Range(n, n + m + n).is_finite_set is True + assert Range(n, oo).is_finite_set is False + assert Range(-oo, n).is_finite_set is False + assert Range(n, -oo).is_finite_set is True + assert Range(oo, n).is_finite_set is True + + +def test_Range_is_iterable(): + assert Range(-100, 100).is_iterable is True + assert Range(2, oo).is_iterable is False + assert Range(-oo, 50).is_iterable is False + assert Range(-oo, oo).is_iterable is False + assert Range(oo, -oo).is_iterable is True + assert Range(0, 0).is_iterable is True + assert Range(oo, oo).is_iterable is True + assert Range(-oo, -oo).is_iterable is True + n = Symbol('n', integer=True) + m = Symbol('m', integer=True) + p = Symbol('p', integer=True, positive=True) + assert Range(n, n + 49).is_iterable is True + assert Range(n, 0).is_iterable is False + assert Range(-3, n + 7).is_iterable is False + assert Range(-3, p + 7).is_iterable is False # Should work with better __iter__ + assert Range(n, m).is_iterable is False + assert Range(n + m, m - n).is_iterable is False + assert Range(n, n + m + n).is_iterable is False + assert Range(n, oo).is_iterable is False + assert Range(-oo, n).is_iterable is False + x = Symbol('x') + assert Range(x, x + 49).is_iterable is False + assert Range(x, 0).is_iterable is False + assert Range(-3, x + 7).is_iterable is False + assert Range(x, m).is_iterable is False + assert Range(x + m, m - x).is_iterable is False + assert Range(x, x + m + x).is_iterable is False + assert Range(x, oo).is_iterable is False + assert Range(-oo, x).is_iterable is False + + +def test_Integers_eval_imageset(): + ans = ImageSet(Lambda(x, 2*x + Rational(3, 7)), S.Integers) + im = imageset(Lambda(x, -2*x + Rational(3, 7)), S.Integers) + assert im == ans + im = imageset(Lambda(x, -2*x - 
Rational(11, 7)), S.Integers) + assert im == ans + y = Symbol('y') + L = imageset(x, 2*x + y, S.Integers) + assert y + 4 in L + a, b, c = 0.092, 0.433, 0.341 + assert a in imageset(x, a + c*x, S.Integers) + assert b in imageset(x, b + c*x, S.Integers) + + _x = symbols('x', negative=True) + eq = _x**2 - _x + 1 + assert imageset(_x, eq, S.Integers).lamda.expr == _x**2 + _x + 1 + eq = 3*_x - 1 + assert imageset(_x, eq, S.Integers).lamda.expr == 3*_x + 2 + + assert imageset(x, (x, 1/x), S.Integers) == \ + ImageSet(Lambda(x, (x, 1/x)), S.Integers) + + +def test_Range_eval_imageset(): + a, b, c = symbols('a b c') + assert imageset(x, a*(x + b) + c, Range(3)) == \ + imageset(x, a*x + a*b + c, Range(3)) + eq = (x + 1)**2 + assert imageset(x, eq, Range(3)).lamda.expr == eq + eq = a*(x + b) + c + r = Range(3, -3, -2) + imset = imageset(x, eq, r) + assert imset.lamda.expr != eq + assert list(imset) == [eq.subs(x, i).expand() for i in list(r)] + + +def test_fun(): + assert (FiniteSet(*ImageSet(Lambda(x, sin(pi*x/4)), + Range(-10, 11))) == FiniteSet(-1, -sqrt(2)/2, 0, sqrt(2)/2, 1)) + + +def test_Range_is_empty(): + i = Symbol('i', integer=True) + n = Symbol('n', negative=True, integer=True) + p = Symbol('p', positive=True, integer=True) + + assert Range(0).is_empty + assert not Range(1).is_empty + assert Range(1, 0).is_empty + assert not Range(-1, 0).is_empty + assert Range(i).is_empty is None + assert Range(n).is_empty + assert Range(p).is_empty is False + assert Range(n, 0).is_empty is False + assert Range(n, p).is_empty is False + assert Range(p, n).is_empty + assert Range(n, -1).is_empty is None + assert Range(p, n, -1).is_empty is False + + +def test_Reals(): + assert 5 in S.Reals + assert S.Pi in S.Reals + assert -sqrt(2) in S.Reals + assert (2, 5) not in S.Reals + assert sqrt(-1) not in S.Reals + assert S.Reals == Interval(-oo, oo) + assert S.Reals != Interval(0, oo) + assert S.Reals.is_subset(Interval(-oo, oo)) + assert S.Reals.intersect(Range(-oo, oo)) == Range(-oo, 
oo) + assert S.ComplexInfinity not in S.Reals + assert S.NaN not in S.Reals + assert x + S.ComplexInfinity not in S.Reals + + +def test_Complex(): + assert 5 in S.Complexes + assert 5 + 4*I in S.Complexes + assert S.Pi in S.Complexes + assert -sqrt(2) in S.Complexes + assert -I in S.Complexes + assert sqrt(-1) in S.Complexes + assert S.Complexes.intersect(S.Reals) == S.Reals + assert S.Complexes.union(S.Reals) == S.Complexes + assert S.Complexes == ComplexRegion(S.Reals*S.Reals) + assert (S.Complexes == ComplexRegion(Interval(1, 2)*Interval(3, 4))) == False + assert str(S.Complexes) == "Complexes" + assert repr(S.Complexes) == "Complexes" + + +def take(n, iterable): + "Return first n items of the iterable as a list" + return list(itertools.islice(iterable, n)) + + +def test_intersections(): + assert S.Integers.intersect(S.Reals) == S.Integers + assert 5 in S.Integers.intersect(S.Reals) + assert 5 in S.Integers.intersect(S.Reals) + assert -5 not in S.Naturals.intersect(S.Reals) + assert 5.5 not in S.Integers.intersect(S.Reals) + assert 5 in S.Integers.intersect(Interval(3, oo)) + assert -5 in S.Integers.intersect(Interval(-oo, 3)) + assert all(x.is_Integer + for x in take(10, S.Integers.intersect(Interval(3, oo)) )) + + +def test_infinitely_indexed_set_1(): + from sympy.abc import n, m + assert imageset(Lambda(n, n), S.Integers) == imageset(Lambda(m, m), S.Integers) + + assert imageset(Lambda(n, 2*n), S.Integers).intersect( + imageset(Lambda(m, 2*m + 1), S.Integers)) is S.EmptySet + + assert imageset(Lambda(n, 2*n), S.Integers).intersect( + imageset(Lambda(n, 2*n + 1), S.Integers)) is S.EmptySet + + assert imageset(Lambda(m, 2*m), S.Integers).intersect( + imageset(Lambda(n, 3*n), S.Integers)).dummy_eq( + ImageSet(Lambda(t, 6*t), S.Integers)) + + assert imageset(x, x/2 + Rational(1, 3), S.Integers).intersect(S.Integers) is S.EmptySet + assert imageset(x, x/2 + S.Half, S.Integers).intersect(S.Integers) is S.Integers + + # https://github.com/sympy/sympy/issues/17355 + 
S53 = ImageSet(Lambda(n, 5*n + 3), S.Integers) + assert S53.intersect(S.Integers) == S53 + + +def test_infinitely_indexed_set_2(): + from sympy.abc import n + a = Symbol('a', integer=True) + assert imageset(Lambda(n, n), S.Integers) == \ + imageset(Lambda(n, n + a), S.Integers) + assert imageset(Lambda(n, n + pi), S.Integers) == \ + imageset(Lambda(n, n + a + pi), S.Integers) + assert imageset(Lambda(n, n), S.Integers) == \ + imageset(Lambda(n, -n + a), S.Integers) + assert imageset(Lambda(n, -6*n), S.Integers) == \ + ImageSet(Lambda(n, 6*n), S.Integers) + assert imageset(Lambda(n, 2*n + pi), S.Integers) == \ + ImageSet(Lambda(n, 2*n + pi - 2), S.Integers) + + +def test_imageset_intersect_real(): + from sympy.abc import n + assert imageset(Lambda(n, n + (n - 1)*(n + 1)*I), S.Integers).intersect(S.Reals) == FiniteSet(-1, 1) + im = (n - 1)*(n + S.Half) + assert imageset(Lambda(n, n + im*I), S.Integers + ).intersect(S.Reals) == FiniteSet(1) + assert imageset(Lambda(n, n + im*(n + 1)*I), S.Naturals0 + ).intersect(S.Reals) == FiniteSet(1) + assert imageset(Lambda(n, n/2 + im.expand()*I), S.Integers + ).intersect(S.Reals) == ImageSet(Lambda(x, x/2), ConditionSet( + n, Eq(n**2 - n/2 - S(1)/2, 0), S.Integers)) + assert imageset(Lambda(n, n/(1/n - 1) + im*(n + 1)*I), S.Integers + ).intersect(S.Reals) == FiniteSet(S.Half) + assert imageset(Lambda(n, n/(n - 6) + + (n - 3)*(n + 1)*I/(2*n + 2)), S.Integers).intersect( + S.Reals) == FiniteSet(-1) + assert imageset(Lambda(n, n/(n**2 - 9) + + (n - 3)*(n + 1)*I/(2*n + 2)), S.Integers).intersect( + S.Reals) is S.EmptySet + s = ImageSet( + Lambda(n, -I*(I*(2*pi*n - pi/4) + log(Abs(sqrt(-I))))), + S.Integers) + # s is unevaluated, but after intersection the result + # should be canonical + assert s.intersect(S.Reals) == imageset( + Lambda(n, 2*n*pi - pi/4), S.Integers) == ImageSet( + Lambda(n, 2*pi*n + pi*Rational(7, 4)), S.Integers) + + +def test_imageset_intersect_interval(): + from sympy.abc import n + f1 = ImageSet(Lambda(n, 
n*pi), S.Integers) + f2 = ImageSet(Lambda(n, 2*n), Interval(0, pi)) + f3 = ImageSet(Lambda(n, 2*n*pi + pi/2), S.Integers) + # complex expressions + f4 = ImageSet(Lambda(n, n*I*pi), S.Integers) + f5 = ImageSet(Lambda(n, 2*I*n*pi + pi/2), S.Integers) + # non-linear expressions + f6 = ImageSet(Lambda(n, log(n)), S.Integers) + f7 = ImageSet(Lambda(n, n**2), S.Integers) + f8 = ImageSet(Lambda(n, Abs(n)), S.Integers) + f9 = ImageSet(Lambda(n, exp(n)), S.Naturals0) + + assert f1.intersect(Interval(-1, 1)) == FiniteSet(0) + assert f1.intersect(Interval(0, 2*pi, False, True)) == FiniteSet(0, pi) + assert f2.intersect(Interval(1, 2)) == Interval(1, 2) + assert f3.intersect(Interval(-1, 1)) == S.EmptySet + assert f3.intersect(Interval(-5, 5)) == FiniteSet(pi*Rational(-3, 2), pi/2) + assert f4.intersect(Interval(-1, 1)) == FiniteSet(0) + assert f4.intersect(Interval(1, 2)) == S.EmptySet + assert f5.intersect(Interval(0, 1)) == S.EmptySet + assert f6.intersect(Interval(0, 1)) == FiniteSet(S.Zero, log(2)) + assert f7.intersect(Interval(0, 10)) == Intersection(f7, Interval(0, 10)) + assert f8.intersect(Interval(0, 2)) == Intersection(f8, Interval(0, 2)) + assert f9.intersect(Interval(1, 2)) == Intersection(f9, Interval(1, 2)) + + +def test_imageset_intersect_diophantine(): + from sympy.abc import m, n + # Check that same lambda variable for both ImageSets is handled correctly + img1 = ImageSet(Lambda(n, 2*n + 1), S.Integers) + img2 = ImageSet(Lambda(n, 4*n + 1), S.Integers) + assert img1.intersect(img2) == img2 + # Empty solution set returned by diophantine: + assert ImageSet(Lambda(n, 2*n), S.Integers).intersect( + ImageSet(Lambda(n, 2*n + 1), S.Integers)) == S.EmptySet + # Check intersection with S.Integers: + assert ImageSet(Lambda(n, 9/n + 20*n/3), S.Integers).intersect( + S.Integers) == FiniteSet(-61, -23, 23, 61) + # Single solution (2, 3) for diophantine solution: + assert ImageSet(Lambda(n, (n - 2)**2), S.Integers).intersect( + ImageSet(Lambda(n, -(n - 3)**2), 
S.Integers)) == FiniteSet(0) + # Single parametric solution for diophantine solution: + assert ImageSet(Lambda(n, n**2 + 5), S.Integers).intersect( + ImageSet(Lambda(m, 2*m), S.Integers)).dummy_eq(ImageSet( + Lambda(n, 4*n**2 + 4*n + 6), S.Integers)) + # 4 non-parametric solution couples for dioph. equation: + assert ImageSet(Lambda(n, n**2 - 9), S.Integers).intersect( + ImageSet(Lambda(m, -m**2), S.Integers)) == FiniteSet(-9, 0) + # Double parametric solution for diophantine solution: + assert ImageSet(Lambda(m, m**2 + 40), S.Integers).intersect( + ImageSet(Lambda(n, 41*n), S.Integers)).dummy_eq(Intersection( + ImageSet(Lambda(m, m**2 + 40), S.Integers), + ImageSet(Lambda(n, 41*n), S.Integers))) + # Check that diophantine returns *all* (8) solutions (permute=True) + assert ImageSet(Lambda(n, n**4 - 2**4), S.Integers).intersect( + ImageSet(Lambda(m, -m**4 + 3**4), S.Integers)) == FiniteSet(0, 65) + assert ImageSet(Lambda(n, pi/12 + n*5*pi/12), S.Integers).intersect( + ImageSet(Lambda(n, 7*pi/12 + n*11*pi/12), S.Integers)).dummy_eq(ImageSet( + Lambda(n, 55*pi*n/12 + 17*pi/4), S.Integers)) + # TypeError raised by diophantine (#18081) + assert ImageSet(Lambda(n, n*log(2)), S.Integers).intersection( + S.Integers).dummy_eq(Intersection(ImageSet( + Lambda(n, n*log(2)), S.Integers), S.Integers)) + # NotImplementedError raised by diophantine (no solver for cubic_thue) + assert ImageSet(Lambda(n, n**3 + 1), S.Integers).intersect( + ImageSet(Lambda(n, n**3), S.Integers)).dummy_eq(Intersection( + ImageSet(Lambda(n, n**3 + 1), S.Integers), + ImageSet(Lambda(n, n**3), S.Integers))) + + +def test_infinitely_indexed_set_3(): + from sympy.abc import n, m + assert imageset(Lambda(m, 2*pi*m), S.Integers).intersect( + imageset(Lambda(n, 3*pi*n), S.Integers)).dummy_eq( + ImageSet(Lambda(t, 6*pi*t), S.Integers)) + assert imageset(Lambda(n, 2*n + 1), S.Integers) == \ + imageset(Lambda(n, 2*n - 1), S.Integers) + assert imageset(Lambda(n, 3*n + 2), S.Integers) == \ + imageset(Lambda(n, 
3*n - 1), S.Integers) + + +def test_ImageSet_simplification(): + from sympy.abc import n, m + assert imageset(Lambda(n, n), S.Integers) == S.Integers + assert imageset(Lambda(n, sin(n)), + imageset(Lambda(m, tan(m)), S.Integers)) == \ + imageset(Lambda(m, sin(tan(m))), S.Integers) + assert imageset(n, 1 + 2*n, S.Naturals) == Range(3, oo, 2) + assert imageset(n, 1 + 2*n, S.Naturals0) == Range(1, oo, 2) + assert imageset(n, 1 - 2*n, S.Naturals) == Range(-1, -oo, -2) + + +def test_ImageSet_contains(): + assert (2, S.Half) in imageset(x, (x, 1/x), S.Integers) + assert imageset(x, x + I*3, S.Integers).intersection(S.Reals) is S.EmptySet + i = Dummy(integer=True) + q = imageset(x, x + I*y, S.Integers).intersection(S.Reals) + assert q.subs(y, I*i).intersection(S.Integers) is S.Integers + q = imageset(x, x + I*y/x, S.Integers).intersection(S.Reals) + assert q.subs(y, 0) is S.Integers + assert q.subs(y, I*i*x).intersection(S.Integers) is S.Integers + z = cos(1)**2 + sin(1)**2 - 1 + q = imageset(x, x + I*z, S.Integers).intersection(S.Reals) + assert q is not S.EmptySet + + +def test_ComplexRegion_contains(): + r = Symbol('r', real=True) + # contains in ComplexRegion + a = Interval(2, 3) + b = Interval(4, 6) + c = Interval(7, 9) + c1 = ComplexRegion(a*b) + c2 = ComplexRegion(Union(a*b, c*a)) + assert 2.5 + 4.5*I in c1 + assert 2 + 4*I in c1 + assert 3 + 4*I in c1 + assert 8 + 2.5*I in c2 + assert 2.5 + 6.1*I not in c1 + assert 4.5 + 3.2*I not in c1 + assert c1.contains(x) == Contains(x, c1, evaluate=False) + assert c1.contains(r) == False + assert c2.contains(x) == Contains(x, c2, evaluate=False) + assert c2.contains(r) == False + + r1 = Interval(0, 1) + theta1 = Interval(0, 2*S.Pi) + c3 = ComplexRegion(r1*theta1, polar=True) + assert (0.5 + I*6/10) in c3 + assert (S.Half + I*6/10) in c3 + assert (S.Half + .6*I) in c3 + assert (0.5 + .6*I) in c3 + assert I in c3 + assert 1 in c3 + assert 0 in c3 + assert 1 + I not in c3 + assert 1 - I not in c3 + assert c3.contains(x) == 
Contains(x, c3, evaluate=False) + assert c3.contains(r + 2*I) == Contains( + r + 2*I, c3, evaluate=False) # is in fact False + assert c3.contains(1/(1 + r**2)) == Contains( + 1/(1 + r**2), c3, evaluate=False) # is in fact True + + r2 = Interval(0, 3) + theta2 = Interval(pi, 2*pi, left_open=True) + c4 = ComplexRegion(r2*theta2, polar=True) + assert c4.contains(0) == True + assert c4.contains(2 + I) == False + assert c4.contains(-2 + I) == False + assert c4.contains(-2 - I) == True + assert c4.contains(2 - I) == True + assert c4.contains(-2) == False + assert c4.contains(2) == True + assert c4.contains(x) == Contains(x, c4, evaluate=False) + assert c4.contains(3/(1 + r**2)) == Contains( + 3/(1 + r**2), c4, evaluate=False) # is in fact True + + raises(ValueError, lambda: ComplexRegion(r1*theta1, polar=2)) + + +def test_symbolic_Range(): + n = Symbol('n') + raises(ValueError, lambda: Range(n)[0]) + raises(IndexError, lambda: Range(n, n)[0]) + raises(ValueError, lambda: Range(n, n+1)[0]) + raises(ValueError, lambda: Range(n).size) + + n = Symbol('n', integer=True) + raises(ValueError, lambda: Range(n)[0]) + raises(IndexError, lambda: Range(n, n)[0]) + assert Range(n, n+1)[0] == n + raises(ValueError, lambda: Range(n).size) + assert Range(n, n+1).size == 1 + + n = Symbol('n', integer=True, nonnegative=True) + raises(ValueError, lambda: Range(n)[0]) + raises(IndexError, lambda: Range(n, n)[0]) + assert Range(n+1)[0] == 0 + assert Range(n, n+1)[0] == n + assert Range(n).size == n + assert Range(n+1).size == n+1 + assert Range(n, n+1).size == 1 + + n = Symbol('n', integer=True, positive=True) + assert Range(n)[0] == 0 + assert Range(n, n+1)[0] == n + assert Range(n).size == n + assert Range(n, n+1).size == 1 + + m = Symbol('m', integer=True, positive=True) + + assert Range(n, n+m)[0] == n + assert Range(n, n+m).size == m + assert Range(n, n+1).size == 1 + assert Range(n, n+m, 2).size == floor(m/2) + + m = Symbol('m', integer=True, positive=True, even=True) + assert Range(n, 
n+m, 2).size == m/2 + + +def test_issue_18400(): + n = Symbol('n', integer=True) + raises(ValueError, lambda: imageset(lambda x: x*2, Range(n))) + + n = Symbol('n', integer=True, positive=True) + # No exception + assert imageset(lambda x: x*2, Range(n)) == imageset(lambda x: x*2, Range(n)) + + +def test_ComplexRegion_intersect(): + # Polar form + X_axis = ComplexRegion(Interval(0, oo)*FiniteSet(0, S.Pi), polar=True) + + unit_disk = ComplexRegion(Interval(0, 1)*Interval(0, 2*S.Pi), polar=True) + upper_half_unit_disk = ComplexRegion(Interval(0, 1)*Interval(0, S.Pi), polar=True) + upper_half_disk = ComplexRegion(Interval(0, oo)*Interval(0, S.Pi), polar=True) + lower_half_disk = ComplexRegion(Interval(0, oo)*Interval(S.Pi, 2*S.Pi), polar=True) + right_half_disk = ComplexRegion(Interval(0, oo)*Interval(-S.Pi/2, S.Pi/2), polar=True) + first_quad_disk = ComplexRegion(Interval(0, oo)*Interval(0, S.Pi/2), polar=True) + + assert upper_half_disk.intersect(unit_disk) == upper_half_unit_disk + assert right_half_disk.intersect(first_quad_disk) == first_quad_disk + assert upper_half_disk.intersect(right_half_disk) == first_quad_disk + assert upper_half_disk.intersect(lower_half_disk) == X_axis + + c1 = ComplexRegion(Interval(0, 4)*Interval(0, 2*S.Pi), polar=True) + assert c1.intersect(Interval(1, 5)) == Interval(1, 4) + assert c1.intersect(Interval(4, 9)) == FiniteSet(4) + assert c1.intersect(Interval(5, 12)) is S.EmptySet + + # Rectangular form + X_axis = ComplexRegion(Interval(-oo, oo)*FiniteSet(0)) + + unit_square = ComplexRegion(Interval(-1, 1)*Interval(-1, 1)) + upper_half_unit_square = ComplexRegion(Interval(-1, 1)*Interval(0, 1)) + upper_half_plane = ComplexRegion(Interval(-oo, oo)*Interval(0, oo)) + lower_half_plane = ComplexRegion(Interval(-oo, oo)*Interval(-oo, 0)) + right_half_plane = ComplexRegion(Interval(0, oo)*Interval(-oo, oo)) + first_quad_plane = ComplexRegion(Interval(0, oo)*Interval(0, oo)) + + assert upper_half_plane.intersect(unit_square) == 
upper_half_unit_square + assert right_half_plane.intersect(first_quad_plane) == first_quad_plane + assert upper_half_plane.intersect(right_half_plane) == first_quad_plane + assert upper_half_plane.intersect(lower_half_plane) == X_axis + + c1 = ComplexRegion(Interval(-5, 5)*Interval(-10, 10)) + assert c1.intersect(Interval(2, 7)) == Interval(2, 5) + assert c1.intersect(Interval(5, 7)) == FiniteSet(5) + assert c1.intersect(Interval(6, 9)) is S.EmptySet + + # unevaluated object + C1 = ComplexRegion(Interval(0, 1)*Interval(0, 2*S.Pi), polar=True) + C2 = ComplexRegion(Interval(-1, 1)*Interval(-1, 1)) + assert C1.intersect(C2) == Intersection(C1, C2, evaluate=False) + + +def test_ComplexRegion_union(): + # Polar form + c1 = ComplexRegion(Interval(0, 1)*Interval(0, 2*S.Pi), polar=True) + c2 = ComplexRegion(Interval(0, 1)*Interval(0, S.Pi), polar=True) + c3 = ComplexRegion(Interval(0, oo)*Interval(0, S.Pi), polar=True) + c4 = ComplexRegion(Interval(0, oo)*Interval(S.Pi, 2*S.Pi), polar=True) + + p1 = Union(Interval(0, 1)*Interval(0, 2*S.Pi), Interval(0, 1)*Interval(0, S.Pi)) + p2 = Union(Interval(0, oo)*Interval(0, S.Pi), Interval(0, oo)*Interval(S.Pi, 2*S.Pi)) + + assert c1.union(c2) == ComplexRegion(p1, polar=True) + assert c3.union(c4) == ComplexRegion(p2, polar=True) + + # Rectangular form + c5 = ComplexRegion(Interval(2, 5)*Interval(6, 9)) + c6 = ComplexRegion(Interval(4, 6)*Interval(10, 12)) + c7 = ComplexRegion(Interval(0, 10)*Interval(-10, 0)) + c8 = ComplexRegion(Interval(12, 16)*Interval(14, 20)) + + p3 = Union(Interval(2, 5)*Interval(6, 9), Interval(4, 6)*Interval(10, 12)) + p4 = Union(Interval(0, 10)*Interval(-10, 0), Interval(12, 16)*Interval(14, 20)) + + assert c5.union(c6) == ComplexRegion(p3) + assert c7.union(c8) == ComplexRegion(p4) + + assert c1.union(Interval(2, 4)) == Union(c1, Interval(2, 4), evaluate=False) + assert c5.union(Interval(2, 4)) == Union(c5, ComplexRegion.from_real(Interval(2, 4))) + + +def test_ComplexRegion_from_real(): + c1 = 
ComplexRegion(Interval(0, 1) * Interval(0, 2 * S.Pi), polar=True) + + raises(ValueError, lambda: c1.from_real(c1)) + assert c1.from_real(Interval(-1, 1)) == ComplexRegion(Interval(-1, 1) * FiniteSet(0), False) + + +def test_ComplexRegion_measure(): + a, b = Interval(2, 5), Interval(4, 8) + theta1, theta2 = Interval(0, 2*S.Pi), Interval(0, S.Pi) + c1 = ComplexRegion(a*b) + c2 = ComplexRegion(Union(a*theta1, b*theta2), polar=True) + + assert c1.measure == 12 + assert c2.measure == 9*pi + + +def test_normalize_theta_set(): + # Interval + assert normalize_theta_set(Interval(pi, 2*pi)) == \ + Union(FiniteSet(0), Interval.Ropen(pi, 2*pi)) + assert normalize_theta_set(Interval(pi*Rational(9, 2), 5*pi)) == Interval(pi/2, pi) + assert normalize_theta_set(Interval(pi*Rational(-3, 2), pi/2)) == Interval.Ropen(0, 2*pi) + assert normalize_theta_set(Interval.open(pi*Rational(-3, 2), pi/2)) == \ + Union(Interval.Ropen(0, pi/2), Interval.open(pi/2, 2*pi)) + assert normalize_theta_set(Interval.open(pi*Rational(-7, 2), pi*Rational(-3, 2))) == \ + Union(Interval.Ropen(0, pi/2), Interval.open(pi/2, 2*pi)) + assert normalize_theta_set(Interval(-pi/2, pi/2)) == \ + Union(Interval(0, pi/2), Interval.Ropen(pi*Rational(3, 2), 2*pi)) + assert normalize_theta_set(Interval.open(-pi/2, pi/2)) == \ + Union(Interval.Ropen(0, pi/2), Interval.open(pi*Rational(3, 2), 2*pi)) + assert normalize_theta_set(Interval(-4*pi, 3*pi)) == Interval.Ropen(0, 2*pi) + assert normalize_theta_set(Interval(pi*Rational(-3, 2), -pi/2)) == Interval(pi/2, pi*Rational(3, 2)) + assert normalize_theta_set(Interval.open(0, 2*pi)) == Interval.open(0, 2*pi) + assert normalize_theta_set(Interval.Ropen(-pi/2, pi/2)) == \ + Union(Interval.Ropen(0, pi/2), Interval.Ropen(pi*Rational(3, 2), 2*pi)) + assert normalize_theta_set(Interval.Lopen(-pi/2, pi/2)) == \ + Union(Interval(0, pi/2), Interval.open(pi*Rational(3, 2), 2*pi)) + assert normalize_theta_set(Interval(-pi/2, pi/2)) == \ + Union(Interval(0, pi/2), 
Interval.Ropen(pi*Rational(3, 2), 2*pi)) + assert normalize_theta_set(Interval.open(4*pi, pi*Rational(9, 2))) == Interval.open(0, pi/2) + assert normalize_theta_set(Interval.Lopen(4*pi, pi*Rational(9, 2))) == Interval.Lopen(0, pi/2) + assert normalize_theta_set(Interval.Ropen(4*pi, pi*Rational(9, 2))) == Interval.Ropen(0, pi/2) + assert normalize_theta_set(Interval.open(3*pi, 5*pi)) == \ + Union(Interval.Ropen(0, pi), Interval.open(pi, 2*pi)) + + # FiniteSet + assert normalize_theta_set(FiniteSet(0, pi, 3*pi)) == FiniteSet(0, pi) + assert normalize_theta_set(FiniteSet(0, pi/2, pi, 2*pi)) == FiniteSet(0, pi/2, pi) + assert normalize_theta_set(FiniteSet(0, -pi/2, -pi, -2*pi)) == FiniteSet(0, pi, pi*Rational(3, 2)) + assert normalize_theta_set(FiniteSet(pi*Rational(-3, 2), pi/2)) == \ + FiniteSet(pi/2) + assert normalize_theta_set(FiniteSet(2*pi)) == FiniteSet(0) + + # Unions + assert normalize_theta_set(Union(Interval(0, pi/3), Interval(pi/2, pi))) == \ + Union(Interval(0, pi/3), Interval(pi/2, pi)) + assert normalize_theta_set(Union(Interval(0, pi), Interval(2*pi, pi*Rational(7, 3)))) == \ + Interval(0, pi) + + # ValueError for non-real sets + raises(ValueError, lambda: normalize_theta_set(S.Complexes)) + + # NotImplementedError for subset of reals + raises(NotImplementedError, lambda: normalize_theta_set(Interval(0, 1))) + + # NotImplementedError without pi as coefficient + raises(NotImplementedError, lambda: normalize_theta_set(Interval(1, 2*pi))) + raises(NotImplementedError, lambda: normalize_theta_set(Interval(2*pi, 10))) + raises(NotImplementedError, lambda: normalize_theta_set(FiniteSet(0, 3, 3*pi))) + + +def test_ComplexRegion_FiniteSet(): + x, y, z, a, b, c = symbols('x y z a b c') + + # Issue #9669 + assert ComplexRegion(FiniteSet(a, b, c)*FiniteSet(x, y, z)) == \ + FiniteSet(a + I*x, a + I*y, a + I*z, b + I*x, b + I*y, + b + I*z, c + I*x, c + I*y, c + I*z) + assert ComplexRegion(FiniteSet(2)*FiniteSet(3)) == FiniteSet(2 + 3*I) + + +def 
test_union_RealSubSet(): + assert (S.Complexes).union(Interval(1, 2)) == S.Complexes + assert (S.Complexes).union(S.Integers) == S.Complexes + + +def test_SetKind_fancySet(): + G = lambda *args: ImageSet(Lambda(x, x ** 2), *args) + assert G(Interval(1, 4)).kind is SetKind(NumberKind) + assert G(FiniteSet(1, 4)).kind is SetKind(NumberKind) + assert S.Rationals.kind is SetKind(NumberKind) + assert S.Naturals.kind is SetKind(NumberKind) + assert S.Integers.kind is SetKind(NumberKind) + assert Range(3).kind is SetKind(NumberKind) + a = Interval(2, 3) + b = Interval(4, 6) + c1 = ComplexRegion(a*b) + assert c1.kind is SetKind(TupleKind(NumberKind, NumberKind)) + + +def test_issue_9980(): + c1 = ComplexRegion(Interval(1, 2)*Interval(2, 3)) + c2 = ComplexRegion(Interval(1, 5)*Interval(1, 3)) + R = Union(c1, c2) + assert simplify(R) == ComplexRegion(Union(Interval(1, 2)*Interval(2, 3), \ + Interval(1, 5)*Interval(1, 3)), False) + assert c1.func(*c1.args) == c1 + assert R.func(*R.args) == R + + +def test_issue_11732(): + interval12 = Interval(1, 2) + finiteset1234 = FiniteSet(1, 2, 3, 4) + pointComplex = Tuple(1, 5) + + assert (interval12 in S.Naturals) == False + assert (interval12 in S.Naturals0) == False + assert (interval12 in S.Integers) == False + assert (interval12 in S.Complexes) == False + + assert (finiteset1234 in S.Naturals) == False + assert (finiteset1234 in S.Naturals0) == False + assert (finiteset1234 in S.Integers) == False + assert (finiteset1234 in S.Complexes) == False + + assert (pointComplex in S.Naturals) == False + assert (pointComplex in S.Naturals0) == False + assert (pointComplex in S.Integers) == False + assert (pointComplex in S.Complexes) == True + + +def test_issue_11730(): + unit = Interval(0, 1) + square = ComplexRegion(unit ** 2) + + assert Union(S.Complexes, FiniteSet(oo)) != S.Complexes + assert Union(S.Complexes, FiniteSet(eye(4))) != S.Complexes + assert Union(unit, square) == square + assert Intersection(S.Reals, square) == unit + + 
+def test_issue_11938(): + unit = Interval(0, 1) + ival = Interval(1, 2) + cr1 = ComplexRegion(ival * unit) + + assert Intersection(cr1, S.Reals) == ival + assert Intersection(cr1, unit) == FiniteSet(1) + + arg1 = Interval(0, S.Pi) + arg2 = FiniteSet(S.Pi) + arg3 = Interval(S.Pi / 4, 3 * S.Pi / 4) + cp1 = ComplexRegion(unit * arg1, polar=True) + cp2 = ComplexRegion(unit * arg2, polar=True) + cp3 = ComplexRegion(unit * arg3, polar=True) + + assert Intersection(cp1, S.Reals) == Interval(-1, 1) + assert Intersection(cp2, S.Reals) == Interval(-1, 0) + assert Intersection(cp3, S.Reals) == FiniteSet(0) + + +def test_issue_11914(): + a, b = Interval(0, 1), Interval(0, pi) + c, d = Interval(2, 3), Interval(pi, 3 * pi / 2) + cp1 = ComplexRegion(a * b, polar=True) + cp2 = ComplexRegion(c * d, polar=True) + + assert -3 in cp1.union(cp2) + assert -3 in cp2.union(cp1) + assert -5 not in cp1.union(cp2) + + +def test_issue_9543(): + assert ImageSet(Lambda(x, x**2), S.Naturals).is_subset(S.Reals) + + +def test_issue_16871(): + assert ImageSet(Lambda(x, x), FiniteSet(1)) == {1} + assert ImageSet(Lambda(x, x - 3), S.Integers + ).intersection(S.Integers) is S.Integers + + +@XFAIL +def test_issue_16871b(): + assert ImageSet(Lambda(x, x - 3), S.Integers).is_subset(S.Integers) + + +def test_issue_18050(): + assert imageset(Lambda(x, I*x + 1), S.Integers + ) == ImageSet(Lambda(x, I*x + 1), S.Integers) + assert imageset(Lambda(x, 3*I*x + 4 + 8*I), S.Integers + ) == ImageSet(Lambda(x, 3*I*x + 4 + 2*I), S.Integers) + # no 'Mod' for next 2 tests: + assert imageset(Lambda(x, 2*x + 3*I), S.Integers + ) == ImageSet(Lambda(x, 2*x + 3*I), S.Integers) + r = Symbol('r', positive=True) + assert imageset(Lambda(x, r*x + 10), S.Integers + ) == ImageSet(Lambda(x, r*x + 10), S.Integers) + # reduce real part: + assert imageset(Lambda(x, 3*x + 8 + 5*I), S.Integers + ) == ImageSet(Lambda(x, 3*x + 2 + 5*I), S.Integers) + + +def test_Rationals(): + assert S.Integers.is_subset(S.Rationals) + assert 
S.Naturals.is_subset(S.Rationals) + assert S.Naturals0.is_subset(S.Rationals) + assert S.Rationals.is_subset(S.Reals) + assert S.Rationals.inf is -oo + assert S.Rationals.sup is oo + it = iter(S.Rationals) + assert [next(it) for i in range(12)] == [ + 0, 1, -1, S.Half, 2, Rational(-1, 2), -2, + Rational(1, 3), 3, Rational(-1, 3), -3, Rational(2, 3)] + assert Basic() not in S.Rationals + assert S.Half in S.Rationals + assert S.Rationals.contains(0.5) == Contains( + 0.5, S.Rationals, evaluate=False) + assert 2 in S.Rationals + r = symbols('r', rational=True) + assert r in S.Rationals + raises(TypeError, lambda: x in S.Rationals) + # issue #18134: + assert S.Rationals.boundary == S.Reals + assert S.Rationals.closure == S.Reals + assert S.Rationals.is_open == False + assert S.Rationals.is_closed == False + + +def test_NZQRC_unions(): + # check that all trivial number set unions are simplified: + nbrsets = (S.Naturals, S.Naturals0, S.Integers, S.Rationals, + S.Reals, S.Complexes) + unions = (Union(a, b) for a in nbrsets for b in nbrsets) + assert all(u.is_Union is False for u in unions) + + +def test_imageset_intersection(): + n = Dummy() + s = ImageSet(Lambda(n, -I*(I*(2*pi*n - pi/4) + + log(Abs(sqrt(-I))))), S.Integers) + assert s.intersect(S.Reals) == ImageSet( + Lambda(n, 2*pi*n + pi*Rational(7, 4)), S.Integers) + + +def test_issue_17858(): + assert 1 in Range(-oo, oo) + assert 0 in Range(oo, -oo, -1) + assert oo not in Range(-oo, oo) + assert -oo not in Range(-oo, oo) + +def test_issue_17859(): + r = Range(-oo,oo) + raises(ValueError,lambda: r[::2]) + raises(ValueError, lambda: r[::-2]) + r = Range(oo,-oo,-1) + raises(ValueError,lambda: r[::2]) + raises(ValueError, lambda: r[::-2]) diff --git a/vllm/lib/python3.10/site-packages/sympy/sets/tests/test_ordinals.py b/vllm/lib/python3.10/site-packages/sympy/sets/tests/test_ordinals.py new file mode 100644 index 0000000000000000000000000000000000000000..973ca329586f3e904f9377c44022c266f81c805c --- /dev/null +++ 
b/vllm/lib/python3.10/site-packages/sympy/sets/tests/test_ordinals.py @@ -0,0 +1,67 @@ +from sympy.sets.ordinals import Ordinal, OmegaPower, ord0, omega +from sympy.testing.pytest import raises + +def test_string_ordinals(): + assert str(omega) == 'w' + assert str(Ordinal(OmegaPower(5, 3), OmegaPower(3, 2))) == 'w**5*3 + w**3*2' + assert str(Ordinal(OmegaPower(5, 3), OmegaPower(0, 5))) == 'w**5*3 + 5' + assert str(Ordinal(OmegaPower(1, 3), OmegaPower(0, 5))) == 'w*3 + 5' + assert str(Ordinal(OmegaPower(omega + 1, 1), OmegaPower(3, 2))) == 'w**(w + 1) + w**3*2' + +def test_addition_with_integers(): + assert 3 + Ordinal(OmegaPower(5, 3)) == Ordinal(OmegaPower(5, 3)) + assert Ordinal(OmegaPower(5, 3))+3 == Ordinal(OmegaPower(5, 3), OmegaPower(0, 3)) + assert Ordinal(OmegaPower(5, 3), OmegaPower(0, 2))+3 == \ + Ordinal(OmegaPower(5, 3), OmegaPower(0, 5)) + + +def test_addition_with_ordinals(): + assert Ordinal(OmegaPower(5, 3), OmegaPower(3, 2)) + Ordinal(OmegaPower(3, 3)) == \ + Ordinal(OmegaPower(5, 3), OmegaPower(3, 5)) + assert Ordinal(OmegaPower(5, 3), OmegaPower(3, 2)) + Ordinal(OmegaPower(4, 2)) == \ + Ordinal(OmegaPower(5, 3), OmegaPower(4, 2)) + assert Ordinal(OmegaPower(omega, 2), OmegaPower(3, 2)) + Ordinal(OmegaPower(4, 2)) == \ + Ordinal(OmegaPower(omega, 2), OmegaPower(4, 2)) + +def test_comparison(): + assert Ordinal(OmegaPower(5, 3)) > Ordinal(OmegaPower(4, 3), OmegaPower(2, 1)) + assert Ordinal(OmegaPower(5, 3), OmegaPower(3, 2)) < Ordinal(OmegaPower(5, 4)) + assert Ordinal(OmegaPower(5, 4)) < Ordinal(OmegaPower(5, 5), OmegaPower(4, 1)) + + assert Ordinal(OmegaPower(5, 3), OmegaPower(3, 2)) == \ + Ordinal(OmegaPower(5, 3), OmegaPower(3, 2)) + assert not Ordinal(OmegaPower(5, 3), OmegaPower(3, 2)) == Ordinal(OmegaPower(5, 3)) + assert Ordinal(OmegaPower(omega, 3)) > Ordinal(OmegaPower(5, 3)) + +def test_multiplication_with_integers(): + w = omega + assert 3*w == w + assert w*9 == Ordinal(OmegaPower(1, 9)) + +def test_multiplication(): + w = omega + 
assert w*(w + 1) == w*w + w + assert (w + 1)*(w + 1) == w*w + w + 1 + assert w*1 == w + assert 1*w == w + assert w*ord0 == ord0 + assert ord0*w == ord0 + assert w**w == w * w**w + assert (w**w)*w*w == w**(w + 2) + +def test_exponentiation(): + w = omega + assert w**2 == w*w + assert w**3 == w*w*w + assert w**(w + 1) == Ordinal(OmegaPower(omega + 1, 1)) + assert (w**w)*(w**w) == w**(w*2) + +def test_comapre_not_instance(): + w = OmegaPower(omega + 1, 1) + assert(not (w == None)) + assert(not (w < 5)) + raises(TypeError, lambda: w < 6.66) + +def test_is_successort(): + w = Ordinal(OmegaPower(5, 1)) + assert not w.is_successor_ordinal diff --git a/vllm/lib/python3.10/site-packages/sympy/sets/tests/test_powerset.py b/vllm/lib/python3.10/site-packages/sympy/sets/tests/test_powerset.py new file mode 100644 index 0000000000000000000000000000000000000000..2e3a407d565f6b9537a296af103ec0a4e137cff9 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/sympy/sets/tests/test_powerset.py @@ -0,0 +1,141 @@ +from sympy.core.expr import unchanged +from sympy.core.singleton import S +from sympy.core.symbol import Symbol +from sympy.sets.contains import Contains +from sympy.sets.fancysets import Interval +from sympy.sets.powerset import PowerSet +from sympy.sets.sets import FiniteSet +from sympy.testing.pytest import raises, XFAIL + + +def test_powerset_creation(): + assert unchanged(PowerSet, FiniteSet(1, 2)) + assert unchanged(PowerSet, S.EmptySet) + raises(ValueError, lambda: PowerSet(123)) + assert unchanged(PowerSet, S.Reals) + assert unchanged(PowerSet, S.Integers) + + +def test_powerset_rewrite_FiniteSet(): + assert PowerSet(FiniteSet(1, 2)).rewrite(FiniteSet) == \ + FiniteSet(S.EmptySet, FiniteSet(1), FiniteSet(2), FiniteSet(1, 2)) + assert PowerSet(S.EmptySet).rewrite(FiniteSet) == FiniteSet(S.EmptySet) + assert PowerSet(S.Naturals).rewrite(FiniteSet) == PowerSet(S.Naturals) + + +def test_finiteset_rewrite_powerset(): + assert FiniteSet(S.EmptySet).rewrite(PowerSet) == 
PowerSet(S.EmptySet) + assert FiniteSet( + S.EmptySet, FiniteSet(1), + FiniteSet(2), FiniteSet(1, 2)).rewrite(PowerSet) == \ + PowerSet(FiniteSet(1, 2)) + assert FiniteSet(1, 2, 3).rewrite(PowerSet) == FiniteSet(1, 2, 3) + + +def test_powerset__contains__(): + subset_series = [ + S.EmptySet, + FiniteSet(1, 2), + S.Naturals, + S.Naturals0, + S.Integers, + S.Rationals, + S.Reals, + S.Complexes] + + l = len(subset_series) + for i in range(l): + for j in range(l): + if i <= j: + assert subset_series[i] in \ + PowerSet(subset_series[j], evaluate=False) + else: + assert subset_series[i] not in \ + PowerSet(subset_series[j], evaluate=False) + + +@XFAIL +def test_failing_powerset__contains__(): + # XXX These are failing when evaluate=True, + # but using unevaluated PowerSet works fine. + assert FiniteSet(1, 2) not in PowerSet(S.EmptySet).rewrite(FiniteSet) + assert S.Naturals not in PowerSet(S.EmptySet).rewrite(FiniteSet) + assert S.Naturals not in PowerSet(FiniteSet(1, 2)).rewrite(FiniteSet) + assert S.Naturals0 not in PowerSet(S.EmptySet).rewrite(FiniteSet) + assert S.Naturals0 not in PowerSet(FiniteSet(1, 2)).rewrite(FiniteSet) + assert S.Integers not in PowerSet(S.EmptySet).rewrite(FiniteSet) + assert S.Integers not in PowerSet(FiniteSet(1, 2)).rewrite(FiniteSet) + assert S.Rationals not in PowerSet(S.EmptySet).rewrite(FiniteSet) + assert S.Rationals not in PowerSet(FiniteSet(1, 2)).rewrite(FiniteSet) + assert S.Reals not in PowerSet(S.EmptySet).rewrite(FiniteSet) + assert S.Reals not in PowerSet(FiniteSet(1, 2)).rewrite(FiniteSet) + assert S.Complexes not in PowerSet(S.EmptySet).rewrite(FiniteSet) + assert S.Complexes not in PowerSet(FiniteSet(1, 2)).rewrite(FiniteSet) + + +def test_powerset__len__(): + A = PowerSet(S.EmptySet, evaluate=False) + assert len(A) == 1 + A = PowerSet(A, evaluate=False) + assert len(A) == 2 + A = PowerSet(A, evaluate=False) + assert len(A) == 4 + A = PowerSet(A, evaluate=False) + assert len(A) == 16 + + +def test_powerset__iter__(): + a = 
PowerSet(FiniteSet(1, 2)).__iter__() + assert next(a) == S.EmptySet + assert next(a) == FiniteSet(1) + assert next(a) == FiniteSet(2) + assert next(a) == FiniteSet(1, 2) + + a = PowerSet(S.Naturals).__iter__() + assert next(a) == S.EmptySet + assert next(a) == FiniteSet(1) + assert next(a) == FiniteSet(2) + assert next(a) == FiniteSet(1, 2) + assert next(a) == FiniteSet(3) + assert next(a) == FiniteSet(1, 3) + assert next(a) == FiniteSet(2, 3) + assert next(a) == FiniteSet(1, 2, 3) + + +def test_powerset_contains(): + A = PowerSet(FiniteSet(1), evaluate=False) + assert A.contains(2) == Contains(2, A) + + x = Symbol('x') + + A = PowerSet(FiniteSet(x), evaluate=False) + assert A.contains(FiniteSet(1)) == Contains(FiniteSet(1), A) + + +def test_powerset_method(): + # EmptySet + A = FiniteSet() + pset = A.powerset() + assert len(pset) == 1 + assert pset == FiniteSet(S.EmptySet) + + # FiniteSets + A = FiniteSet(1, 2) + pset = A.powerset() + assert len(pset) == 2**len(A) + assert pset == FiniteSet(FiniteSet(), FiniteSet(1), + FiniteSet(2), A) + # Not finite sets + A = Interval(0, 1) + assert A.powerset() == PowerSet(A) + +def test_is_subset(): + # covers line 101-102 + # initialize powerset(1), which is a subset of powerset(1,2) + subset = PowerSet(FiniteSet(1)) + pset = PowerSet(FiniteSet(1, 2)) + bad_set = PowerSet(FiniteSet(2, 3)) + # assert "subset" is subset of pset == True + assert subset.is_subset(pset) + # assert "bad_set" is subset of pset == False + assert not pset.is_subset(bad_set) diff --git a/vllm/lib/python3.10/site-packages/sympy/sets/tests/test_setexpr.py b/vllm/lib/python3.10/site-packages/sympy/sets/tests/test_setexpr.py new file mode 100644 index 0000000000000000000000000000000000000000..faab1261c8d3e86901b04d30e8bc94de31642b93 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/sympy/sets/tests/test_setexpr.py @@ -0,0 +1,317 @@ +from sympy.sets.setexpr import SetExpr +from sympy.sets import Interval, FiniteSet, Intersection, ImageSet, Union + +from 
sympy.core.expr import Expr +from sympy.core.function import Lambda +from sympy.core.numbers import (I, Rational, oo) +from sympy.core.singleton import S +from sympy.core.symbol import (Dummy, Symbol, symbols) +from sympy.functions.elementary.exponential import (exp, log) +from sympy.functions.elementary.miscellaneous import (Max, Min, sqrt) +from sympy.functions.elementary.trigonometric import cos +from sympy.sets.sets import Set + + +a, x = symbols("a, x") +_d = Dummy("d") + + +def test_setexpr(): + se = SetExpr(Interval(0, 1)) + assert isinstance(se.set, Set) + assert isinstance(se, Expr) + + +def test_scalar_funcs(): + assert SetExpr(Interval(0, 1)).set == Interval(0, 1) + a, b = Symbol('a', real=True), Symbol('b', real=True) + a, b = 1, 2 + # TODO: add support for more functions in the future: + for f in [exp, log]: + input_se = f(SetExpr(Interval(a, b))) + output = input_se.set + expected = Interval(Min(f(a), f(b)), Max(f(a), f(b))) + assert output == expected + + +def test_Add_Mul(): + assert (SetExpr(Interval(0, 1)) + 1).set == Interval(1, 2) + assert (SetExpr(Interval(0, 1))*2).set == Interval(0, 2) + + +def test_Pow(): + assert (SetExpr(Interval(0, 2))**2).set == Interval(0, 4) + + +def test_compound(): + assert (exp(SetExpr(Interval(0, 1))*2 + 1)).set == \ + Interval(exp(1), exp(3)) + + +def test_Interval_Interval(): + assert (SetExpr(Interval(1, 2)) + SetExpr(Interval(10, 20))).set == \ + Interval(11, 22) + assert (SetExpr(Interval(1, 2))*SetExpr(Interval(10, 20))).set == \ + Interval(10, 40) + + +def test_FiniteSet_FiniteSet(): + assert (SetExpr(FiniteSet(1, 2, 3)) + SetExpr(FiniteSet(1, 2))).set == \ + FiniteSet(2, 3, 4, 5) + assert (SetExpr(FiniteSet(1, 2, 3))*SetExpr(FiniteSet(1, 2))).set == \ + FiniteSet(1, 2, 3, 4, 6) + + +def test_Interval_FiniteSet(): + assert (SetExpr(FiniteSet(1, 2)) + SetExpr(Interval(0, 10))).set == \ + Interval(1, 12) + + +def test_Many_Sets(): + assert (SetExpr(Interval(0, 1)) + + SetExpr(Interval(2, 3)) + + 
SetExpr(FiniteSet(10, 11, 12))).set == Interval(12, 16) + + +def test_same_setexprs_are_not_identical(): + a = SetExpr(FiniteSet(0, 1)) + b = SetExpr(FiniteSet(0, 1)) + assert (a + b).set == FiniteSet(0, 1, 2) + + # Cannot detect the set being the same: + # assert (a + a).set == FiniteSet(0, 2) + + +def test_Interval_arithmetic(): + i12cc = SetExpr(Interval(1, 2)) + i12lo = SetExpr(Interval.Lopen(1, 2)) + i12ro = SetExpr(Interval.Ropen(1, 2)) + i12o = SetExpr(Interval.open(1, 2)) + + n23cc = SetExpr(Interval(-2, 3)) + n23lo = SetExpr(Interval.Lopen(-2, 3)) + n23ro = SetExpr(Interval.Ropen(-2, 3)) + n23o = SetExpr(Interval.open(-2, 3)) + + n3n2cc = SetExpr(Interval(-3, -2)) + + assert i12cc + i12cc == SetExpr(Interval(2, 4)) + assert i12cc - i12cc == SetExpr(Interval(-1, 1)) + assert i12cc*i12cc == SetExpr(Interval(1, 4)) + assert i12cc/i12cc == SetExpr(Interval(S.Half, 2)) + assert i12cc**2 == SetExpr(Interval(1, 4)) + assert i12cc**3 == SetExpr(Interval(1, 8)) + + assert i12lo + i12ro == SetExpr(Interval.open(2, 4)) + assert i12lo - i12ro == SetExpr(Interval.Lopen(-1, 1)) + assert i12lo*i12ro == SetExpr(Interval.open(1, 4)) + assert i12lo/i12ro == SetExpr(Interval.Lopen(S.Half, 2)) + assert i12lo + i12lo == SetExpr(Interval.Lopen(2, 4)) + assert i12lo - i12lo == SetExpr(Interval.open(-1, 1)) + assert i12lo*i12lo == SetExpr(Interval.Lopen(1, 4)) + assert i12lo/i12lo == SetExpr(Interval.open(S.Half, 2)) + assert i12lo + i12cc == SetExpr(Interval.Lopen(2, 4)) + assert i12lo - i12cc == SetExpr(Interval.Lopen(-1, 1)) + assert i12lo*i12cc == SetExpr(Interval.Lopen(1, 4)) + assert i12lo/i12cc == SetExpr(Interval.Lopen(S.Half, 2)) + assert i12lo + i12o == SetExpr(Interval.open(2, 4)) + assert i12lo - i12o == SetExpr(Interval.open(-1, 1)) + assert i12lo*i12o == SetExpr(Interval.open(1, 4)) + assert i12lo/i12o == SetExpr(Interval.open(S.Half, 2)) + assert i12lo**2 == SetExpr(Interval.Lopen(1, 4)) + assert i12lo**3 == SetExpr(Interval.Lopen(1, 8)) + + assert i12ro + i12ro == 
SetExpr(Interval.Ropen(2, 4)) + assert i12ro - i12ro == SetExpr(Interval.open(-1, 1)) + assert i12ro*i12ro == SetExpr(Interval.Ropen(1, 4)) + assert i12ro/i12ro == SetExpr(Interval.open(S.Half, 2)) + assert i12ro + i12cc == SetExpr(Interval.Ropen(2, 4)) + assert i12ro - i12cc == SetExpr(Interval.Ropen(-1, 1)) + assert i12ro*i12cc == SetExpr(Interval.Ropen(1, 4)) + assert i12ro/i12cc == SetExpr(Interval.Ropen(S.Half, 2)) + assert i12ro + i12o == SetExpr(Interval.open(2, 4)) + assert i12ro - i12o == SetExpr(Interval.open(-1, 1)) + assert i12ro*i12o == SetExpr(Interval.open(1, 4)) + assert i12ro/i12o == SetExpr(Interval.open(S.Half, 2)) + assert i12ro**2 == SetExpr(Interval.Ropen(1, 4)) + assert i12ro**3 == SetExpr(Interval.Ropen(1, 8)) + + assert i12o + i12lo == SetExpr(Interval.open(2, 4)) + assert i12o - i12lo == SetExpr(Interval.open(-1, 1)) + assert i12o*i12lo == SetExpr(Interval.open(1, 4)) + assert i12o/i12lo == SetExpr(Interval.open(S.Half, 2)) + assert i12o + i12ro == SetExpr(Interval.open(2, 4)) + assert i12o - i12ro == SetExpr(Interval.open(-1, 1)) + assert i12o*i12ro == SetExpr(Interval.open(1, 4)) + assert i12o/i12ro == SetExpr(Interval.open(S.Half, 2)) + assert i12o + i12cc == SetExpr(Interval.open(2, 4)) + assert i12o - i12cc == SetExpr(Interval.open(-1, 1)) + assert i12o*i12cc == SetExpr(Interval.open(1, 4)) + assert i12o/i12cc == SetExpr(Interval.open(S.Half, 2)) + assert i12o**2 == SetExpr(Interval.open(1, 4)) + assert i12o**3 == SetExpr(Interval.open(1, 8)) + + assert n23cc + n23cc == SetExpr(Interval(-4, 6)) + assert n23cc - n23cc == SetExpr(Interval(-5, 5)) + assert n23cc*n23cc == SetExpr(Interval(-6, 9)) + assert n23cc/n23cc == SetExpr(Interval.open(-oo, oo)) + assert n23cc + n23ro == SetExpr(Interval.Ropen(-4, 6)) + assert n23cc - n23ro == SetExpr(Interval.Lopen(-5, 5)) + assert n23cc*n23ro == SetExpr(Interval.Ropen(-6, 9)) + assert n23cc/n23ro == SetExpr(Interval.Lopen(-oo, oo)) + assert n23cc + n23lo == SetExpr(Interval.Lopen(-4, 6)) + assert 
n23cc - n23lo == SetExpr(Interval.Ropen(-5, 5)) + assert n23cc*n23lo == SetExpr(Interval(-6, 9)) + assert n23cc/n23lo == SetExpr(Interval.open(-oo, oo)) + assert n23cc + n23o == SetExpr(Interval.open(-4, 6)) + assert n23cc - n23o == SetExpr(Interval.open(-5, 5)) + assert n23cc*n23o == SetExpr(Interval.open(-6, 9)) + assert n23cc/n23o == SetExpr(Interval.open(-oo, oo)) + assert n23cc**2 == SetExpr(Interval(0, 9)) + assert n23cc**3 == SetExpr(Interval(-8, 27)) + + n32cc = SetExpr(Interval(-3, 2)) + n32lo = SetExpr(Interval.Lopen(-3, 2)) + n32ro = SetExpr(Interval.Ropen(-3, 2)) + assert n32cc*n32lo == SetExpr(Interval.Ropen(-6, 9)) + assert n32cc*n32cc == SetExpr(Interval(-6, 9)) + assert n32lo*n32cc == SetExpr(Interval.Ropen(-6, 9)) + assert n32cc*n32ro == SetExpr(Interval(-6, 9)) + assert n32lo*n32ro == SetExpr(Interval.Ropen(-6, 9)) + assert n32cc/n32lo == SetExpr(Interval.Ropen(-oo, oo)) + assert i12cc/n32lo == SetExpr(Interval.Ropen(-oo, oo)) + + assert n3n2cc**2 == SetExpr(Interval(4, 9)) + assert n3n2cc**3 == SetExpr(Interval(-27, -8)) + + assert n23cc + i12cc == SetExpr(Interval(-1, 5)) + assert n23cc - i12cc == SetExpr(Interval(-4, 2)) + assert n23cc*i12cc == SetExpr(Interval(-4, 6)) + assert n23cc/i12cc == SetExpr(Interval(-2, 3)) + + +def test_SetExpr_Intersection(): + x, y, z, w = symbols("x y z w") + set1 = Interval(x, y) + set2 = Interval(w, z) + inter = Intersection(set1, set2) + se = SetExpr(inter) + assert exp(se).set == Intersection( + ImageSet(Lambda(x, exp(x)), set1), + ImageSet(Lambda(x, exp(x)), set2)) + assert cos(se).set == ImageSet(Lambda(x, cos(x)), inter) + + +def test_SetExpr_Interval_div(): + # TODO: some expressions cannot be calculated due to bugs (currently + # commented): + assert SetExpr(Interval(-3, -2))/SetExpr(Interval(-2, 1)) == SetExpr(Interval(-oo, oo)) + assert SetExpr(Interval(2, 3))/SetExpr(Interval(-2, 2)) == SetExpr(Interval(-oo, oo)) + + assert SetExpr(Interval(-3, -2))/SetExpr(Interval(0, 4)) == SetExpr(Interval(-oo, 
Rational(-1, 2))) + assert SetExpr(Interval(2, 4))/SetExpr(Interval(-3, 0)) == SetExpr(Interval(-oo, Rational(-2, 3))) + assert SetExpr(Interval(2, 4))/SetExpr(Interval(0, 3)) == SetExpr(Interval(Rational(2, 3), oo)) + + # assert SetExpr(Interval(0, 1))/SetExpr(Interval(0, 1)) == SetExpr(Interval(0, oo)) + # assert SetExpr(Interval(-1, 0))/SetExpr(Interval(0, 1)) == SetExpr(Interval(-oo, 0)) + assert SetExpr(Interval(-1, 2))/SetExpr(Interval(-2, 2)) == SetExpr(Interval(-oo, oo)) + + assert 1/SetExpr(Interval(-1, 2)) == SetExpr(Union(Interval(-oo, -1), Interval(S.Half, oo))) + + assert 1/SetExpr(Interval(0, 2)) == SetExpr(Interval(S.Half, oo)) + assert (-1)/SetExpr(Interval(0, 2)) == SetExpr(Interval(-oo, Rational(-1, 2))) + assert 1/SetExpr(Interval(-oo, 0)) == SetExpr(Interval.open(-oo, 0)) + assert 1/SetExpr(Interval(-1, 0)) == SetExpr(Interval(-oo, -1)) + # assert (-2)/SetExpr(Interval(-oo, 0)) == SetExpr(Interval(0, oo)) + # assert 1/SetExpr(Interval(-oo, -1)) == SetExpr(Interval(-1, 0)) + + # assert SetExpr(Interval(1, 2))/a == Mul(SetExpr(Interval(1, 2)), 1/a, evaluate=False) + + # assert SetExpr(Interval(1, 2))/0 == SetExpr(Interval(1, 2))*zoo + # assert SetExpr(Interval(1, oo))/oo == SetExpr(Interval(0, oo)) + # assert SetExpr(Interval(1, oo))/(-oo) == SetExpr(Interval(-oo, 0)) + # assert SetExpr(Interval(-oo, -1))/oo == SetExpr(Interval(-oo, 0)) + # assert SetExpr(Interval(-oo, -1))/(-oo) == SetExpr(Interval(0, oo)) + # assert SetExpr(Interval(-oo, oo))/oo == SetExpr(Interval(-oo, oo)) + # assert SetExpr(Interval(-oo, oo))/(-oo) == SetExpr(Interval(-oo, oo)) + # assert SetExpr(Interval(-1, oo))/oo == SetExpr(Interval(0, oo)) + # assert SetExpr(Interval(-1, oo))/(-oo) == SetExpr(Interval(-oo, 0)) + # assert SetExpr(Interval(-oo, 1))/oo == SetExpr(Interval(-oo, 0)) + # assert SetExpr(Interval(-oo, 1))/(-oo) == SetExpr(Interval(0, oo)) + + +def test_SetExpr_Interval_pow(): + assert SetExpr(Interval(0, 2))**2 == SetExpr(Interval(0, 4)) + assert 
SetExpr(Interval(-1, 1))**2 == SetExpr(Interval(0, 1)) + assert SetExpr(Interval(1, 2))**2 == SetExpr(Interval(1, 4)) + assert SetExpr(Interval(-1, 2))**3 == SetExpr(Interval(-1, 8)) + assert SetExpr(Interval(-1, 1))**0 == SetExpr(FiniteSet(1)) + + + assert SetExpr(Interval(1, 2))**Rational(5, 2) == SetExpr(Interval(1, 4*sqrt(2))) + #assert SetExpr(Interval(-1, 2))**Rational(1, 3) == SetExpr(Interval(-1, 2**Rational(1, 3))) + #assert SetExpr(Interval(0, 2))**S.Half == SetExpr(Interval(0, sqrt(2))) + + #assert SetExpr(Interval(-4, 2))**Rational(2, 3) == SetExpr(Interval(0, 2*2**Rational(1, 3))) + + #assert SetExpr(Interval(-1, 5))**S.Half == SetExpr(Interval(0, sqrt(5))) + #assert SetExpr(Interval(-oo, 2))**S.Half == SetExpr(Interval(0, sqrt(2))) + #assert SetExpr(Interval(-2, 3))**(Rational(-1, 4)) == SetExpr(Interval(0, oo)) + + assert SetExpr(Interval(1, 5))**(-2) == SetExpr(Interval(Rational(1, 25), 1)) + assert SetExpr(Interval(-1, 3))**(-2) == SetExpr(Interval(0, oo)) + + assert SetExpr(Interval(0, 2))**(-2) == SetExpr(Interval(Rational(1, 4), oo)) + assert SetExpr(Interval(-1, 2))**(-3) == SetExpr(Union(Interval(-oo, -1), Interval(Rational(1, 8), oo))) + assert SetExpr(Interval(-3, -2))**(-3) == SetExpr(Interval(Rational(-1, 8), Rational(-1, 27))) + assert SetExpr(Interval(-3, -2))**(-2) == SetExpr(Interval(Rational(1, 9), Rational(1, 4))) + #assert SetExpr(Interval(0, oo))**S.Half == SetExpr(Interval(0, oo)) + #assert SetExpr(Interval(-oo, -1))**Rational(1, 3) == SetExpr(Interval(-oo, -1)) + #assert SetExpr(Interval(-2, 3))**(Rational(-1, 3)) == SetExpr(Interval(-oo, oo)) + + assert SetExpr(Interval(-oo, 0))**(-2) == SetExpr(Interval.open(0, oo)) + assert SetExpr(Interval(-2, 0))**(-2) == SetExpr(Interval(Rational(1, 4), oo)) + + assert SetExpr(Interval(Rational(1, 3), S.Half))**oo == SetExpr(FiniteSet(0)) + assert SetExpr(Interval(0, S.Half))**oo == SetExpr(FiniteSet(0)) + assert SetExpr(Interval(S.Half, 1))**oo == SetExpr(Interval(0, oo)) + assert 
SetExpr(Interval(0, 1))**oo == SetExpr(Interval(0, oo)) + assert SetExpr(Interval(2, 3))**oo == SetExpr(FiniteSet(oo)) + assert SetExpr(Interval(1, 2))**oo == SetExpr(Interval(0, oo)) + assert SetExpr(Interval(S.Half, 3))**oo == SetExpr(Interval(0, oo)) + assert SetExpr(Interval(Rational(-1, 3), Rational(-1, 4)))**oo == SetExpr(FiniteSet(0)) + assert SetExpr(Interval(-1, Rational(-1, 2)))**oo == SetExpr(Interval(-oo, oo)) + assert SetExpr(Interval(-3, -2))**oo == SetExpr(FiniteSet(-oo, oo)) + assert SetExpr(Interval(-2, -1))**oo == SetExpr(Interval(-oo, oo)) + assert SetExpr(Interval(-2, Rational(-1, 2)))**oo == SetExpr(Interval(-oo, oo)) + assert SetExpr(Interval(Rational(-1, 2), S.Half))**oo == SetExpr(FiniteSet(0)) + assert SetExpr(Interval(Rational(-1, 2), 1))**oo == SetExpr(Interval(0, oo)) + assert SetExpr(Interval(Rational(-2, 3), 2))**oo == SetExpr(Interval(0, oo)) + assert SetExpr(Interval(-1, 1))**oo == SetExpr(Interval(-oo, oo)) + assert SetExpr(Interval(-1, S.Half))**oo == SetExpr(Interval(-oo, oo)) + assert SetExpr(Interval(-1, 2))**oo == SetExpr(Interval(-oo, oo)) + assert SetExpr(Interval(-2, S.Half))**oo == SetExpr(Interval(-oo, oo)) + + assert (SetExpr(Interval(1, 2))**x).dummy_eq(SetExpr(ImageSet(Lambda(_d, _d**x), Interval(1, 2)))) + + assert SetExpr(Interval(2, 3))**(-oo) == SetExpr(FiniteSet(0)) + assert SetExpr(Interval(0, 2))**(-oo) == SetExpr(Interval(0, oo)) + assert (SetExpr(Interval(-1, 2))**(-oo)).dummy_eq(SetExpr(ImageSet(Lambda(_d, _d**(-oo)), Interval(-1, 2)))) + + +def test_SetExpr_Integers(): + assert SetExpr(S.Integers) + 1 == SetExpr(S.Integers) + assert (SetExpr(S.Integers) + I).dummy_eq( + SetExpr(ImageSet(Lambda(_d, _d + I), S.Integers))) + assert SetExpr(S.Integers)*(-1) == SetExpr(S.Integers) + assert (SetExpr(S.Integers)*2).dummy_eq( + SetExpr(ImageSet(Lambda(_d, 2*_d), S.Integers))) + assert (SetExpr(S.Integers)*I).dummy_eq( + SetExpr(ImageSet(Lambda(_d, I*_d), S.Integers))) + # issue #18050: + assert 
SetExpr(S.Integers)._eval_func(Lambda(x, I*x + 1)).dummy_eq( + SetExpr(ImageSet(Lambda(_d, I*_d + 1), S.Integers))) + # needs improvement: + assert (SetExpr(S.Integers)*I + 1).dummy_eq( + SetExpr(ImageSet(Lambda(x, x + 1), + ImageSet(Lambda(_d, _d*I), S.Integers)))) diff --git a/vllm/lib/python3.10/site-packages/sympy/sets/tests/test_sets.py b/vllm/lib/python3.10/site-packages/sympy/sets/tests/test_sets.py new file mode 100644 index 0000000000000000000000000000000000000000..657ab19a90eb88ca48f266f7a5cf050504caed43 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/sympy/sets/tests/test_sets.py @@ -0,0 +1,1753 @@ +from sympy.concrete.summations import Sum +from sympy.core.add import Add +from sympy.core.containers import TupleKind +from sympy.core.function import Lambda +from sympy.core.kind import NumberKind, UndefinedKind +from sympy.core.numbers import (Float, I, Rational, nan, oo, pi, zoo) +from sympy.core.power import Pow +from sympy.core.singleton import S +from sympy.core.symbol import (Symbol, symbols) +from sympy.core.sympify import sympify +from sympy.functions.elementary.miscellaneous import (Max, Min, sqrt) +from sympy.functions.elementary.piecewise import Piecewise +from sympy.functions.elementary.trigonometric import (cos, sin) +from sympy.logic.boolalg import (false, true) +from sympy.matrices.kind import MatrixKind +from sympy.matrices.dense import Matrix +from sympy.polys.rootoftools import rootof +from sympy.sets.contains import Contains +from sympy.sets.fancysets import (ImageSet, Range) +from sympy.sets.sets import (Complement, DisjointUnion, FiniteSet, Intersection, Interval, ProductSet, Set, SymmetricDifference, Union, imageset, SetKind) +from mpmath import mpi + +from sympy.core.expr import unchanged +from sympy.core.relational import Eq, Ne, Le, Lt, LessThan +from sympy.logic import And, Or, Xor +from sympy.testing.pytest import raises, XFAIL, warns_deprecated_sympy +from sympy.utilities.iterables import cartes + +from sympy.abc import x, 
y, z, m, n + +EmptySet = S.EmptySet + +def test_imageset(): + ints = S.Integers + assert imageset(x, x - 1, S.Naturals) is S.Naturals0 + assert imageset(x, x + 1, S.Naturals0) is S.Naturals + assert imageset(x, abs(x), S.Naturals0) is S.Naturals0 + assert imageset(x, abs(x), S.Naturals) is S.Naturals + assert imageset(x, abs(x), S.Integers) is S.Naturals0 + # issue 16878a + r = symbols('r', real=True) + assert imageset(x, (x, x), S.Reals)._contains((1, r)) == None + assert imageset(x, (x, x), S.Reals)._contains((1, 2)) == False + assert (r, r) in imageset(x, (x, x), S.Reals) + assert 1 + I in imageset(x, x + I, S.Reals) + assert {1} not in imageset(x, (x,), S.Reals) + assert (1, 1) not in imageset(x, (x,), S.Reals) + raises(TypeError, lambda: imageset(x, ints)) + raises(ValueError, lambda: imageset(x, y, z, ints)) + raises(ValueError, lambda: imageset(Lambda(x, cos(x)), y)) + assert (1, 2) in imageset(Lambda((x, y), (x, y)), ints, ints) + raises(ValueError, lambda: imageset(Lambda(x, x), ints, ints)) + assert imageset(cos, ints) == ImageSet(Lambda(x, cos(x)), ints) + def f(x): + return cos(x) + assert imageset(f, ints) == imageset(x, cos(x), ints) + f = lambda x: cos(x) + assert imageset(f, ints) == ImageSet(Lambda(x, cos(x)), ints) + assert imageset(x, 1, ints) == FiniteSet(1) + assert imageset(x, y, ints) == {y} + assert imageset((x, y), (1, z), ints, S.Reals) == {(1, z)} + clash = Symbol('x', integer=true) + assert (str(imageset(lambda x: x + clash, Interval(-2, 1)).lamda.expr) + in ('x0 + x', 'x + x0')) + x1, x2 = symbols("x1, x2") + assert imageset(lambda x, y: + Add(x, y), Interval(1, 2), Interval(2, 3)).dummy_eq( + ImageSet(Lambda((x1, x2), x1 + x2), + Interval(1, 2), Interval(2, 3))) + + +def test_is_empty(): + for s in [S.Naturals, S.Naturals0, S.Integers, S.Rationals, S.Reals, + S.UniversalSet]: + assert s.is_empty is False + + assert S.EmptySet.is_empty is True + + +def test_is_finiteset(): + for s in [S.Naturals, S.Naturals0, S.Integers, S.Rationals, 
S.Reals, + S.UniversalSet]: + assert s.is_finite_set is False + + assert S.EmptySet.is_finite_set is True + + assert FiniteSet(1, 2).is_finite_set is True + assert Interval(1, 2).is_finite_set is False + assert Interval(x, y).is_finite_set is None + assert ProductSet(FiniteSet(1), FiniteSet(2)).is_finite_set is True + assert ProductSet(FiniteSet(1), Interval(1, 2)).is_finite_set is False + assert ProductSet(FiniteSet(1), Interval(x, y)).is_finite_set is None + assert Union(Interval(0, 1), Interval(2, 3)).is_finite_set is False + assert Union(FiniteSet(1), Interval(2, 3)).is_finite_set is False + assert Union(FiniteSet(1), FiniteSet(2)).is_finite_set is True + assert Union(FiniteSet(1), Interval(x, y)).is_finite_set is None + assert Intersection(Interval(x, y), FiniteSet(1)).is_finite_set is True + assert Intersection(Interval(x, y), Interval(1, 2)).is_finite_set is None + assert Intersection(FiniteSet(x), FiniteSet(y)).is_finite_set is True + assert Complement(FiniteSet(1), Interval(x, y)).is_finite_set is True + assert Complement(Interval(x, y), FiniteSet(1)).is_finite_set is None + assert Complement(Interval(1, 2), FiniteSet(x)).is_finite_set is False + assert DisjointUnion(Interval(-5, 3), FiniteSet(x, y)).is_finite_set is False + assert DisjointUnion(S.EmptySet, FiniteSet(x, y), S.EmptySet).is_finite_set is True + + +def test_deprecated_is_EmptySet(): + with warns_deprecated_sympy(): + S.EmptySet.is_EmptySet + + with warns_deprecated_sympy(): + FiniteSet(1).is_EmptySet + + +def test_interval_arguments(): + assert Interval(0, oo) == Interval(0, oo, False, True) + assert Interval(0, oo).right_open is true + assert Interval(-oo, 0) == Interval(-oo, 0, True, False) + assert Interval(-oo, 0).left_open is true + assert Interval(oo, -oo) == S.EmptySet + assert Interval(oo, oo) == S.EmptySet + assert Interval(-oo, -oo) == S.EmptySet + assert Interval(oo, x) == S.EmptySet + assert Interval(oo, oo) == S.EmptySet + assert Interval(x, -oo) == S.EmptySet + assert 
Interval(x, x) == {x} + + assert isinstance(Interval(1, 1), FiniteSet) + e = Sum(x, (x, 1, 3)) + assert isinstance(Interval(e, e), FiniteSet) + + assert Interval(1, 0) == S.EmptySet + assert Interval(1, 1).measure == 0 + + assert Interval(1, 1, False, True) == S.EmptySet + assert Interval(1, 1, True, False) == S.EmptySet + assert Interval(1, 1, True, True) == S.EmptySet + + + assert isinstance(Interval(0, Symbol('a')), Interval) + assert Interval(Symbol('a', positive=True), 0) == S.EmptySet + raises(ValueError, lambda: Interval(0, S.ImaginaryUnit)) + raises(ValueError, lambda: Interval(0, Symbol('z', extended_real=False))) + raises(ValueError, lambda: Interval(x, x + S.ImaginaryUnit)) + + raises(NotImplementedError, lambda: Interval(0, 1, And(x, y))) + raises(NotImplementedError, lambda: Interval(0, 1, False, And(x, y))) + raises(NotImplementedError, lambda: Interval(0, 1, z, And(x, y))) + + +def test_interval_symbolic_end_points(): + a = Symbol('a', real=True) + + assert Union(Interval(0, a), Interval(0, 3)).sup == Max(a, 3) + assert Union(Interval(a, 0), Interval(-3, 0)).inf == Min(-3, a) + + assert Interval(0, a).contains(1) == LessThan(1, a) + + +def test_interval_is_empty(): + x, y = symbols('x, y') + r = Symbol('r', real=True) + p = Symbol('p', positive=True) + n = Symbol('n', negative=True) + nn = Symbol('nn', nonnegative=True) + assert Interval(1, 2).is_empty == False + assert Interval(3, 3).is_empty == False # FiniteSet + assert Interval(r, r).is_empty == False # FiniteSet + assert Interval(r, r + nn).is_empty == False + assert Interval(x, x).is_empty == False + assert Interval(1, oo).is_empty == False + assert Interval(-oo, oo).is_empty == False + assert Interval(-oo, 1).is_empty == False + assert Interval(x, y).is_empty == None + assert Interval(r, oo).is_empty == False # real implies finite + assert Interval(n, 0).is_empty == False + assert Interval(n, 0, left_open=True).is_empty == False + assert Interval(p, 0).is_empty == True # EmptySet + assert 
Interval(nn, 0).is_empty == None + assert Interval(n, p).is_empty == False + assert Interval(0, p, left_open=True).is_empty == False + assert Interval(0, p, right_open=True).is_empty == False + assert Interval(0, nn, left_open=True).is_empty == None + assert Interval(0, nn, right_open=True).is_empty == None + + +def test_union(): + assert Union(Interval(1, 2), Interval(2, 3)) == Interval(1, 3) + assert Union(Interval(1, 2), Interval(2, 3, True)) == Interval(1, 3) + assert Union(Interval(1, 3), Interval(2, 4)) == Interval(1, 4) + assert Union(Interval(1, 2), Interval(1, 3)) == Interval(1, 3) + assert Union(Interval(1, 3), Interval(1, 2)) == Interval(1, 3) + assert Union(Interval(1, 3, False, True), Interval(1, 2)) == \ + Interval(1, 3, False, True) + assert Union(Interval(1, 3), Interval(1, 2, False, True)) == Interval(1, 3) + assert Union(Interval(1, 2, True), Interval(1, 3)) == Interval(1, 3) + assert Union(Interval(1, 2, True), Interval(1, 3, True)) == \ + Interval(1, 3, True) + assert Union(Interval(1, 2, True), Interval(1, 3, True, True)) == \ + Interval(1, 3, True, True) + assert Union(Interval(1, 2, True, True), Interval(1, 3, True)) == \ + Interval(1, 3, True) + assert Union(Interval(1, 3), Interval(2, 3)) == Interval(1, 3) + assert Union(Interval(1, 3, False, True), Interval(2, 3)) == \ + Interval(1, 3) + assert Union(Interval(1, 2, False, True), Interval(2, 3, True)) != \ + Interval(1, 3) + assert Union(Interval(1, 2), S.EmptySet) == Interval(1, 2) + assert Union(S.EmptySet) == S.EmptySet + + assert Union(Interval(0, 1), *[FiniteSet(1.0/n) for n in range(1, 10)]) == \ + Interval(0, 1) + # issue #18241: + x = Symbol('x') + assert Union(Interval(0, 1), FiniteSet(1, x)) == Union( + Interval(0, 1), FiniteSet(x)) + assert unchanged(Union, Interval(0, 1), FiniteSet(2, x)) + + assert Interval(1, 2).union(Interval(2, 3)) == \ + Interval(1, 2) + Interval(2, 3) + + assert Interval(1, 2).union(Interval(2, 3)) == Interval(1, 3) + + assert Union(Set()) == Set() + + 
assert FiniteSet(1) + FiniteSet(2) + FiniteSet(3) == FiniteSet(1, 2, 3) + assert FiniteSet('ham') + FiniteSet('eggs') == FiniteSet('ham', 'eggs') + assert FiniteSet(1, 2, 3) + S.EmptySet == FiniteSet(1, 2, 3) + + assert FiniteSet(1, 2, 3) & FiniteSet(2, 3, 4) == FiniteSet(2, 3) + assert FiniteSet(1, 2, 3) | FiniteSet(2, 3, 4) == FiniteSet(1, 2, 3, 4) + + assert FiniteSet(1, 2, 3) & S.EmptySet == S.EmptySet + assert FiniteSet(1, 2, 3) | S.EmptySet == FiniteSet(1, 2, 3) + + x = Symbol("x") + y = Symbol("y") + z = Symbol("z") + assert S.EmptySet | FiniteSet(x, FiniteSet(y, z)) == \ + FiniteSet(x, FiniteSet(y, z)) + + # Test that Intervals and FiniteSets play nicely + assert Interval(1, 3) + FiniteSet(2) == Interval(1, 3) + assert Interval(1, 3, True, True) + FiniteSet(3) == \ + Interval(1, 3, True, False) + X = Interval(1, 3) + FiniteSet(5) + Y = Interval(1, 2) + FiniteSet(3) + XandY = X.intersect(Y) + assert 2 in X and 3 in X and 3 in XandY + assert XandY.is_subset(X) and XandY.is_subset(Y) + + raises(TypeError, lambda: Union(1, 2, 3)) + + assert X.is_iterable is False + + # issue 7843 + assert Union(S.EmptySet, FiniteSet(-sqrt(-I), sqrt(-I))) == \ + FiniteSet(-sqrt(-I), sqrt(-I)) + + assert Union(S.Reals, S.Integers) == S.Reals + + +def test_union_iter(): + # Use Range because it is ordered + u = Union(Range(3), Range(5), Range(4), evaluate=False) + + # Round robin + assert list(u) == [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 4] + + +def test_union_is_empty(): + assert (Interval(x, y) + FiniteSet(1)).is_empty == False + assert (Interval(x, y) + Interval(-x, y)).is_empty == None + + +def test_difference(): + assert Interval(1, 3) - Interval(1, 2) == Interval(2, 3, True) + assert Interval(1, 3) - Interval(2, 3) == Interval(1, 2, False, True) + assert Interval(1, 3, True) - Interval(2, 3) == Interval(1, 2, True, True) + assert Interval(1, 3, True) - Interval(2, 3, True) == \ + Interval(1, 2, True, False) + assert Interval(0, 2) - FiniteSet(1) == \ + Union(Interval(0, 1, False, 
True), Interval(1, 2, True, False)) + + # issue #18119 + assert S.Reals - FiniteSet(I) == S.Reals + assert S.Reals - FiniteSet(-I, I) == S.Reals + assert Interval(0, 10) - FiniteSet(-I, I) == Interval(0, 10) + assert Interval(0, 10) - FiniteSet(1, I) == Union( + Interval.Ropen(0, 1), Interval.Lopen(1, 10)) + assert S.Reals - FiniteSet(1, 2 + I, x, y**2) == Complement( + Union(Interval.open(-oo, 1), Interval.open(1, oo)), FiniteSet(x, y**2), + evaluate=False) + + assert FiniteSet(1, 2, 3) - FiniteSet(2) == FiniteSet(1, 3) + assert FiniteSet('ham', 'eggs') - FiniteSet('eggs') == FiniteSet('ham') + assert FiniteSet(1, 2, 3, 4) - Interval(2, 10, True, False) == \ + FiniteSet(1, 2) + assert FiniteSet(1, 2, 3, 4) - S.EmptySet == FiniteSet(1, 2, 3, 4) + assert Union(Interval(0, 2), FiniteSet(2, 3, 4)) - Interval(1, 3) == \ + Union(Interval(0, 1, False, True), FiniteSet(4)) + + assert -1 in S.Reals - S.Naturals + + +def test_Complement(): + A = FiniteSet(1, 3, 4) + B = FiniteSet(3, 4) + C = Interval(1, 3) + D = Interval(1, 2) + + assert Complement(A, B, evaluate=False).is_iterable is True + assert Complement(A, C, evaluate=False).is_iterable is True + assert Complement(C, D, evaluate=False).is_iterable is None + + assert FiniteSet(*Complement(A, B, evaluate=False)) == FiniteSet(1) + assert FiniteSet(*Complement(A, C, evaluate=False)) == FiniteSet(4) + raises(TypeError, lambda: FiniteSet(*Complement(C, A, evaluate=False))) + + assert Complement(Interval(1, 3), Interval(1, 2)) == Interval(2, 3, True) + assert Complement(FiniteSet(1, 3, 4), FiniteSet(3, 4)) == FiniteSet(1) + assert Complement(Union(Interval(0, 2), FiniteSet(2, 3, 4)), + Interval(1, 3)) == \ + Union(Interval(0, 1, False, True), FiniteSet(4)) + + assert 3 not in Complement(Interval(0, 5), Interval(1, 4), evaluate=False) + assert -1 in Complement(S.Reals, S.Naturals, evaluate=False) + assert 1 not in Complement(S.Reals, S.Naturals, evaluate=False) + + assert Complement(S.Integers, S.UniversalSet) == EmptySet + 
assert S.UniversalSet.complement(S.Integers) == EmptySet + + assert (0 not in S.Reals.intersect(S.Integers - FiniteSet(0))) + + assert S.EmptySet - S.Integers == S.EmptySet + + assert (S.Integers - FiniteSet(0)) - FiniteSet(1) == S.Integers - FiniteSet(0, 1) + + assert S.Reals - Union(S.Naturals, FiniteSet(pi)) == \ + Intersection(S.Reals - S.Naturals, S.Reals - FiniteSet(pi)) + # issue 12712 + assert Complement(FiniteSet(x, y, 2), Interval(-10, 10)) == \ + Complement(FiniteSet(x, y), Interval(-10, 10)) + + A = FiniteSet(*symbols('a:c')) + B = FiniteSet(*symbols('d:f')) + assert unchanged(Complement, ProductSet(A, A), B) + + A2 = ProductSet(A, A) + B3 = ProductSet(B, B, B) + assert A2 - B3 == A2 + assert B3 - A2 == B3 + + +def test_set_operations_nonsets(): + '''Tests that e.g. FiniteSet(1) * 2 raises TypeError''' + ops = [ + lambda a, b: a + b, + lambda a, b: a - b, + lambda a, b: a * b, + lambda a, b: a / b, + lambda a, b: a // b, + lambda a, b: a | b, + lambda a, b: a & b, + lambda a, b: a ^ b, + # FiniteSet(1) ** 2 gives a ProductSet + #lambda a, b: a ** b, + ] + Sx = FiniteSet(x) + Sy = FiniteSet(y) + sets = [ + {1}, + FiniteSet(1), + Interval(1, 2), + Union(Sx, Interval(1, 2)), + Intersection(Sx, Sy), + Complement(Sx, Sy), + ProductSet(Sx, Sy), + S.EmptySet, + ] + nums = [0, 1, 2, S(0), S(1), S(2)] + + for si in sets: + for ni in nums: + for op in ops: + raises(TypeError, lambda : op(si, ni)) + raises(TypeError, lambda : op(ni, si)) + raises(TypeError, lambda: si ** object()) + raises(TypeError, lambda: si ** {1}) + + +def test_complement(): + assert Complement({1, 2}, {1}) == {2} + assert Interval(0, 1).complement(S.Reals) == \ + Union(Interval(-oo, 0, True, True), Interval(1, oo, True, True)) + assert Interval(0, 1, True, False).complement(S.Reals) == \ + Union(Interval(-oo, 0, True, False), Interval(1, oo, True, True)) + assert Interval(0, 1, False, True).complement(S.Reals) == \ + Union(Interval(-oo, 0, True, True), Interval(1, oo, False, True)) + assert 
Interval(0, 1, True, True).complement(S.Reals) == \ + Union(Interval(-oo, 0, True, False), Interval(1, oo, False, True)) + + assert S.UniversalSet.complement(S.EmptySet) == S.EmptySet + assert S.UniversalSet.complement(S.Reals) == S.EmptySet + assert S.UniversalSet.complement(S.UniversalSet) == S.EmptySet + + assert S.EmptySet.complement(S.Reals) == S.Reals + + assert Union(Interval(0, 1), Interval(2, 3)).complement(S.Reals) == \ + Union(Interval(-oo, 0, True, True), Interval(1, 2, True, True), + Interval(3, oo, True, True)) + + assert FiniteSet(0).complement(S.Reals) == \ + Union(Interval(-oo, 0, True, True), Interval(0, oo, True, True)) + + assert (FiniteSet(5) + Interval(S.NegativeInfinity, + 0)).complement(S.Reals) == \ + Interval(0, 5, True, True) + Interval(5, S.Infinity, True, True) + + assert FiniteSet(1, 2, 3).complement(S.Reals) == \ + Interval(S.NegativeInfinity, 1, True, True) + \ + Interval(1, 2, True, True) + Interval(2, 3, True, True) +\ + Interval(3, S.Infinity, True, True) + + assert FiniteSet(x).complement(S.Reals) == Complement(S.Reals, FiniteSet(x)) + + assert FiniteSet(0, x).complement(S.Reals) == Complement(Interval(-oo, 0, True, True) + + Interval(0, oo, True, True) + , FiniteSet(x), evaluate=False) + + square = Interval(0, 1) * Interval(0, 1) + notsquare = square.complement(S.Reals*S.Reals) + + assert all(pt in square for pt in [(0, 0), (.5, .5), (1, 0), (1, 1)]) + assert not any( + pt in notsquare for pt in [(0, 0), (.5, .5), (1, 0), (1, 1)]) + assert not any(pt in square for pt in [(-1, 0), (1.5, .5), (10, 10)]) + assert all(pt in notsquare for pt in [(-1, 0), (1.5, .5), (10, 10)]) + + +def test_intersect1(): + assert all(S.Integers.intersection(i) is i for i in + (S.Naturals, S.Naturals0)) + assert all(i.intersection(S.Integers) is i for i in + (S.Naturals, S.Naturals0)) + s = S.Naturals0 + assert S.Naturals.intersection(s) is S.Naturals + assert s.intersection(S.Naturals) is S.Naturals + x = Symbol('x') + assert Interval(0, 
2).intersect(Interval(1, 2)) == Interval(1, 2) + assert Interval(0, 2).intersect(Interval(1, 2, True)) == \ + Interval(1, 2, True) + assert Interval(0, 2, True).intersect(Interval(1, 2)) == \ + Interval(1, 2, False, False) + assert Interval(0, 2, True, True).intersect(Interval(1, 2)) == \ + Interval(1, 2, False, True) + assert Interval(0, 2).intersect(Union(Interval(0, 1), Interval(2, 3))) == \ + Union(Interval(0, 1), Interval(2, 2)) + + assert FiniteSet(1, 2).intersect(FiniteSet(1, 2, 3)) == FiniteSet(1, 2) + assert FiniteSet(1, 2, x).intersect(FiniteSet(x)) == FiniteSet(x) + assert FiniteSet('ham', 'eggs').intersect(FiniteSet('ham')) == \ + FiniteSet('ham') + assert FiniteSet(1, 2, 3, 4, 5).intersect(S.EmptySet) == S.EmptySet + + assert Interval(0, 5).intersect(FiniteSet(1, 3)) == FiniteSet(1, 3) + assert Interval(0, 1, True, True).intersect(FiniteSet(1)) == S.EmptySet + + assert Union(Interval(0, 1), Interval(2, 3)).intersect(Interval(1, 2)) == \ + Union(Interval(1, 1), Interval(2, 2)) + assert Union(Interval(0, 1), Interval(2, 3)).intersect(Interval(0, 2)) == \ + Union(Interval(0, 1), Interval(2, 2)) + assert Union(Interval(0, 1), Interval(2, 3)).intersect(Interval(1, 2, True, True)) == \ + S.EmptySet + assert Union(Interval(0, 1), Interval(2, 3)).intersect(S.EmptySet) == \ + S.EmptySet + assert Union(Interval(0, 5), FiniteSet('ham')).intersect(FiniteSet(2, 3, 4, 5, 6)) == \ + Intersection(FiniteSet(2, 3, 4, 5, 6), Union(FiniteSet('ham'), Interval(0, 5))) + assert Intersection(FiniteSet(1, 2, 3), Interval(2, x), Interval(3, y)) == \ + Intersection(FiniteSet(3), Interval(2, x), Interval(3, y), evaluate=False) + assert Intersection(FiniteSet(1, 2), Interval(0, 3), Interval(x, y)) == \ + Intersection({1, 2}, Interval(x, y), evaluate=False) + assert Intersection(FiniteSet(1, 2, 4), Interval(0, 3), Interval(x, y)) == \ + Intersection({1, 2}, Interval(x, y), evaluate=False) + # XXX: Is the real=True necessary here? 
+ # https://github.com/sympy/sympy/issues/17532 + m, n = symbols('m, n', real=True) + assert Intersection(FiniteSet(m), FiniteSet(m, n), Interval(m, m+1)) == \ + FiniteSet(m) + + # issue 8217 + assert Intersection(FiniteSet(x), FiniteSet(y)) == \ + Intersection(FiniteSet(x), FiniteSet(y), evaluate=False) + assert FiniteSet(x).intersect(S.Reals) == \ + Intersection(S.Reals, FiniteSet(x), evaluate=False) + + # tests for the intersection alias + assert Interval(0, 5).intersection(FiniteSet(1, 3)) == FiniteSet(1, 3) + assert Interval(0, 1, True, True).intersection(FiniteSet(1)) == S.EmptySet + + assert Union(Interval(0, 1), Interval(2, 3)).intersection(Interval(1, 2)) == \ + Union(Interval(1, 1), Interval(2, 2)) + + # canonical boundary selected + a = sqrt(2*sqrt(6) + 5) + b = sqrt(2) + sqrt(3) + assert Interval(a, 4).intersection(Interval(b, 5)) == Interval(b, 4) + assert Interval(1, a).intersection(Interval(0, b)) == Interval(1, b) + + +def test_intersection_interval_float(): + # intersection of Intervals with mixed Rational/Float boundaries should + # lead to Float boundaries in all cases regardless of which Interval is + # open or closed. 
+ typs = [ + (Interval, Interval, Interval), + (Interval, Interval.open, Interval.open), + (Interval, Interval.Lopen, Interval.Lopen), + (Interval, Interval.Ropen, Interval.Ropen), + (Interval.open, Interval.open, Interval.open), + (Interval.open, Interval.Lopen, Interval.open), + (Interval.open, Interval.Ropen, Interval.open), + (Interval.Lopen, Interval.Lopen, Interval.Lopen), + (Interval.Lopen, Interval.Ropen, Interval.open), + (Interval.Ropen, Interval.Ropen, Interval.Ropen), + ] + + as_float = lambda a1, a2: a2 if isinstance(a2, float) else a1 + + for t1, t2, t3 in typs: + for t1i, t2i in [(t1, t2), (t2, t1)]: + for a1, a2, b1, b2 in cartes([2, 2.0], [2, 2.0], [3, 3.0], [3, 3.0]): + I1 = t1(a1, b1) + I2 = t2(a2, b2) + I3 = t3(as_float(a1, a2), as_float(b1, b2)) + assert I1.intersect(I2) == I3 + + +def test_intersection(): + # iterable + i = Intersection(FiniteSet(1, 2, 3), Interval(2, 5), evaluate=False) + assert i.is_iterable + assert set(i) == {S(2), S(3)} + + # challenging intervals + x = Symbol('x', real=True) + i = Intersection(Interval(0, 3), Interval(x, 6)) + assert (5 in i) is False + raises(TypeError, lambda: 2 in i) + + # Singleton special cases + assert Intersection(Interval(0, 1), S.EmptySet) == S.EmptySet + assert Intersection(Interval(-oo, oo), Interval(-oo, x)) == Interval(-oo, x) + + # Products + line = Interval(0, 5) + i = Intersection(line**2, line**3, evaluate=False) + assert (2, 2) not in i + assert (2, 2, 2) not in i + raises(TypeError, lambda: list(i)) + + a = Intersection(Intersection(S.Integers, S.Naturals, evaluate=False), S.Reals, evaluate=False) + assert a._argset == frozenset([Intersection(S.Naturals, S.Integers, evaluate=False), S.Reals]) + + assert Intersection(S.Complexes, FiniteSet(S.ComplexInfinity)) == S.EmptySet + + # issue 12178 + assert Intersection() == S.UniversalSet + + # issue 16987 + assert Intersection({1}, {1}, {x}) == Intersection({1}, {x}) + + +def test_issue_9623(): + n = Symbol('n') + + a = S.Reals + b = 
Interval(0, oo) + c = FiniteSet(n) + + assert Intersection(a, b, c) == Intersection(b, c) + assert Intersection(Interval(1, 2), Interval(3, 4), FiniteSet(n)) == EmptySet + + +def test_is_disjoint(): + assert Interval(0, 2).is_disjoint(Interval(1, 2)) == False + assert Interval(0, 2).is_disjoint(Interval(3, 4)) == True + + +def test_ProductSet__len__(): + A = FiniteSet(1, 2) + B = FiniteSet(1, 2, 3) + assert ProductSet(A).__len__() == 2 + assert ProductSet(A).__len__() is not S(2) + assert ProductSet(A, B).__len__() == 6 + assert ProductSet(A, B).__len__() is not S(6) + + +def test_ProductSet(): + # ProductSet is always a set of Tuples + assert ProductSet(S.Reals) == S.Reals ** 1 + assert ProductSet(S.Reals, S.Reals) == S.Reals ** 2 + assert ProductSet(S.Reals, S.Reals, S.Reals) == S.Reals ** 3 + + assert ProductSet(S.Reals) != S.Reals + assert ProductSet(S.Reals, S.Reals) == S.Reals * S.Reals + assert ProductSet(S.Reals, S.Reals, S.Reals) != S.Reals * S.Reals * S.Reals + assert ProductSet(S.Reals, S.Reals, S.Reals) == (S.Reals * S.Reals * S.Reals).flatten() + + assert 1 not in ProductSet(S.Reals) + assert (1,) in ProductSet(S.Reals) + + assert 1 not in ProductSet(S.Reals, S.Reals) + assert (1, 2) in ProductSet(S.Reals, S.Reals) + assert (1, I) not in ProductSet(S.Reals, S.Reals) + + assert (1, 2, 3) in ProductSet(S.Reals, S.Reals, S.Reals) + assert (1, 2, 3) in S.Reals ** 3 + assert (1, 2, 3) not in S.Reals * S.Reals * S.Reals + assert ((1, 2), 3) in S.Reals * S.Reals * S.Reals + assert (1, (2, 3)) not in S.Reals * S.Reals * S.Reals + assert (1, (2, 3)) in S.Reals * (S.Reals * S.Reals) + + assert ProductSet() == FiniteSet(()) + assert ProductSet(S.Reals, S.EmptySet) == S.EmptySet + + # See GH-17458 + + for ni in range(5): + Rn = ProductSet(*(S.Reals,) * ni) + assert (1,) * ni in Rn + assert 1 not in Rn + + assert (S.Reals * S.Reals) * S.Reals != S.Reals * (S.Reals * S.Reals) + + S1 = S.Reals + S2 = S.Integers + x1 = pi + x2 = 3 + assert x1 in S1 + assert x2 in S2 + 
assert (x1, x2) in S1 * S2 + S3 = S1 * S2 + x3 = (x1, x2) + assert x3 in S3 + assert (x3, x3) in S3 * S3 + assert x3 + x3 not in S3 * S3 + + raises(ValueError, lambda: S.Reals**-1) + with warns_deprecated_sympy(): + ProductSet(FiniteSet(s) for s in range(2)) + raises(TypeError, lambda: ProductSet(None)) + + S1 = FiniteSet(1, 2) + S2 = FiniteSet(3, 4) + S3 = ProductSet(S1, S2) + assert (S3.as_relational(x, y) + == And(S1.as_relational(x), S2.as_relational(y)) + == And(Or(Eq(x, 1), Eq(x, 2)), Or(Eq(y, 3), Eq(y, 4)))) + raises(ValueError, lambda: S3.as_relational(x)) + raises(ValueError, lambda: S3.as_relational(x, 1)) + raises(ValueError, lambda: ProductSet(Interval(0, 1)).as_relational(x, y)) + + Z2 = ProductSet(S.Integers, S.Integers) + assert Z2.contains((1, 2)) is S.true + assert Z2.contains((1,)) is S.false + assert Z2.contains(x) == Contains(x, Z2, evaluate=False) + assert Z2.contains(x).subs(x, 1) is S.false + assert Z2.contains((x, 1)).subs(x, 2) is S.true + assert Z2.contains((x, y)) == Contains(x, S.Integers) & Contains(y, S.Integers) + assert unchanged(Contains, (x, y), Z2) + assert Contains((1, 2), Z2) is S.true + + +def test_ProductSet_of_single_arg_is_not_arg(): + assert unchanged(ProductSet, Interval(0, 1)) + assert unchanged(ProductSet, ProductSet(Interval(0, 1))) + + +def test_ProductSet_is_empty(): + assert ProductSet(S.Integers, S.Reals).is_empty == False + assert ProductSet(Interval(x, 1), S.Reals).is_empty == None + + +def test_interval_subs(): + a = Symbol('a', real=True) + + assert Interval(0, a).subs(a, 2) == Interval(0, 2) + assert Interval(a, 0).subs(a, 2) == S.EmptySet + + +def test_interval_to_mpi(): + assert Interval(0, 1).to_mpi() == mpi(0, 1) + assert Interval(0, 1, True, False).to_mpi() == mpi(0, 1) + assert type(Interval(0, 1).to_mpi()) == type(mpi(0, 1)) + + +def test_set_evalf(): + assert Interval(S(11)/64, S.Half).evalf() == Interval( + Float('0.171875'), Float('0.5')) + assert Interval(x, S.Half, right_open=True).evalf() == 
Interval( + x, Float('0.5'), right_open=True) + assert Interval(-oo, S.Half).evalf() == Interval(-oo, Float('0.5')) + assert FiniteSet(2, x).evalf() == FiniteSet(Float('2.0'), x) + + +def test_measure(): + a = Symbol('a', real=True) + + assert Interval(1, 3).measure == 2 + assert Interval(0, a).measure == a + assert Interval(1, a).measure == a - 1 + + assert Union(Interval(1, 2), Interval(3, 4)).measure == 2 + assert Union(Interval(1, 2), Interval(3, 4), FiniteSet(5, 6, 7)).measure \ + == 2 + + assert FiniteSet(1, 2, oo, a, -oo, -5).measure == 0 + + assert S.EmptySet.measure == 0 + + square = Interval(0, 10) * Interval(0, 10) + offsetsquare = Interval(5, 15) * Interval(5, 15) + band = Interval(-oo, oo) * Interval(2, 4) + + assert square.measure == offsetsquare.measure == 100 + assert (square + offsetsquare).measure == 175 # there is some overlap + assert (square - offsetsquare).measure == 75 + assert (square * FiniteSet(1, 2, 3)).measure == 0 + assert (square.intersect(band)).measure == 20 + assert (square + band).measure is oo + assert (band * FiniteSet(1, 2, 3)).measure is nan + + +def test_is_subset(): + assert Interval(0, 1).is_subset(Interval(0, 2)) is True + assert Interval(0, 3).is_subset(Interval(0, 2)) is False + assert Interval(0, 1).is_subset(FiniteSet(0, 1)) is False + + assert FiniteSet(1, 2).is_subset(FiniteSet(1, 2, 3, 4)) + assert FiniteSet(4, 5).is_subset(FiniteSet(1, 2, 3, 4)) is False + assert FiniteSet(1).is_subset(Interval(0, 2)) + assert FiniteSet(1, 2).is_subset(Interval(0, 2, True, True)) is False + assert (Interval(1, 2) + FiniteSet(3)).is_subset( + Interval(0, 2, False, True) + FiniteSet(2, 3)) + + assert Interval(3, 4).is_subset(Union(Interval(0, 1), Interval(2, 5))) is True + assert Interval(3, 6).is_subset(Union(Interval(0, 1), Interval(2, 5))) is False + + assert FiniteSet(1, 2, 3, 4).is_subset(Interval(0, 5)) is True + assert S.EmptySet.is_subset(FiniteSet(1, 2, 3)) is True + + assert Interval(0, 1).is_subset(S.EmptySet) is False + 
assert S.EmptySet.is_subset(S.EmptySet) is True + + raises(ValueError, lambda: S.EmptySet.is_subset(1)) + + # tests for the issubset alias + assert FiniteSet(1, 2, 3, 4).issubset(Interval(0, 5)) is True + assert S.EmptySet.issubset(FiniteSet(1, 2, 3)) is True + + assert S.Naturals.is_subset(S.Integers) + assert S.Naturals0.is_subset(S.Integers) + + assert FiniteSet(x).is_subset(FiniteSet(y)) is None + assert FiniteSet(x).is_subset(FiniteSet(y).subs(y, x)) is True + assert FiniteSet(x).is_subset(FiniteSet(y).subs(y, x+1)) is False + + assert Interval(0, 1).is_subset(Interval(0, 1, left_open=True)) is False + assert Interval(-2, 3).is_subset(Union(Interval(-oo, -2), Interval(3, oo))) is False + + n = Symbol('n', integer=True) + assert Range(-3, 4, 1).is_subset(FiniteSet(-10, 10)) is False + assert Range(S(10)**100).is_subset(FiniteSet(0, 1, 2)) is False + assert Range(6, 0, -2).is_subset(FiniteSet(2, 4, 6)) is True + assert Range(1, oo).is_subset(FiniteSet(1, 2)) is False + assert Range(-oo, 1).is_subset(FiniteSet(1)) is False + assert Range(3).is_subset(FiniteSet(0, 1, n)) is None + assert Range(n, n + 2).is_subset(FiniteSet(n, n + 1)) is True + assert Range(5).is_subset(Interval(0, 4, right_open=True)) is False + #issue 19513 + assert imageset(Lambda(n, 1/n), S.Integers).is_subset(S.Reals) is None + +def test_is_proper_subset(): + assert Interval(0, 1).is_proper_subset(Interval(0, 2)) is True + assert Interval(0, 3).is_proper_subset(Interval(0, 2)) is False + assert S.EmptySet.is_proper_subset(FiniteSet(1, 2, 3)) is True + + raises(ValueError, lambda: Interval(0, 1).is_proper_subset(0)) + + +def test_is_superset(): + assert Interval(0, 1).is_superset(Interval(0, 2)) == False + assert Interval(0, 3).is_superset(Interval(0, 2)) + + assert FiniteSet(1, 2).is_superset(FiniteSet(1, 2, 3, 4)) == False + assert FiniteSet(4, 5).is_superset(FiniteSet(1, 2, 3, 4)) == False + assert FiniteSet(1).is_superset(Interval(0, 2)) == False + assert FiniteSet(1, 
2).is_superset(Interval(0, 2, True, True)) == False + assert (Interval(1, 2) + FiniteSet(3)).is_superset( + Interval(0, 2, False, True) + FiniteSet(2, 3)) == False + + assert Interval(3, 4).is_superset(Union(Interval(0, 1), Interval(2, 5))) == False + + assert FiniteSet(1, 2, 3, 4).is_superset(Interval(0, 5)) == False + assert S.EmptySet.is_superset(FiniteSet(1, 2, 3)) == False + + assert Interval(0, 1).is_superset(S.EmptySet) == True + assert S.EmptySet.is_superset(S.EmptySet) == True + + raises(ValueError, lambda: S.EmptySet.is_superset(1)) + + # tests for the issuperset alias + assert Interval(0, 1).issuperset(S.EmptySet) == True + assert S.EmptySet.issuperset(S.EmptySet) == True + + +def test_is_proper_superset(): + assert Interval(0, 1).is_proper_superset(Interval(0, 2)) is False + assert Interval(0, 3).is_proper_superset(Interval(0, 2)) is True + assert FiniteSet(1, 2, 3).is_proper_superset(S.EmptySet) is True + + raises(ValueError, lambda: Interval(0, 1).is_proper_superset(0)) + + +def test_contains(): + assert Interval(0, 2).contains(1) is S.true + assert Interval(0, 2).contains(3) is S.false + assert Interval(0, 2, True, False).contains(0) is S.false + assert Interval(0, 2, True, False).contains(2) is S.true + assert Interval(0, 2, False, True).contains(0) is S.true + assert Interval(0, 2, False, True).contains(2) is S.false + assert Interval(0, 2, True, True).contains(0) is S.false + assert Interval(0, 2, True, True).contains(2) is S.false + + assert (Interval(0, 2) in Interval(0, 2)) is False + + assert FiniteSet(1, 2, 3).contains(2) is S.true + assert FiniteSet(1, 2, Symbol('x')).contains(Symbol('x')) is S.true + + assert FiniteSet(y)._contains(x) == Eq(y, x, evaluate=False) + raises(TypeError, lambda: x in FiniteSet(y)) + assert FiniteSet({x, y})._contains({x}) == Eq({x, y}, {x}, evaluate=False) + assert FiniteSet({x, y}).subs(y, x)._contains({x}) is S.true + assert FiniteSet({x, y}).subs(y, x+1)._contains({x}) is S.false + + # issue 8197 + from 
sympy.abc import a, b + assert FiniteSet(b).contains(-a) == Eq(b, -a) + assert FiniteSet(b).contains(a) == Eq(b, a) + assert FiniteSet(a).contains(1) == Eq(a, 1) + raises(TypeError, lambda: 1 in FiniteSet(a)) + + # issue 8209 + rad1 = Pow(Pow(2, Rational(1, 3)) - 1, Rational(1, 3)) + rad2 = Pow(Rational(1, 9), Rational(1, 3)) - Pow(Rational(2, 9), Rational(1, 3)) + Pow(Rational(4, 9), Rational(1, 3)) + s1 = FiniteSet(rad1) + s2 = FiniteSet(rad2) + assert s1 - s2 == S.EmptySet + + items = [1, 2, S.Infinity, S('ham'), -1.1] + fset = FiniteSet(*items) + assert all(item in fset for item in items) + assert all(fset.contains(item) is S.true for item in items) + + assert Union(Interval(0, 1), Interval(2, 5)).contains(3) is S.true + assert Union(Interval(0, 1), Interval(2, 5)).contains(6) is S.false + assert Union(Interval(0, 1), FiniteSet(2, 5)).contains(3) is S.false + + assert S.EmptySet.contains(1) is S.false + assert FiniteSet(rootof(x**3 + x - 1, 0)).contains(S.Infinity) is S.false + + assert rootof(x**5 + x**3 + 1, 0) in S.Reals + assert not rootof(x**5 + x**3 + 1, 1) in S.Reals + + # non-bool results + assert Union(Interval(1, 2), Interval(3, 4)).contains(x) == \ + Or(And(S.One <= x, x <= 2), And(S(3) <= x, x <= 4)) + assert Intersection(Interval(1, x), Interval(2, 3)).contains(y) == \ + And(y <= 3, y <= x, S.One <= y, S(2) <= y) + + assert (S.Complexes).contains(S.ComplexInfinity) == S.false + + +def test_interval_symbolic(): + x = Symbol('x') + e = Interval(0, 1) + assert e.contains(x) == And(S.Zero <= x, x <= 1) + raises(TypeError, lambda: x in e) + e = Interval(0, 1, True, True) + assert e.contains(x) == And(S.Zero < x, x < 1) + c = Symbol('c', real=False) + assert Interval(x, x + 1).contains(c) == False + e = Symbol('e', extended_real=True) + assert Interval(-oo, oo).contains(e) == And( + S.NegativeInfinity < e, e < S.Infinity) + + +def test_union_contains(): + x = Symbol('x') + i1 = Interval(0, 1) + i2 = Interval(2, 3) + i3 = Union(i1, i2) + assert 
i3.as_relational(x) == Or(And(S.Zero <= x, x <= 1), And(S(2) <= x, x <= 3)) + raises(TypeError, lambda: x in i3) + e = i3.contains(x) + assert e == i3.as_relational(x) + assert e.subs(x, -0.5) is false + assert e.subs(x, 0.5) is true + assert e.subs(x, 1.5) is false + assert e.subs(x, 2.5) is true + assert e.subs(x, 3.5) is false + + U = Interval(0, 2, True, True) + Interval(10, oo) + FiniteSet(-1, 2, 5, 6) + assert all(el not in U for el in [0, 4, -oo]) + assert all(el in U for el in [2, 5, 10]) + + +def test_is_number(): + assert Interval(0, 1).is_number is False + assert Set().is_number is False + + +def test_Interval_is_left_unbounded(): + assert Interval(3, 4).is_left_unbounded is False + assert Interval(-oo, 3).is_left_unbounded is True + assert Interval(Float("-inf"), 3).is_left_unbounded is True + + +def test_Interval_is_right_unbounded(): + assert Interval(3, 4).is_right_unbounded is False + assert Interval(3, oo).is_right_unbounded is True + assert Interval(3, Float("+inf")).is_right_unbounded is True + + +def test_Interval_as_relational(): + x = Symbol('x') + + assert Interval(-1, 2, False, False).as_relational(x) == \ + And(Le(-1, x), Le(x, 2)) + assert Interval(-1, 2, True, False).as_relational(x) == \ + And(Lt(-1, x), Le(x, 2)) + assert Interval(-1, 2, False, True).as_relational(x) == \ + And(Le(-1, x), Lt(x, 2)) + assert Interval(-1, 2, True, True).as_relational(x) == \ + And(Lt(-1, x), Lt(x, 2)) + + assert Interval(-oo, 2, right_open=False).as_relational(x) == And(Lt(-oo, x), Le(x, 2)) + assert Interval(-oo, 2, right_open=True).as_relational(x) == And(Lt(-oo, x), Lt(x, 2)) + + assert Interval(-2, oo, left_open=False).as_relational(x) == And(Le(-2, x), Lt(x, oo)) + assert Interval(-2, oo, left_open=True).as_relational(x) == And(Lt(-2, x), Lt(x, oo)) + + assert Interval(-oo, oo).as_relational(x) == And(Lt(-oo, x), Lt(x, oo)) + x = Symbol('x', real=True) + y = Symbol('y', real=True) + assert Interval(x, y).as_relational(x) == (x <= y) + assert 
Interval(y, x).as_relational(x) == (y <= x) + + +def test_Finite_as_relational(): + x = Symbol('x') + y = Symbol('y') + + assert FiniteSet(1, 2).as_relational(x) == Or(Eq(x, 1), Eq(x, 2)) + assert FiniteSet(y, -5).as_relational(x) == Or(Eq(x, y), Eq(x, -5)) + + +def test_Union_as_relational(): + x = Symbol('x') + assert (Interval(0, 1) + FiniteSet(2)).as_relational(x) == \ + Or(And(Le(0, x), Le(x, 1)), Eq(x, 2)) + assert (Interval(0, 1, True, True) + FiniteSet(1)).as_relational(x) == \ + And(Lt(0, x), Le(x, 1)) + assert Or(x < 0, x > 0).as_set().as_relational(x) == \ + And((x > -oo), (x < oo), Ne(x, 0)) + assert (Interval.Ropen(1, 3) + Interval.Lopen(3, 5) + ).as_relational(x) == And(Ne(x,3),(x>=1),(x<=5)) + + +def test_Intersection_as_relational(): + x = Symbol('x') + assert (Intersection(Interval(0, 1), FiniteSet(2), + evaluate=False).as_relational(x) + == And(And(Le(0, x), Le(x, 1)), Eq(x, 2))) + + +def test_Complement_as_relational(): + x = Symbol('x') + expr = Complement(Interval(0, 1), FiniteSet(2), evaluate=False) + assert expr.as_relational(x) == \ + And(Le(0, x), Le(x, 1), Ne(x, 2)) + + +@XFAIL +def test_Complement_as_relational_fail(): + x = Symbol('x') + expr = Complement(Interval(0, 1), FiniteSet(2), evaluate=False) + # XXX This example fails because 0 <= x changes to x >= 0 + # during the evaluation. 
+ assert expr.as_relational(x) == \ + (0 <= x) & (x <= 1) & Ne(x, 2) + + +def test_SymmetricDifference_as_relational(): + x = Symbol('x') + expr = SymmetricDifference(Interval(0, 1), FiniteSet(2), evaluate=False) + assert expr.as_relational(x) == Xor(Eq(x, 2), Le(0, x) & Le(x, 1)) + + +def test_EmptySet(): + assert S.EmptySet.as_relational(Symbol('x')) is S.false + assert S.EmptySet.intersect(S.UniversalSet) == S.EmptySet + assert S.EmptySet.boundary == S.EmptySet + + +def test_finite_basic(): + x = Symbol('x') + A = FiniteSet(1, 2, 3) + B = FiniteSet(3, 4, 5) + AorB = Union(A, B) + AandB = A.intersect(B) + assert A.is_subset(AorB) and B.is_subset(AorB) + assert AandB.is_subset(A) + assert AandB == FiniteSet(3) + + assert A.inf == 1 and A.sup == 3 + assert AorB.inf == 1 and AorB.sup == 5 + assert FiniteSet(x, 1, 5).sup == Max(x, 5) + assert FiniteSet(x, 1, 5).inf == Min(x, 1) + + # issue 7335 + assert FiniteSet(S.EmptySet) != S.EmptySet + assert FiniteSet(FiniteSet(1, 2, 3)) != FiniteSet(1, 2, 3) + assert FiniteSet((1, 2, 3)) != FiniteSet(1, 2, 3) + + # Ensure a variety of types can exist in a FiniteSet + assert FiniteSet((1, 2), A, -5, x, 'eggs', x**2) + + assert (A > B) is False + assert (A >= B) is False + assert (A < B) is False + assert (A <= B) is False + assert AorB > A and AorB > B + assert AorB >= A and AorB >= B + assert A >= A and A <= A + assert A >= AandB and B >= AandB + assert A > AandB and B > AandB + + +def test_product_basic(): + H, T = 'H', 'T' + unit_line = Interval(0, 1) + d6 = FiniteSet(1, 2, 3, 4, 5, 6) + d4 = FiniteSet(1, 2, 3, 4) + coin = FiniteSet(H, T) + + square = unit_line * unit_line + + assert (0, 0) in square + assert 0 not in square + assert (H, T) in coin ** 2 + assert (.5, .5, .5) in (square * unit_line).flatten() + assert ((.5, .5), .5) in square * unit_line + assert (H, 3, 3) in (coin * d6 * d6).flatten() + assert ((H, 3), 3) in coin * d6 * d6 + HH, TT = sympify(H), sympify(T) + assert set(coin**2) == {(HH, HH), (HH, TT), (TT, 
HH), (TT, TT)} + + assert (d4*d4).is_subset(d6*d6) + + assert square.complement(Interval(-oo, oo)*Interval(-oo, oo)) == Union( + (Interval(-oo, 0, True, True) + + Interval(1, oo, True, True))*Interval(-oo, oo), + Interval(-oo, oo)*(Interval(-oo, 0, True, True) + + Interval(1, oo, True, True))) + + assert (Interval(-5, 5)**3).is_subset(Interval(-10, 10)**3) + assert not (Interval(-10, 10)**3).is_subset(Interval(-5, 5)**3) + assert not (Interval(-5, 5)**2).is_subset(Interval(-10, 10)**3) + + assert (Interval(.2, .5)*FiniteSet(.5)).is_subset(square) # segment in square + + assert len(coin*coin*coin) == 8 + assert len(S.EmptySet*S.EmptySet) == 0 + assert len(S.EmptySet*coin) == 0 + raises(TypeError, lambda: len(coin*Interval(0, 2))) + + +def test_real(): + x = Symbol('x', real=True) + + I = Interval(0, 5) + J = Interval(10, 20) + A = FiniteSet(1, 2, 30, x, S.Pi) + B = FiniteSet(-4, 0) + C = FiniteSet(100) + D = FiniteSet('Ham', 'Eggs') + + assert all(s.is_subset(S.Reals) for s in [I, J, A, B, C]) + assert not D.is_subset(S.Reals) + assert all((a + b).is_subset(S.Reals) for a in [I, J, A, B, C] for b in [I, J, A, B, C]) + assert not any((a + D).is_subset(S.Reals) for a in [I, J, A, B, C, D]) + + assert not (I + A + D).is_subset(S.Reals) + + +def test_supinf(): + x = Symbol('x', real=True) + y = Symbol('y', real=True) + + assert (Interval(0, 1) + FiniteSet(2)).sup == 2 + assert (Interval(0, 1) + FiniteSet(2)).inf == 0 + assert (Interval(0, 1) + FiniteSet(x)).sup == Max(1, x) + assert (Interval(0, 1) + FiniteSet(x)).inf == Min(0, x) + assert FiniteSet(5, 1, x).sup == Max(5, x) + assert FiniteSet(5, 1, x).inf == Min(1, x) + assert FiniteSet(5, 1, x, y).sup == Max(5, x, y) + assert FiniteSet(5, 1, x, y).inf == Min(1, x, y) + assert FiniteSet(5, 1, x, y, S.Infinity, S.NegativeInfinity).sup == \ + S.Infinity + assert FiniteSet(5, 1, x, y, S.Infinity, S.NegativeInfinity).inf == \ + S.NegativeInfinity + assert FiniteSet('Ham', 'Eggs').sup == Max('Ham', 'Eggs') + + +def 
test_universalset(): + U = S.UniversalSet + x = Symbol('x') + assert U.as_relational(x) is S.true + assert U.union(Interval(2, 4)) == U + + assert U.intersect(Interval(2, 4)) == Interval(2, 4) + assert U.measure is S.Infinity + assert U.boundary == S.EmptySet + assert U.contains(0) is S.true + + +def test_Union_of_ProductSets_shares(): + line = Interval(0, 2) + points = FiniteSet(0, 1, 2) + assert Union(line * line, line * points) == line * line + + +def test_Interval_free_symbols(): + # issue 6211 + assert Interval(0, 1).free_symbols == set() + x = Symbol('x', real=True) + assert Interval(0, x).free_symbols == {x} + + +def test_image_interval(): + x = Symbol('x', real=True) + a = Symbol('a', real=True) + assert imageset(x, 2*x, Interval(-2, 1)) == Interval(-4, 2) + assert imageset(x, 2*x, Interval(-2, 1, True, False)) == \ + Interval(-4, 2, True, False) + assert imageset(x, x**2, Interval(-2, 1, True, False)) == \ + Interval(0, 4, False, True) + assert imageset(x, x**2, Interval(-2, 1)) == Interval(0, 4) + assert imageset(x, x**2, Interval(-2, 1, True, False)) == \ + Interval(0, 4, False, True) + assert imageset(x, x**2, Interval(-2, 1, True, True)) == \ + Interval(0, 4, False, True) + assert imageset(x, (x - 2)**2, Interval(1, 3)) == Interval(0, 1) + assert imageset(x, 3*x**4 - 26*x**3 + 78*x**2 - 90*x, Interval(0, 4)) == \ + Interval(-35, 0) # Multiple Maxima + assert imageset(x, x + 1/x, Interval(-oo, oo)) == Interval(-oo, -2) \ + + Interval(2, oo) # Single Infinite discontinuity + assert imageset(x, 1/x + 1/(x-1)**2, Interval(0, 2, True, False)) == \ + Interval(Rational(3, 2), oo, False) # Multiple Infinite discontinuities + + # Test for Python lambda + assert imageset(lambda x: 2*x, Interval(-2, 1)) == Interval(-4, 2) + + assert imageset(Lambda(x, a*x), Interval(0, 1)) == \ + ImageSet(Lambda(x, a*x), Interval(0, 1)) + + assert imageset(Lambda(x, sin(cos(x))), Interval(0, 1)) == \ + ImageSet(Lambda(x, sin(cos(x))), Interval(0, 1)) + + +def 
test_image_piecewise(): + f = Piecewise((x, x <= -1), (1/x**2, x <= 5), (x**3, True)) + f1 = Piecewise((0, x <= 1), (1, x <= 2), (2, True)) + assert imageset(x, f, Interval(-5, 5)) == Union(Interval(-5, -1), Interval(Rational(1, 25), oo)) + assert imageset(x, f1, Interval(1, 2)) == FiniteSet(0, 1) + + +@XFAIL # See: https://github.com/sympy/sympy/pull/2723#discussion_r8659826 +def test_image_Intersection(): + x = Symbol('x', real=True) + y = Symbol('y', real=True) + assert imageset(x, x**2, Interval(-2, 0).intersect(Interval(x, y))) == \ + Interval(0, 4).intersect(Interval(Min(x**2, y**2), Max(x**2, y**2))) + + +def test_image_FiniteSet(): + x = Symbol('x', real=True) + assert imageset(x, 2*x, FiniteSet(1, 2, 3)) == FiniteSet(2, 4, 6) + + +def test_image_Union(): + x = Symbol('x', real=True) + assert imageset(x, x**2, Interval(-2, 0) + FiniteSet(1, 2, 3)) == \ + (Interval(0, 4) + FiniteSet(9)) + + +def test_image_EmptySet(): + x = Symbol('x', real=True) + assert imageset(x, 2*x, S.EmptySet) == S.EmptySet + + +def test_issue_5724_7680(): + assert I not in S.Reals # issue 7680 + assert Interval(-oo, oo).contains(I) is S.false + + +def test_boundary(): + assert FiniteSet(1).boundary == FiniteSet(1) + assert all(Interval(0, 1, left_open, right_open).boundary == FiniteSet(0, 1) + for left_open in (true, false) for right_open in (true, false)) + + +def test_boundary_Union(): + assert (Interval(0, 1) + Interval(2, 3)).boundary == FiniteSet(0, 1, 2, 3) + assert ((Interval(0, 1, False, True) + + Interval(1, 2, True, False)).boundary == FiniteSet(0, 1, 2)) + + assert (Interval(0, 1) + FiniteSet(2)).boundary == FiniteSet(0, 1, 2) + assert Union(Interval(0, 10), Interval(5, 15), evaluate=False).boundary \ + == FiniteSet(0, 15) + + assert Union(Interval(0, 10), Interval(0, 1), evaluate=False).boundary \ + == FiniteSet(0, 10) + assert Union(Interval(0, 10, True, True), + Interval(10, 15, True, True), evaluate=False).boundary \ + == FiniteSet(0, 10, 15) + + +@XFAIL +def 
test_union_boundary_of_joining_sets(): + """ Testing the boundary of unions is a hard problem """ + assert Union(Interval(0, 10), Interval(10, 15), evaluate=False).boundary \ + == FiniteSet(0, 15) + + +def test_boundary_ProductSet(): + open_square = Interval(0, 1, True, True) ** 2 + assert open_square.boundary == (FiniteSet(0, 1) * Interval(0, 1) + + Interval(0, 1) * FiniteSet(0, 1)) + + second_square = Interval(1, 2, True, True) * Interval(0, 1, True, True) + assert (open_square + second_square).boundary == ( + FiniteSet(0, 1) * Interval(0, 1) + + FiniteSet(1, 2) * Interval(0, 1) + + Interval(0, 1) * FiniteSet(0, 1) + + Interval(1, 2) * FiniteSet(0, 1)) + + +def test_boundary_ProductSet_line(): + line_in_r2 = Interval(0, 1) * FiniteSet(0) + assert line_in_r2.boundary == line_in_r2 + + +def test_is_open(): + assert Interval(0, 1, False, False).is_open is False + assert Interval(0, 1, True, False).is_open is False + assert Interval(0, 1, True, True).is_open is True + assert FiniteSet(1, 2, 3).is_open is False + + +def test_is_closed(): + assert Interval(0, 1, False, False).is_closed is True + assert Interval(0, 1, True, False).is_closed is False + assert FiniteSet(1, 2, 3).is_closed is True + + +def test_closure(): + assert Interval(0, 1, False, True).closure == Interval(0, 1, False, False) + + +def test_interior(): + assert Interval(0, 1, False, True).interior == Interval(0, 1, True, True) + + +def test_issue_7841(): + raises(TypeError, lambda: x in S.Reals) + + +def test_Eq(): + assert Eq(Interval(0, 1), Interval(0, 1)) + assert Eq(Interval(0, 1), Interval(0, 2)) == False + + s1 = FiniteSet(0, 1) + s2 = FiniteSet(1, 2) + + assert Eq(s1, s1) + assert Eq(s1, s2) == False + + assert Eq(s1*s2, s1*s2) + assert Eq(s1*s2, s2*s1) == False + + assert unchanged(Eq, FiniteSet({x, y}), FiniteSet({x})) + assert Eq(FiniteSet({x, y}).subs(y, x), FiniteSet({x})) is S.true + assert Eq(FiniteSet({x, y}), FiniteSet({x})).subs(y, x) is S.true + assert Eq(FiniteSet({x, y}).subs(y, 
x+1), FiniteSet({x})) is S.false + assert Eq(FiniteSet({x, y}), FiniteSet({x})).subs(y, x+1) is S.false + + assert Eq(ProductSet({1}, {2}), Interval(1, 2)) is S.false + assert Eq(ProductSet({1}), ProductSet({1}, {2})) is S.false + + assert Eq(FiniteSet(()), FiniteSet(1)) is S.false + assert Eq(ProductSet(), FiniteSet(1)) is S.false + + i1 = Interval(0, 1) + i2 = Interval(x, y) + assert unchanged(Eq, ProductSet(i1, i1), ProductSet(i2, i2)) + + +def test_SymmetricDifference(): + A = FiniteSet(0, 1, 2, 3, 4, 5) + B = FiniteSet(2, 4, 6, 8, 10) + C = Interval(8, 10) + + assert SymmetricDifference(A, B, evaluate=False).is_iterable is True + assert SymmetricDifference(A, C, evaluate=False).is_iterable is None + assert FiniteSet(*SymmetricDifference(A, B, evaluate=False)) == \ + FiniteSet(0, 1, 3, 5, 6, 8, 10) + raises(TypeError, + lambda: FiniteSet(*SymmetricDifference(A, C, evaluate=False))) + + assert SymmetricDifference(FiniteSet(0, 1, 2, 3, 4, 5), \ + FiniteSet(2, 4, 6, 8, 10)) == FiniteSet(0, 1, 3, 5, 6, 8, 10) + assert SymmetricDifference(FiniteSet(2, 3, 4), FiniteSet(2, 3, 4 ,5)) \ + == FiniteSet(5) + assert FiniteSet(1, 2, 3, 4, 5) ^ FiniteSet(1, 2, 5, 6) == \ + FiniteSet(3, 4, 6) + assert Set(S(1), S(2), S(3)) ^ Set(S(2), S(3), S(4)) == Union(Set(S(1), S(2), S(3)) - Set(S(2), S(3), S(4)), \ + Set(S(2), S(3), S(4)) - Set(S(1), S(2), S(3))) + assert Interval(0, 4) ^ Interval(2, 5) == Union(Interval(0, 4) - \ + Interval(2, 5), Interval(2, 5) - Interval(0, 4)) + + +def test_issue_9536(): + from sympy.functions.elementary.exponential import log + a = Symbol('a', real=True) + assert FiniteSet(log(a)).intersect(S.Reals) == Intersection(S.Reals, FiniteSet(log(a))) + + +def test_issue_9637(): + n = Symbol('n') + a = FiniteSet(n) + b = FiniteSet(2, n) + assert Complement(S.Reals, a) == Complement(S.Reals, a, evaluate=False) + assert Complement(Interval(1, 3), a) == Complement(Interval(1, 3), a, evaluate=False) + assert Complement(Interval(1, 3), b) == \ + 
Complement(Union(Interval(1, 2, False, True), Interval(2, 3, True, False)), a) + assert Complement(a, S.Reals) == Complement(a, S.Reals, evaluate=False) + assert Complement(a, Interval(1, 3)) == Complement(a, Interval(1, 3), evaluate=False) + + +def test_issue_9808(): + # See https://github.com/sympy/sympy/issues/16342 + assert Complement(FiniteSet(y), FiniteSet(1)) == Complement(FiniteSet(y), FiniteSet(1), evaluate=False) + assert Complement(FiniteSet(1, 2, x), FiniteSet(x, y, 2, 3)) == \ + Complement(FiniteSet(1), FiniteSet(y), evaluate=False) + + +def test_issue_9956(): + assert Union(Interval(-oo, oo), FiniteSet(1)) == Interval(-oo, oo) + assert Interval(-oo, oo).contains(1) is S.true + + +def test_issue_Symbol_inter(): + i = Interval(0, oo) + r = S.Reals + mat = Matrix([0, 0, 0]) + assert Intersection(r, i, FiniteSet(m), FiniteSet(m, n)) == \ + Intersection(i, FiniteSet(m)) + assert Intersection(FiniteSet(1, m, n), FiniteSet(m, n, 2), i) == \ + Intersection(i, FiniteSet(m, n)) + assert Intersection(FiniteSet(m, n, x), FiniteSet(m, z), r) == \ + Intersection(Intersection({m, z}, {m, n, x}), r) + assert Intersection(FiniteSet(m, n, 3), FiniteSet(m, n, x), r) == \ + Intersection(FiniteSet(3, m, n), FiniteSet(m, n, x), r, evaluate=False) + assert Intersection(FiniteSet(m, n, 3), FiniteSet(m, n, 2, 3), r) == \ + Intersection(FiniteSet(3, m, n), r) + assert Intersection(r, FiniteSet(mat, 2, n), FiniteSet(0, mat, n)) == \ + Intersection(r, FiniteSet(n)) + assert Intersection(FiniteSet(sin(x), cos(x)), FiniteSet(sin(x), cos(x), 1), r) == \ + Intersection(r, FiniteSet(sin(x), cos(x))) + assert Intersection(FiniteSet(x**2, 1, sin(x)), FiniteSet(x**2, 2, sin(x)), r) == \ + Intersection(r, FiniteSet(x**2, sin(x))) + + +def test_issue_11827(): + assert S.Naturals0**4 + + +def test_issue_10113(): + f = x**2/(x**2 - 4) + assert imageset(x, f, S.Reals) == Union(Interval(-oo, 0), Interval(1, oo, True, True)) + assert imageset(x, f, Interval(-2, 2)) == Interval(-oo, 0) + assert 
imageset(x, f, Interval(-2, 3)) == Union(Interval(-oo, 0), Interval(Rational(9, 5), oo)) + + +def test_issue_10248(): + raises( + TypeError, lambda: list(Intersection(S.Reals, FiniteSet(x))) + ) + A = Symbol('A', real=True) + assert list(Intersection(S.Reals, FiniteSet(A))) == [A] + + +def test_issue_9447(): + a = Interval(0, 1) + Interval(2, 3) + assert Complement(S.UniversalSet, a) == Complement( + S.UniversalSet, Union(Interval(0, 1), Interval(2, 3)), evaluate=False) + assert Complement(S.Naturals, a) == Complement( + S.Naturals, Union(Interval(0, 1), Interval(2, 3)), evaluate=False) + + +def test_issue_10337(): + assert (FiniteSet(2) == 3) is False + assert (FiniteSet(2) != 3) is True + raises(TypeError, lambda: FiniteSet(2) < 3) + raises(TypeError, lambda: FiniteSet(2) <= 3) + raises(TypeError, lambda: FiniteSet(2) > 3) + raises(TypeError, lambda: FiniteSet(2) >= 3) + + +def test_issue_10326(): + bad = [ + EmptySet, + FiniteSet(1), + Interval(1, 2), + S.ComplexInfinity, + S.ImaginaryUnit, + S.Infinity, + S.NaN, + S.NegativeInfinity, + ] + interval = Interval(0, 5) + for i in bad: + assert i not in interval + + x = Symbol('x', real=True) + nr = Symbol('nr', extended_real=False) + assert x + 1 in Interval(x, x + 4) + assert nr not in Interval(x, x + 4) + assert Interval(1, 2) in FiniteSet(Interval(0, 5), Interval(1, 2)) + assert Interval(-oo, oo).contains(oo) is S.false + assert Interval(-oo, oo).contains(-oo) is S.false + + +def test_issue_2799(): + U = S.UniversalSet + a = Symbol('a', real=True) + inf_interval = Interval(a, oo) + R = S.Reals + + assert U + inf_interval == inf_interval + U + assert U + R == R + U + assert R + inf_interval == inf_interval + R + + +def test_issue_9706(): + assert Interval(-oo, 0).closure == Interval(-oo, 0, True, False) + assert Interval(0, oo).closure == Interval(0, oo, False, True) + assert Interval(-oo, oo).closure == Interval(-oo, oo) + + +def test_issue_8257(): + reals_plus_infinity = Union(Interval(-oo, oo), FiniteSet(oo)) 
+ reals_plus_negativeinfinity = Union(Interval(-oo, oo), FiniteSet(-oo)) + assert Interval(-oo, oo) + FiniteSet(oo) == reals_plus_infinity + assert FiniteSet(oo) + Interval(-oo, oo) == reals_plus_infinity + assert Interval(-oo, oo) + FiniteSet(-oo) == reals_plus_negativeinfinity + assert FiniteSet(-oo) + Interval(-oo, oo) == reals_plus_negativeinfinity + + +def test_issue_10931(): + assert S.Integers - S.Integers == EmptySet + assert S.Integers - S.Reals == EmptySet + + +def test_issue_11174(): + soln = Intersection(Interval(-oo, oo), FiniteSet(-x), evaluate=False) + assert Intersection(FiniteSet(-x), S.Reals) == soln + + soln = Intersection(S.Reals, FiniteSet(x), evaluate=False) + assert Intersection(FiniteSet(x), S.Reals) == soln + + +def test_issue_18505(): + assert ImageSet(Lambda(n, sqrt(pi*n/2 - 1 + pi/2)), S.Integers).contains(0) == \ + Contains(0, ImageSet(Lambda(n, sqrt(pi*n/2 - 1 + pi/2)), S.Integers)) + + +def test_finite_set_intersection(): + # The following should not produce recursion errors + # Note: some of these are not completely correct. See + # https://github.com/sympy/sympy/issues/16342. 
+ assert Intersection(FiniteSet(-oo, x), FiniteSet(x)) == FiniteSet(x) + assert Intersection._handle_finite_sets([FiniteSet(-oo, x), FiniteSet(0, x)]) == FiniteSet(x) + + assert Intersection._handle_finite_sets([FiniteSet(-oo, x), FiniteSet(x)]) == FiniteSet(x) + assert Intersection._handle_finite_sets([FiniteSet(2, 3, x, y), FiniteSet(1, 2, x)]) == \ + Intersection._handle_finite_sets([FiniteSet(1, 2, x), FiniteSet(2, 3, x, y)]) == \ + Intersection(FiniteSet(1, 2, x), FiniteSet(2, 3, x, y)) == \ + Intersection(FiniteSet(1, 2, x), FiniteSet(2, x, y)) + + assert FiniteSet(1+x-y) & FiniteSet(1) == \ + FiniteSet(1) & FiniteSet(1+x-y) == \ + Intersection(FiniteSet(1+x-y), FiniteSet(1), evaluate=False) + + assert FiniteSet(1) & FiniteSet(x) == FiniteSet(x) & FiniteSet(1) == \ + Intersection(FiniteSet(1), FiniteSet(x), evaluate=False) + + assert FiniteSet({x}) & FiniteSet({x, y}) == \ + Intersection(FiniteSet({x}), FiniteSet({x, y}), evaluate=False) + + +def test_union_intersection_constructor(): + # The actual exception does not matter here, so long as these fail + sets = [FiniteSet(1), FiniteSet(2)] + raises(Exception, lambda: Union(sets)) + raises(Exception, lambda: Intersection(sets)) + raises(Exception, lambda: Union(tuple(sets))) + raises(Exception, lambda: Intersection(tuple(sets))) + raises(Exception, lambda: Union(i for i in sets)) + raises(Exception, lambda: Intersection(i for i in sets)) + + # Python sets are treated the same as FiniteSet + # The union of a single set (of sets) is the set (of sets) itself + assert Union(set(sets)) == FiniteSet(*sets) + assert Intersection(set(sets)) == FiniteSet(*sets) + + assert Union({1}, {2}) == FiniteSet(1, 2) + assert Intersection({1, 2}, {2, 3}) == FiniteSet(2) + + +def test_Union_contains(): + assert zoo not in Union( + Interval.open(-oo, 0), Interval.open(0, oo)) + + +@XFAIL +def test_issue_16878b(): + # in intersection_sets for (ImageSet, Set) there is no code + # that handles the base_set of S.Reals like there is + # 
for Integers + assert imageset(x, (x, x), S.Reals).is_subset(S.Reals**2) is True + +def test_DisjointUnion(): + assert DisjointUnion(FiniteSet(1, 2, 3), FiniteSet(1, 2, 3), FiniteSet(1, 2, 3)).rewrite(Union) == (FiniteSet(1, 2, 3) * FiniteSet(0, 1, 2)) + assert DisjointUnion(Interval(1, 3), Interval(2, 4)).rewrite(Union) == Union(Interval(1, 3) * FiniteSet(0), Interval(2, 4) * FiniteSet(1)) + assert DisjointUnion(Interval(0, 5), Interval(0, 5)).rewrite(Union) == Union(Interval(0, 5) * FiniteSet(0), Interval(0, 5) * FiniteSet(1)) + assert DisjointUnion(Interval(-1, 2), S.EmptySet, S.EmptySet).rewrite(Union) == Interval(-1, 2) * FiniteSet(0) + assert DisjointUnion(Interval(-1, 2)).rewrite(Union) == Interval(-1, 2) * FiniteSet(0) + assert DisjointUnion(S.EmptySet, Interval(-1, 2), S.EmptySet).rewrite(Union) == Interval(-1, 2) * FiniteSet(1) + assert DisjointUnion(Interval(-oo, oo)).rewrite(Union) == Interval(-oo, oo) * FiniteSet(0) + assert DisjointUnion(S.EmptySet).rewrite(Union) == S.EmptySet + assert DisjointUnion().rewrite(Union) == S.EmptySet + raises(TypeError, lambda: DisjointUnion(Symbol('n'))) + + x = Symbol("x") + y = Symbol("y") + z = Symbol("z") + assert DisjointUnion(FiniteSet(x), FiniteSet(y, z)).rewrite(Union) == (FiniteSet(x) * FiniteSet(0)) + (FiniteSet(y, z) * FiniteSet(1)) + +def test_DisjointUnion_is_empty(): + assert DisjointUnion(S.EmptySet).is_empty is True + assert DisjointUnion(S.EmptySet, S.EmptySet).is_empty is True + assert DisjointUnion(S.EmptySet, FiniteSet(1, 2, 3)).is_empty is False + +def test_DisjointUnion_is_iterable(): + assert DisjointUnion(S.Integers, S.Naturals, S.Rationals).is_iterable is True + assert DisjointUnion(S.EmptySet, S.Reals).is_iterable is False + assert DisjointUnion(FiniteSet(1, 2, 3), S.EmptySet, FiniteSet(x, y)).is_iterable is True + assert DisjointUnion(S.EmptySet, S.EmptySet).is_iterable is False + +def test_DisjointUnion_contains(): + assert (0, 0) in DisjointUnion(FiniteSet(0, 1, 2), FiniteSet(0, 1, 2), 
FiniteSet(0, 1, 2)) + assert (0, 1) in DisjointUnion(FiniteSet(0, 1, 2), FiniteSet(0, 1, 2), FiniteSet(0, 1, 2)) + assert (0, 2) in DisjointUnion(FiniteSet(0, 1, 2), FiniteSet(0, 1, 2), FiniteSet(0, 1, 2)) + assert (1, 0) in DisjointUnion(FiniteSet(0, 1, 2), FiniteSet(0, 1, 2), FiniteSet(0, 1, 2)) + assert (1, 1) in DisjointUnion(FiniteSet(0, 1, 2), FiniteSet(0, 1, 2), FiniteSet(0, 1, 2)) + assert (1, 2) in DisjointUnion(FiniteSet(0, 1, 2), FiniteSet(0, 1, 2), FiniteSet(0, 1, 2)) + assert (2, 0) in DisjointUnion(FiniteSet(0, 1, 2), FiniteSet(0, 1, 2), FiniteSet(0, 1, 2)) + assert (2, 1) in DisjointUnion(FiniteSet(0, 1, 2), FiniteSet(0, 1, 2), FiniteSet(0, 1, 2)) + assert (2, 2) in DisjointUnion(FiniteSet(0, 1, 2), FiniteSet(0, 1, 2), FiniteSet(0, 1, 2)) + assert (0, 1, 2) not in DisjointUnion(FiniteSet(0, 1, 2), FiniteSet(0, 1, 2), FiniteSet(0, 1, 2)) + assert (0, 0.5) not in DisjointUnion(FiniteSet(0.5)) + assert (0, 5) not in DisjointUnion(FiniteSet(0, 1, 2), FiniteSet(0, 1, 2), FiniteSet(0, 1, 2)) + assert (x, 0) in DisjointUnion(FiniteSet(x, y, z), S.EmptySet, FiniteSet(y)) + assert (y, 0) in DisjointUnion(FiniteSet(x, y, z), S.EmptySet, FiniteSet(y)) + assert (z, 0) in DisjointUnion(FiniteSet(x, y, z), S.EmptySet, FiniteSet(y)) + assert (y, 2) in DisjointUnion(FiniteSet(x, y, z), S.EmptySet, FiniteSet(y)) + assert (0.5, 0) in DisjointUnion(Interval(0, 1), Interval(0, 2)) + assert (0.5, 1) in DisjointUnion(Interval(0, 1), Interval(0, 2)) + assert (1.5, 0) not in DisjointUnion(Interval(0, 1), Interval(0, 2)) + assert (1.5, 1) in DisjointUnion(Interval(0, 1), Interval(0, 2)) + +def test_DisjointUnion_iter(): + D = DisjointUnion(FiniteSet(3, 5, 7, 9), FiniteSet(x, y, z)) + it = iter(D) + L1 = [(x, 1), (y, 1), (z, 1)] + L2 = [(3, 0), (5, 0), (7, 0), (9, 0)] + nxt = next(it) + assert nxt in L2 + L2.remove(nxt) + nxt = next(it) + assert nxt in L1 + L1.remove(nxt) + nxt = next(it) + assert nxt in L2 + L2.remove(nxt) + nxt = next(it) + assert nxt in L1 + L1.remove(nxt) 
+ nxt = next(it) + assert nxt in L2 + L2.remove(nxt) + nxt = next(it) + assert nxt in L1 + L1.remove(nxt) + nxt = next(it) + assert nxt in L2 + L2.remove(nxt) + raises(StopIteration, lambda: next(it)) + + raises(ValueError, lambda: iter(DisjointUnion(Interval(0, 1), S.EmptySet))) + +def test_DisjointUnion_len(): + assert len(DisjointUnion(FiniteSet(3, 5, 7, 9), FiniteSet(x, y, z))) == 7 + assert len(DisjointUnion(S.EmptySet, S.EmptySet, FiniteSet(x, y, z), S.EmptySet)) == 3 + raises(ValueError, lambda: len(DisjointUnion(Interval(0, 1), S.EmptySet))) + +def test_SetKind_ProductSet(): + p = ProductSet(FiniteSet(Matrix([1, 2])), FiniteSet(Matrix([1, 2]))) + mk = MatrixKind(NumberKind) + k = SetKind(TupleKind(mk, mk)) + assert p.kind is k + assert ProductSet(Interval(1, 2), FiniteSet(Matrix([1, 2]))).kind is SetKind(TupleKind(NumberKind, mk)) + +def test_SetKind_Interval(): + assert Interval(1, 2).kind is SetKind(NumberKind) + +def test_SetKind_EmptySet_UniversalSet(): + assert S.UniversalSet.kind is SetKind(UndefinedKind) + assert EmptySet.kind is SetKind() + +def test_SetKind_FiniteSet(): + assert FiniteSet(1, Matrix([1, 2])).kind is SetKind(UndefinedKind) + assert FiniteSet(1, 2).kind is SetKind(NumberKind) + +def test_SetKind_Unions(): + assert Union(FiniteSet(Matrix([1, 2])), Interval(1, 2)).kind is SetKind(UndefinedKind) + assert Union(Interval(1, 2), Interval(1, 7)).kind is SetKind(NumberKind) + +def test_SetKind_DisjointUnion(): + A = FiniteSet(1, 2, 3) + B = Interval(0, 5) + assert DisjointUnion(A, B).kind is SetKind(NumberKind) + +def test_SetKind_evaluate_False(): + U = lambda *args: Union(*args, evaluate=False) + assert U({1}, EmptySet).kind is SetKind(NumberKind) + assert U(Interval(1, 2), EmptySet).kind is SetKind(NumberKind) + assert U({1}, S.UniversalSet).kind is SetKind(UndefinedKind) + assert U(Interval(1, 2), Interval(4, 5), + FiniteSet(1)).kind is SetKind(NumberKind) + I = lambda *args: Intersection(*args, evaluate=False) + assert I({1}, 
S.UniversalSet).kind is SetKind(NumberKind) + assert I({1}, EmptySet).kind is SetKind() + C = lambda *args: Complement(*args, evaluate=False) + assert C(S.UniversalSet, {1, 2, 4, 5}).kind is SetKind(UndefinedKind) + assert C({1, 2, 3, 4, 5}, EmptySet).kind is SetKind(NumberKind) + assert C(EmptySet, {1, 2, 3, 4, 5}).kind is SetKind() + +def test_SetKind_ImageSet_Special(): + f = ImageSet(Lambda(n, n ** 2), Interval(1, 4)) + assert (f - FiniteSet(3)).kind is SetKind(NumberKind) + assert (f + Interval(16, 17)).kind is SetKind(NumberKind) + assert (f + FiniteSet(17)).kind is SetKind(NumberKind) + +def test_issue_20089(): + B = FiniteSet(FiniteSet(1, 2), FiniteSet(1)) + assert 1 not in B + assert 1.0 not in B + assert not Eq(1, FiniteSet(1, 2)) + assert FiniteSet(1) in B + A = FiniteSet(1, 2) + assert A in B + assert B.issubset(B) + assert not A.issubset(B) + assert 1 in A + C = FiniteSet(FiniteSet(1, 2), FiniteSet(1), 1, 2) + assert A.issubset(C) + assert B.issubset(C) + +def test_issue_19378(): + a = FiniteSet(1, 2) + b = ProductSet(a, a) + c = FiniteSet((1, 1), (1, 2), (2, 1), (2, 2)) + assert b.is_subset(c) is True + d = FiniteSet(1) + assert b.is_subset(d) is False + assert Eq(c, b).simplify() is S.true + assert Eq(a, c).simplify() is S.false + assert Eq({1}, {x}).simplify() == Eq({1}, {x}) + +def test_intersection_symbolic(): + n = Symbol('n') + # These should not throw an error + assert isinstance(Intersection(Range(n), Range(100)), Intersection) + assert isinstance(Intersection(Range(n), Interval(1, 100)), Intersection) + assert isinstance(Intersection(Range(100), Interval(1, n)), Intersection) + + +@XFAIL +def test_intersection_symbolic_failing(): + n = Symbol('n', integer=True, positive=True) + assert Intersection(Range(10, n), Range(4, 500, 5)) == Intersection( + Range(14, n), Range(14, 500, 5)) + assert Intersection(Interval(10, n), Range(4, 500, 5)) == Intersection( + Interval(14, n), Range(14, 500, 5)) + + +def test_issue_20379(): + 
#https://github.com/sympy/sympy/issues/20379 + x = pi - 3.14159265358979 + assert FiniteSet(x).evalf(2) == FiniteSet(Float('3.23108914886517e-15', 2)) + +def test_finiteset_simplify(): + S = FiniteSet(1, cos(1)**2 + sin(1)**2) + assert S.simplify() == {1} + +def test_issue_14336(): + #https://github.com/sympy/sympy/issues/14336 + U = S.Complexes + x = Symbol("x") + U -= U.intersect(Ne(x, 1).as_set()) + U -= U.intersect(S.true.as_set()) + +def test_issue_9855(): + #https://github.com/sympy/sympy/issues/9855 + x, y, z = symbols('x, y, z', real=True) + s1 = Interval(1, x) & Interval(y, 2) + s2 = Interval(1, 2) + assert s1.is_subset(s2) == None diff --git a/vllm/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solveset.cpython-310.pyc b/vllm/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solveset.cpython-310.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6e801bdda309bbfdfc38599cf7d17816240e6833 --- /dev/null +++ b/vllm/lib/python3.10/site-packages/sympy/solvers/tests/__pycache__/test_solveset.cpython-310.pyc @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3344c79603feade249b2001f906940e3a0034a751af4387c48c80d1d2b38b19b +size 137704